for_stmt.rs
// b2c2-compiler crate::for_stmt // author: Leonardone @ NEETSDKASU use super::*; impl Compiler { // For statement (Step is a constant) pub(super) fn compile_for_with_literal_step( &mut self,
for_stmt: &parser::Statement, step: i32, ) { let (exit_id, counter, is_ref, init, end, block) = if let parser::Statement::For { exit_id, counter, counter_is_ref: is_ref, init, end, step: _, block, } = for_stmt { (*exit_id, counter, *is_ref, init, end, block) } else { unreachable!("BUG"); }; let comment = format!( "For {counter} = {init} To {end} Step {step}", counter = counter, init = init, end = end, step = step ); self.add_debugger_hint(|| comment.clone()); self.nest_depth += 1; self.comment(comment.as_str()); // calc {end} let end_var = if let parser::Expr::LitInteger(_) = end { None } else { let end_var = self.get_temp_int_var_label(); // expected to be GR7 let end_reg = self.compile_int_expr(end); self.code(casl2::Command::A { code: casl2::A::St, r: end_reg, adr: casl2::Adr::label(&end_var), x: None, }); self.set_register_idle(end_reg); // GR7 should be released here Some(end_var) }; // prepare the counter let counter_var = if is_ref { self.get_ref_int_var_label(counter) } else { self.get_int_var_label(counter) }; self.set_debugger_hint_extra_info(|| ExtraInfo::For { counter: counter_var.clone(), to: end_var.clone(), step: None, }); // calc {init} and assign to {counter} // expected to be GR7 let init_reg = self.compile_int_expr(init); let temp_reg = self.get_idle_register(); self.code(counter_var.st_value(init_reg, temp_reg)); self.set_register_idle(temp_reg); self.set_register_idle(init_reg); // GR7 should be released here // prepare the labels let condition_label = self.get_new_jump_label(); let loop_label = self.get_loop_label(exit_id); let exit_label = self.get_exit_label(exit_id); // loop-continuation test // at this point, // all registers // should be idle... let (saves, recovers) = self.get_save_registers_src(std::slice::from_ref(&casl2::Register::Gr1)); self.code(format!("{cond} NOP", cond = condition_label)); self.add_debugger_hint(|| "(begin for)".to_string()); self.set_debugger_hint_extra_info(|| ExtraInfo::RelatedCode(comment.clone())); self.code(saves); if let Some(end_var) = end_var.as_ref() { self.code(format!( r#" {ld_gr1_counter} CPA GR1,{end}"#, ld_gr1_counter = counter_var.ld_value(casl2::Register::Gr1), end = end_var )); } else if let parser::Expr::LitInteger(end) = end { self.code(format!( r#" {ld_gr1_counter} CPA GR1,={end}"#, ld_gr1_counter = counter_var.ld_value(casl2::Register::Gr1), end = *end as i16 )); } else { unreachable!("BUG"); } self.code(recovers); if step < 0 { self.code(format!(" JMI {exit}", exit = exit_label)); } else { self.code(format!(" JPL {exit}", exit = exit_label)); } // execute the loop body for stmt in block.iter() { self.compile(stmt); } // loop tail (counter update, etc.) self.nest_depth -= 1; self.add_debugger_hint(|| format!("Next {counter}", counter = counter)); self.comment(format!("Next {counter}", counter = counter)); self.code(format!("{next} NOP", next = loop_label)); let (saves, recovers) = { use casl2::Register::*; self.get_save_registers_src(&[Gr1, Gr2]) }; self.code(saves); self.code(format!( r#" {lad_gr1_counterpos} LD GR2,0,GR1 LAD GR2,{step},GR2 ST GR2,0,GR1"#, lad_gr1_counterpos = counter_var.lad_pos(casl2::Register::Gr1), step = step )); self.code(recovers); self.add_debugger_hint(|| "(end for)".to_string()); self.set_debugger_hint_extra_info(|| ExtraInfo::RelatedCode(comment)); self.code(format!(" JUMP {cond}", cond = condition_label)); self.code(format!("{exit} NOP", exit = exit_label)); if let Some(end_var) = end_var { self.return_temp_int_var_label(end_var); } } // For statement pub(super) fn compile_for(&mut self, for_stmt: &parser::Statement) { let (exit_id, counter, is_ref, init, end, step, block) = if let parser::Statement::For { exit_id,
counter, counter_is_ref: is_ref, init, end, step: Some(step), block, } = for_stmt { (*exit_id, counter, *is_ref, init, end, step, block) } else { unreachable!("BUG"); }; let comment = format!( "For {counter} = {init} To {end} Step {step}", counter = counter, init = init, end = end, step = step ); self.add_debugger_hint(|| comment.clone()); self.nest_depth += 1; self.comment(comment.as_str()); // calc {step} let step_var = self.get_temp_int_var_label(); // expected to be GR7 let step_reg = self.compile_int_expr(step); self.code(casl2::Command::A { code: casl2::A::St, r: step_reg, adr: casl2::Adr::label(&step_var), x: None, }); self.set_register_idle(step_reg); // GR7 should be released here // calc {end} let end_var = if let parser::Expr::LitInteger(_) = end { None } else { let end_var = self.get_temp_int_var_label(); // expected to be GR7 let end_reg = self.compile_int_expr(end); self.code(casl2::Command::A { code: casl2::A::St, r: end_reg, adr: casl2::Adr::label(&end_var), x: None, }); self.set_register_idle(end_reg); // GR7 should be released here Some(end_var) }; // prepare the counter let counter_var = if is_ref { self.get_ref_int_var_label(counter) } else { self.get_int_var_label(counter) }; self.set_debugger_hint_extra_info(|| ExtraInfo::For { counter: counter_var.clone(), to: end_var.clone(), step: Some(step_var.clone()), }); // calc {init} and assign to {counter} // expected to be GR7 let init_reg = self.compile_int_expr(init); let temp_reg = self.get_idle_register(); self.code(counter_var.st_value(init_reg, temp_reg)); self.set_register_idle(temp_reg); self.set_register_idle(init_reg); // GR7 should be released here // prepare the labels let condition_label = self.get_new_jump_label(); let negastep_label = self.get_new_jump_label(); let blockhead_label = self.get_new_jump_label(); let loop_label = self.get_loop_label(exit_id); let exit_label = self.get_exit_label(exit_id); // loop-continuation test // at this point, // all registers // should be idle... let (saves, recovers) = self.get_save_registers_src(std::slice::from_ref(&casl2::Register::Gr1)); self.code(format!("{cond} NOP", cond = condition_label)); self.add_debugger_hint(|| "(begin for)".to_string()); self.set_debugger_hint_extra_info(|| ExtraInfo::RelatedCode(comment.clone())); self.code(saves); if let Some(end_var) = end_var.as_ref() { self.code(format!( r#" LD GR1,{step} JMI {nega} {ld_gr1_counter} CPA GR1,{end} JUMP {block} {nega} LD GR0,{end} {lad_gr1_counterpos} CPA GR0,0,GR1 {block} NOP"#, step = step_var, nega = negastep_label, ld_gr1_counter = counter_var.ld_value(casl2::Register::Gr1), end = end_var, block = blockhead_label, lad_gr1_counterpos = counter_var.lad_pos(casl2::Register::Gr1) )); } else if let parser::Expr::LitInteger(end) = end { self.code(format!( r#" LD GR1,{step} JMI {nega} {ld_gr1_counter} CPA GR1,={end} JUMP {block} {nega} LAD GR0,{end} {lad_gr1_counterpos} CPA GR0,0,GR1 {block} NOP"#, step = step_var, nega = negastep_label, ld_gr1_counter = counter_var.ld_value(casl2::Register::Gr1), end = *end as i16, block = blockhead_label, lad_gr1_counterpos = counter_var.lad_pos(casl2::Register::Gr1) )); } else { unreachable!("BUG"); } self.code(recovers); self.code(format!(" JPL {exit}", exit = exit_label)); // execute the loop body for stmt in block.iter() { self.compile(stmt); } // loop tail (counter update, etc.) self.nest_depth -= 1; self.add_debugger_hint(|| format!("Next {counter}", counter = counter)); self.comment(format!("Next {counter}", counter = counter)); // at this point, // all registers // should be idle... let (saves, recovers) = self.get_save_registers_src(std::slice::from_ref(&casl2::Register::Gr1)); self.code(format!("{next} NOP", next = loop_label));
self.code(saves); self.code(format!( r#" {lad_gr1_counterpos} LD GR0,0,GR1 ADDA GR0,{step} ST GR0,0,GR1"#, lad_gr1_counterpos = counter_var.lad_pos(casl2::Register::Gr1), step = step_var )); self.code(recovers); self.add_debugger_hint(|| "(end for)".to_string()); self.set_debugger_hint_extra_info(|| ExtraInfo::RelatedCode(comment)); self.code(format!(" JUMP {cond}", cond = condition_label)); self.code(format!("{exit} NOP", exit = exit_label)); if let Some(end_var) = end_var { self.return_temp_int_var_label(end_var); } self.return_temp_int_var_label(step_var); } }
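For reference, a rough sketch of the CASL2 that the literal-step path above would emit for `For i = 1 To 10 Step 2`. The labels J1/L1/E1, the counter variable label I, and the omission of the register save/restore sequences are illustrative assumptions; the instruction order follows compile_for_with_literal_step:

J1   NOP                 ; condition_label
     LD    GR1,I         ; counter_var.ld_value(GR1)
     CPA   GR1,=10       ; compare counter with the literal end
     JPL   E1            ; step > 0, so exit once counter > end (JMI for a negative step)
     ; ... compiled loop body ...
L1   NOP                 ; loop_label, the target of "Next i"
     LAD   GR1,I         ; counter_var.lad_pos(GR1)
     LD    GR2,0,GR1
     LAD   GR2,2,GR2     ; counter += literal step
     ST    GR2,0,GR1
     JUMP  J1
E1   NOP                 ; exit_label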
crd-resource-details.tsx
/** * Copyright (c) 2021 OpenLens Authors * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ import "./crd-resource-details.scss"; import React from "react"; import jsonPath from "jsonpath"; import { observer } from "mobx-react"; import { computed, makeObservable } from "mobx"; import { cssNames } from "../../utils"; import { Badge } from "../badge"; import { DrawerItem } from "../drawer"; import type { KubeObjectDetailsProps } from "../kube-object"; import { crdStore } from "./crd.store"; import { KubeObjectMeta } from "../kube-object/kube-object-meta"; import { Input } from "../input"; import type { AdditionalPrinterColumnsV1, CustomResourceDefinition } from "../../api/endpoints/crd.api"; import { parseJsonPath } from "../../utils/jsonPath"; interface Props extends KubeObjectDetailsProps<CustomResourceDefinition> { } function convertSpecValue(value: any): any { if (Array.isArray(value)) { return value.map(convertSpecValue); } if (typeof value === "object") { return ( <Input readOnly multiLine theme="round-black" className="box grow" value={JSON.stringify(value, null, 2)} /> ); } return value; } @observer export class CrdResourceDetails extends React.Component<Props> { constructor(props: Props) { super(props); makeObservable(this); } @computed get crd() { return crdStore.getByObject(this.props.object); } renderAdditionalColumns(crd: CustomResourceDefinition, columns: AdditionalPrinterColumnsV1[]) { return columns.map(({ name, jsonPath: jp }) => ( <DrawerItem key={name} name={name} renderBoolean> {convertSpecValue(jsonPath.value(crd, parseJsonPath(jp.slice(1))))} </DrawerItem> )); }
renderStatus(crd: CustomResourceDefinition, columns: AdditionalPrinterColumnsV1[]) { const showStatus = !columns.find(column => column.name == "Status") && crd.status?.conditions; if (!showStatus) { return null; } const conditions = crd.status.conditions .filter(({ type, reason }) => type || reason) .map(({ type, reason, message, status }) => ({ kind: type || reason, message, status })) .map(({ kind, message, status }, index) => ( <Badge key={kind + index} label={kind} className={cssNames({ disabled: status === "False" }, kind.toLowerCase())} tooltip={message} /> )); return ( <DrawerItem name="Status" className="status" labelsOnly> {conditions} </DrawerItem> ); } render() { const { props: { object }, crd } = this; if (!object || !crd) { return null; } const className = cssNames("CrdResourceDetails", crd.getResourceKind()); const extraColumns = crd.getPrinterColumns(); return ( <div className={className}> <KubeObjectMeta object={object} /> {this.renderAdditionalColumns(object, extraColumns)} {this.renderStatus(object, extraColumns)} </div> ); } }
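An illustrative sketch (the column, object, and values are hypothetical) of how renderAdditionalColumns resolves a printer column; note that jp.slice(1) strips the leading dot that Kubernetes printer-column JSONPaths carry before the path reaches parseJsonPath:

// Hypothetical printer column and custom resource instance:
const column = { name: "Replicas", jsonPath: ".spec.replicas" };
const resource = { spec: { replicas: 3 } };
// The component evaluates: jsonPath.value(resource, parseJsonPath(column.jsonPath.slice(1))) -> 3
// Scalars render directly; objects and arrays are pretty-printed into a read-only <Input> by convertSpecValue.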
register.tsx
// @flow import { useAuth } from 'context/auth-context'; import * as React from 'react'; type Props = { }; export const RegisterScreen = (props: Props) => { const { register } = useAuth() const handleSubmit = (event: React.FormEvent<HTMLFormElement>) => { event.preventDefault() const username = (event.currentTarget.elements[0] as HTMLInputElement).value const password = (event.currentTarget.elements[1] as HTMLInputElement).value register({ username, password }) } return ( <form onSubmit={handleSubmit}> <div> <label htmlFor="username">Username</label> <input type="text" id={'username'} /> </div> <div> <label htmlFor="password">Password</label> <input type="password" id={'password'} /> </div> <button type="submit">Register</button> </form> ); };
bubble_chart_mx.js
/* bubbleChart creation function. Returns a function that will * instantiate a new bubble chart given a DOM element to display * it in and a dataset to visualize. * * Organization and style inspired by: * https://bost.ocks.org/mike/chart/ * */ // ================================================================================================================= // Edited BubbleChart() Function variables & changed bubbleChartName for dropdown menu purposes // ================================================================================================================= function bubbleChartMX() { // Constants for sizing var width = 1120; var height = 790; // tooltip for mouseover functionality var tooltip = floatingTooltip('gates_tooltip', 240); // Locations to move bubbles towards, depending // on which view mode is selected. var center = { x: width / 2.3, y: height / 2 }; var yearCenters = { 24: { x: width / 1.7, y: height / 2 }, 25: { x: width / 3.2, y: height / 2 }, 22: { x: 4.3 * width / 5, y: height / 2}, 23: { x: width /2.3, y: height / 2 }, 10: { x: 2.2 * width / 3, y: height / 2 }, 1: { x: width, y: height }, 2: {x: width , y: height }, 15: { x: width, y: height }, 17: {x: width, y: height }, 18: {x: width, y: height }, 19: {x: width, y: height }, 20: {x: width, y: height }, 21: { x: width, y: height }, 26: {x: width, y: height }, 27: { x: width, y: height }, 28: {x: width, y: height }, 29: {x: width, y: height }, 30: {x: width, y: height }, 31: {x: width, y: height }, 32: {x: width, y: height }, 33: {x: width, y: height }, 34: {x: width, y: height }, 35: {x: width, y: height }, 36: {x: width, y: height }, 37: {x: width, y: height }, 38: {x: width, y: height }, 39: {x: width, y: height }, 40: {x: width, y: height }, 41: {x: width, y: height }, 42: {x: width, y: height }, 43: {x: width, y: height }, 44: {x: width, y: height }, 0: {x: width, y: height }, }; // ================================================================================================================= // Edited yearsTitle X variables (key, values) for placement // ================================================================================================================= // X locations of the year titles. var yearsTitleX = { News_Politics: 100, Comedy: 280, Entertainment: 520, Music: 760, People_Blogs: 1000, }; // @v4 strength to apply to the position forces var forceStrength = 0.03; // These will be set in create_nodes and create_vis var svg = null; var bubbles = null; var nodes = []; // Charge function that is called for each node. // As part of the ManyBody force. // This is what creates the repulsion between nodes. // // Charge is proportional to the diameter of the // circle (which is stored in the radius attribute // of the circle's associated data). // // This is done to allow for accurate collision // detection with nodes of different sizes. // // Charge is negative because we want nodes to repel. // @v4 Before the charge was a stand-alone attribute // of the force layout. Now we can use it as a separate force! function charge(d) { return -Math.pow(d.radius, 2.0) * forceStrength; } // Here we create a force layout and // @v4 We create a force simulation now and // add forces to it.
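// Worked example of the charge above: with forceStrength = 0.03, a bubble of radius 10
// gets charge -Math.pow(10, 2.0) * 0.03 = -3, while a radius-40 bubble gets -48, so
// repulsion grows with bubble area and collision detection stays accurate across sizes.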
var simulation = d3.forceSimulation() .velocityDecay(0.2) .force('x', d3.forceX().strength(forceStrength).x(center.x)) .force('y', d3.forceY().strength(forceStrength).y(center.y)) .force('charge', d3.forceManyBody().strength(charge)) .on('tick', ticked); // @v4 Force starts up automatically, // which we don't want as there aren't any nodes yet. simulation.stop(); // ================================================================================================================= // Edited .domain and .range values // ================================================================================================================= // Nice looking colors - no reason to buck the trend // @v4 scales now have a flattened naming scheme var fillColor = d3.scaleOrdinal() .domain(['Entertainment', 'Music', 'People & Blogs','Comedy','News & Politics','Film & Animation','Autos & Vehicles','Pets & Animals','Sports','Short Movies','Travel & Events','Gaming','Videoblogging','Howto & Style','Education','Science & Technology','Noneprofits','Movies','Anime/Animation','Action/Adventure','Classics','Comedy','Documentary','Drama','Family','Foreign','Horror','Sci-Fi/Fantasy','Thriller','Shorts','Shows','Trailers','0']) .range(['#F08080', '#8A2BE2', '#FFFF00', '#7CFC00','#00FFFF','#7FFFD4','#FF4500','#8B008B','#FFB6C1','#FFF8DC','#BC8F8F','#778899','#808080','#D3D3D3','#D2691E','#FF1493','#000080','#87CEFA','#90EE90','#008000','#FFD700','#E9967A','#808000','#2F4F4F','#8FBC8F','#008080','#C71585','#FAEBD7','#8B4513','#191970','#00FA9A','#FFFFFF','#000000']); /* * This data manipulation function takes the raw data from * the CSV file and converts it into an array of node objects. * Each node will store data and visualization values to visualize * a bubble. * * rawData is expected to be an array of data objects, read in from * one of d3's loading functions like d3.csv. * * This function returns the new node array, with a node in that * array for each element in the rawData input. */ function createNodes(rawData) { // Use the max total_amount in the data as the max in the scale's domain // note we have to ensure the total_amount is a number. var maxAmount = d3.max(rawData, function (d) { return +d.views; }); // Sizes bubbles based on area. // @v4: new flattened scale names. var radiusScale = d3.scalePow() .exponent(0.5) .range([2, 41]) .domain([0, maxAmount]); // Use map() to convert raw data into node data. // Checkout http://learnjsdata.com/ for more on // working with data. // ================================================================================================================= // Edited myNodes return values // ================================================================================================================= var myNodes = rawData.map(function (d) { return { id: d.no_of_vids, radius: radiusScale(+d.views), value: +d.views, name: d.channel_title, org: d.title, group: d.Category_name, year: d.category_id, trending: d.trending_date, x: Math.random() * 900, y: Math.random() * 800 }; }); // sort them to prevent occlusion of smaller nodes. myNodes.sort(function (a, b) { return b.value - a.value; }); return myNodes; } /* * Main entry point to the bubble chart. This function is returned * by the parent closure. It prepares the rawData for visualization * and adds an svg element to the provided selector and starts the * visualization creation process. * * selector is expected to be a DOM element or CSS selector that * points to the parent element of the bubble chart. 
Inside this * element, the code will add the SVG container for the visualization. * * rawData is expected to be an array of data objects as provided by * a d3 loading function like d3.csv. */ var chart = function chart(selector, rawData) { // convert raw data into nodes data nodes = createNodes(rawData); // Create a SVG element inside the provided selector // with desired size. svg = d3.select(selector) .append('svg') .attr('width', width) .attr('height', height); // Bind nodes data to what will become DOM elements to represent them. bubbles = svg.selectAll('.bubble') .data(nodes, function (d) { return d.id; }); // Create new circle elements each with class `bubble`. // There will be one circle.bubble for each object in the nodes array. // Initially, their radius (r attribute) will be 0. // @v4 Selections are immutable, so let's capture the // enter selection to apply our transition to below. var bubblesE = bubbles.enter().append('circle') .classed('bubble', true) .attr('r', 0) .attr('fill', function (d) { return fillColor(d.group); }) .attr('stroke', function (d) { return d3.rgb(fillColor(d.group)).darker(); }) .attr('stroke-width', 2) .on('mouseover', showDetail) .on('mouseout', hideDetail); // @v4 Merge the original empty selection and the enter selection bubbles = bubbles.merge(bubblesE); // Fancy transition to make bubbles appear, ending with the // correct radius bubbles.transition() .duration(2000) .attr('r', function (d) { return d.radius; }); // Set the simulation's nodes to our newly created nodes array. // @v4 Once we set the nodes, the simulation will start running automatically! simulation.nodes(nodes); // Set initial layout to single group. groupBubbles(); }; /* * Callback function that is called after every tick of the * force simulation. * Here we do the actual repositioning of the SVG circles * based on the current x and y values of their bound node data. * These x and y values are modified by the force simulation. */ function ticked() { bubbles .attr('cx', function (d) { return d.x; }) .attr('cy', function (d) { return d.y; }); } /* * Provides an x value for each node to be used with the split by year * x force. */ function nodeYearPos(d) { return yearCenters[d.year].x; } /* * Sets visualization in "single group mode". * The year labels are hidden and the force layout * tick function is set to move all nodes to the * center of the visualization. */ function groupBubbles
() { hideYearTitles(); // @v4 Reset the 'x' force to draw the bubbles to the center. simulation.force('x', d3.forceX().strength(forceStrength).x(center.x)); // @v4 We can reset the alpha value and restart the simulation simulation.alpha(1).restart(); } /* * Sets visualization in "split by year mode". * The year labels are shown and the force layout * tick function is set to move nodes to the * yearCenter of their data's year. */ function splitBubbles() { showYearTitles(); // @v4 Reset the 'x' force to draw the bubbles to their year centers simulation.force('x', d3.forceX().strength(forceStrength).x(nodeYearPos)); // @v4 We can reset the alpha value and restart the simulation simulation.alpha(1).restart(); } /* * Hides Year title displays. */ function hideYearTitles() { svg.selectAll('.year').remove(); } /* * Shows Year title displays. */ function showYearTitles() { // Another way to do this would be to create // the year texts once and then just hide them. var yearsData = d3.keys(yearsTitleX); var years = svg.selectAll('.year') .data(yearsData); years.enter().append('text') .attr('class', 'year') .attr('x', function (d) { return yearsTitleX[d]; }) .attr('y', 40) .attr('text-anchor', 'middle') .text(function (d) { return d; }); } /* * Function called on mouseover to display the * details of a bubble in the tooltip. */ // ================================================================================================================= // Edited var content values ( for mouseover info display) // ================================================================================================================= function showDetail(d) { // change outline to indicate hover state. d3.select(this).attr('stroke', 'black'); var content = '<span class="name">Channel Title: </span><span class="value">' + d.name + '</span><br/>' + '<span class="name">Views: </span><span class="value">' + addCommas(d.value) + '</span><br/>' + '<span class="name">Category Name: </span><span class="value">' + d.group + '</span><br/>'+ '<span class="name">Trending Date(YY/DD/MM): </span><span class="value">' + d.trending + '</span>'; tooltip.showTooltip(content, d3.event); } /* * Hides tooltip */ function hideDetail(d) { // reset outline d3.select(this) .attr('stroke', d3.rgb(fillColor(d.group)).darker()); tooltip.hideTooltip(); } /* * Externally accessible function (this is attached to the * returned chart function). Allows the visualization to toggle * between "single group" and "split by year" modes. * * displayName is expected to be a string and either 'year' or 'all'. */ chart.toggleDisplay = function (displayName) { if (displayName === 'year') { splitBubbles(); } else { groupBubbles(); } }; // return the chart function from closure. return chart; } /* * Below is the initialization code as well as some helper functions * to create a new bubble chart instance, load the data, and display it. */ // ================================================================================================================= // Below var was removed from this section for dropdown menu purpose // This line has moved inside the display function at the bottom // ================================================================================================================= //var myBubbleChart = bubbleChart(); /* * Function called once data is loaded from CSV. * Calls bubble chart function to display inside #vis div. 
*/ // ================================================================================================================= // Below display function has been commented out for drop down menu purpose // ================================================================================================================= // function display(error, data) { // if (error) { // console.log(error); // } // myBubbleChart('#vis', data); // } /* * Sets up the layout buttons to allow for toggling between view modes. * The chart instance is passed in by the country-specific display function. */ function setupButtons(chart) { d3.select('#toolbar') .selectAll('.button') .on('click', function () { // Remove active class from all buttons d3.selectAll('.button').classed('active', false); // Find the button just clicked var button = d3.select(this); // Set it as the active button button.classed('active', true); // Get the id of the button var buttonId = button.attr('id'); // Toggle the bubble chart based on // the currently clicked button. chart.toggleDisplay(buttonId); }); } /* * Helper function to convert a number into a string * and add commas to it to improve presentation. */ function addCommas(nStr) { nStr += ''; var x = nStr.split('.'); var x1 = x[0]; var x2 = x.length > 1 ? '.' + x[1] : ''; var rgx = /(\d+)(\d{3})/; while (rgx.test(x1)) { x1 = x1.replace(rgx, '$1' + ',' + '$2'); } return x1 + x2; } // ================================================================================================================= // Loading CSV file was moved into a function for menu Dropdown purposes // ================================================================================================================= // Load the data. // d3.csv('data/us_cat_data.csv', display); // ================================================================================================================= // Calling setupButtons was moved into a new function for menu Dropdown purposes // ================================================================================================================= // setup the buttons. // setupButtons(); // =================================================================================================================== // Below display() function has been created, to be called later in mainbubble.js file for drop down menu. // Each country will have their own display function. displayCOUNTRYNAME() // *************************************************************************************************************** // Remember to change: display country's initial, bubbleChart country's initial and your CSV file path name // *************************************************************************************************************** // =================================================================================================================== function displayMX() { var myBubbleChart = bubbleChartMX(); d3.csv('data/bubble_chart_data/mx_cat_data.csv', function(error, data) { if (error) { console.log(error); } myBubbleChart('#vis', data); }); setupButtons(myBubbleChart); // setup the buttons. }
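A minimal usage sketch (assumes the #vis container and a #toolbar whose buttons have ids 'all' and 'year', which is what the code above expects):

// Render the Mexico chart and wire the toolbar, mirroring displayMX() above:
displayMX();
// Clicking a #toolbar button passes its id to chart.toggleDisplay(),
// so id 'year' splits bubbles to their category centers and any other id regroups them.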
upgrade.spec.ts
/** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import {Location} from '@angular/common'; import {$locationShim, UrlCodec} from '@angular/common/upgrade'; import {fakeAsync, flush, TestBed} from '@angular/core/testing'; import {Router} from '@angular/router'; import {RouterTestingModule} from '@angular/router/testing'; import {setUpLocationSync} from '@angular/router/upgrade'; import {UpgradeModule} from '@angular/upgrade/static'; import {LocationUpgradeTestModule} from './upgrade_location_test_module'; export function injectorFactory() { const rootScopeMock = new $rootScopeMock(); const rootElementMock = {on: () => undefined}; return function $injectorGet(provider: string) { if (provider === '$rootScope') { return rootScopeMock; } else if (provider === '$rootElement') { return rootElementMock; } else { throw new Error(`Unsupported injectable mock: ${provider}`); } }; } export class $rootScopeMock { private watchers: any[] = []; private events: {[k: string]: any[]} = {}; $watch(fn: any) { this.watchers.push(fn); } $broadcast(evt: string, ...args: any[]) { if (this.events[evt]) { this.events[evt].forEach(fn => { fn.apply(fn, [/** angular.IAngularEvent*/ {}, ...args]); }); }
return { defaultPrevented: false, preventDefault() { this.defaultPrevented = true; } }; } $on(evt: string, fn: any) { if (!this.events[evt]) { this.events[evt] = []; } this.events[evt].push(fn); } $evalAsync(fn: any) { fn(); } $digest() { this.watchers.forEach(fn => fn()); } } describe('setUpLocationSync', () => { let upgradeModule: UpgradeModule; let router: any; let location: any; beforeEach(() => { TestBed.configureTestingModule({ imports: [ RouterTestingModule.withRoutes([{path: '1', children: []}, {path: '2', children: []}]), UpgradeModule, LocationUpgradeTestModule.config(), ], }); upgradeModule = TestBed.inject(UpgradeModule); router = TestBed.inject(Router); location = TestBed.inject(Location); spyOn(router, 'navigateByUrl').and.callThrough(); spyOn(location, 'normalize').and.callThrough(); upgradeModule.$injector = {get: injectorFactory()}; }); it('should throw an error if the UpgradeModule.bootstrap has not been called', () => { upgradeModule.$injector = null; expect(() => setUpLocationSync(upgradeModule)).toThrowError(` RouterUpgradeInitializer can be used only after UpgradeModule.bootstrap has been called. Remove RouterUpgradeInitializer and call setUpLocationSync after UpgradeModule.bootstrap. `); }); it('should get the $rootScope from AngularJS and set an $on watch on $locationChangeStart', () => { const $rootScope = upgradeModule.$injector.get('$rootScope'); spyOn($rootScope, '$on'); setUpLocationSync(upgradeModule); expect($rootScope.$on).toHaveBeenCalledTimes(1); expect($rootScope.$on).toHaveBeenCalledWith('$locationChangeStart', jasmine.any(Function)); }); it('should navigate by url every time $locationChangeStart is broadcasted', () => { const url = 'https://google.com'; const pathname = '/custom/route'; const normalizedPathname = 'foo'; const query = '?query=1&query2=3'; const hash = '#new/hash'; const $rootScope = upgradeModule.$injector.get('$rootScope'); spyOn($rootScope, '$on'); location.normalize.and.returnValue(normalizedPathname); setUpLocationSync(upgradeModule); const callback = $rootScope.$on.calls.argsFor(0)[1]; callback({}, url + pathname + query + hash, ''); expect(location.normalize).toHaveBeenCalledTimes(1); expect(location.normalize).toHaveBeenCalledWith(pathname); expect(router.navigateByUrl).toHaveBeenCalledTimes(1); expect(router.navigateByUrl).toHaveBeenCalledWith(normalizedPathname + query + hash); }); it('should allow configuration to work with hash-based routing', () => { const url = 'https://google.com'; const pathname = '/custom/route'; const normalizedPathname = 'foo'; const query = '?query=1&query2=3'; const hash = '#new/hash'; const combinedUrl = url + '#' + pathname + query + hash; const $rootScope = upgradeModule.$injector.get('$rootScope'); spyOn($rootScope, '$on'); location.normalize.and.returnValue(normalizedPathname); setUpLocationSync(upgradeModule, 'hash'); const callback = $rootScope.$on.calls.argsFor(0)[1]; callback({}, combinedUrl, ''); expect(location.normalize).toHaveBeenCalledTimes(1); expect(location.normalize).toHaveBeenCalledWith(pathname); expect(router.navigateByUrl).toHaveBeenCalledTimes(1); expect(router.navigateByUrl).toHaveBeenCalledWith(normalizedPathname + query + hash); }); it('should work correctly on browsers that do not start pathname with `/`', () => { const anchorProto = HTMLAnchorElement.prototype; const originalDescriptor = Object.getOwnPropertyDescriptor(anchorProto, 'pathname'); Object.defineProperty(anchorProto, 'pathname', {get: () => 'foo/bar'}); try { const $rootScope = 
upgradeModule.$injector.get('$rootScope'); spyOn($rootScope, '$on'); setUpLocationSync(upgradeModule); const callback = $rootScope.$on.calls.argsFor(0)[1]; callback({}, '', ''); expect(location.normalize).toHaveBeenCalledWith('/foo/bar'); } finally { Object.defineProperty(anchorProto, 'pathname', originalDescriptor!); } }); it('should not duplicate navigations triggered by Angular router', fakeAsync(() => { spyOn(TestBed.inject(UrlCodec), 'parse').and.returnValue({ pathname: '', href: '', protocol: '', host: '', search: '', hash: '', hostname: '', port: '', }); const $rootScope = upgradeModule.$injector.get('$rootScope'); spyOn($rootScope, '$broadcast').and.callThrough(); setUpLocationSync(upgradeModule); // Inject location shim so its urlChangeListener subscribes TestBed.inject($locationShim); router.navigateByUrl('/1'); location.normalize.and.returnValue('/1'); flush(); expect(router.navigateByUrl).toHaveBeenCalledTimes(1); expect($rootScope.$broadcast.calls.argsFor(0)[0]).toEqual('$locationChangeStart'); expect($rootScope.$broadcast.calls.argsFor(1)[0]).toEqual('$locationChangeSuccess'); $rootScope.$broadcast.calls.reset(); router.navigateByUrl.calls.reset(); location.go('/2'); location.normalize.and.returnValue('/2'); flush(); expect($rootScope.$broadcast.calls.argsFor(0)[0]).toEqual('$locationChangeStart'); expect($rootScope.$broadcast.calls.argsFor(1)[0]).toEqual('$locationChangeSuccess'); expect(router.navigateByUrl).toHaveBeenCalledTimes(1); })); });
subscription.py
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables __all__ = ['Subscription'] class Subscription(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, allow_tracing: Optional[pulumi.Input[bool]] = None, app_type: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, notify: Optional[pulumi.Input[bool]] = None, owner_id: Optional[pulumi.Input[str]] = None, primary_key: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, scope: Optional[pulumi.Input[str]] = None, secondary_key: Optional[pulumi.Input[str]] = None, service_name: Optional[pulumi.Input[str]] = None, sid: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ Subscription details. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[bool] allow_tracing: Determines whether tracing can be enabled :param pulumi.Input[str] app_type: Determines the type of application which sends the create user request. Default is legacy publisher portal. :param pulumi.Input[str] display_name: Subscription name. :param pulumi.Input[bool] notify: Notify change in Subscription State. - If false, do not send any email notification for change of state of subscription - If true, send email notification of change of state of subscription :param pulumi.Input[str] owner_id: User (user id path) for whom subscription is being created in form /users/{userId} :param pulumi.Input[str] primary_key: Primary subscription key. If not specified during request key will be generated automatically. :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[str] scope: Scope like /products/{productId} or /apis or /apis/{apiId}. :param pulumi.Input[str] secondary_key: Secondary subscription key. If not specified during request key will be generated automatically. :param pulumi.Input[str] service_name: The name of the API Management service. :param pulumi.Input[str] sid: Subscription entity Identifier. The entity represents the association between a user and a product in API Management. :param pulumi.Input[str] state: Initial subscription state. If no value is specified, subscription is created with Submitted state. Possible states are * active – the subscription is active, * suspended – the subscription is blocked, and the subscriber cannot call any APIs of the product, * submitted – the subscription request has been made by the developer, but has not yet been approved or rejected, * rejected – the subscription request has been denied by an administrator, * cancelled – the subscription has been cancelled by the developer or administrator, * expired – the subscription reached its expiration date and was deactivated.
""" if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['allow_tracing'] = allow_tracing __props__['app_type'] = app_type if display_name is None: raise TypeError("Missing required property 'display_name'") __props__['display_name'] = display_name __props__['notify'] = notify __props__['owner_id'] = owner_id __props__['primary_key'] = primary_key if resource_group_name is None: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name if scope is None: raise TypeError("Missing required property 'scope'") __props__['scope'] = scope __props__['secondary_key'] = secondary_key if service_name is None: raise TypeError("Missing required property 'service_name'") __props__['service_name'] = service_name if sid is None: raise TypeError("Missing required property 'sid'") __props__['sid'] = sid __props__['state'] = state __props__['created_date'] = None __props__['end_date'] = None __props__['expiration_date'] = None __props__['name'] = None __props__['notification_date'] = None __props__['start_date'] = None __props__['state_comment'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement/latest:Subscription"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20160707:Subscription"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20161010:Subscription"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:Subscription"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:Subscription"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:Subscription"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:Subscription"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:Subscription"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:Subscription")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(Subscription, __self__).__init__( 'azure-nextgen:apimanagement/v20200601preview:Subscription', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'Subscription': """ Get an existing Subscription resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return Subscription(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="allowTracing") def allow_tracing(self) -> pulumi.Output[Optional[bool]]: """ Determines whether tracing is enabled """ return pulumi.get(self, "allow_tracing") @property @pulumi.getter(name="createdDate") def created_date(self) -> pulumi.Output[str]: """ Subscription creation date. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard. """ return pulumi.get(self, "created_date") @property @pulumi.getter(name="displayName") def display_name(self) -> pulumi.Output[Optional[str]]: """ The name of the subscription, or null if the subscription has no name. """ return pulumi.get(self, "display_name") @property @pulumi.getter(name="endDate") def end_date(self) -> pulumi.Output[Optional[str]]: """ Date when subscription was cancelled or expired. The setting is for audit purposes only and the subscription is not automatically cancelled. The subscription lifecycle can be managed by using the `state` property. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard. """ return pulumi.get(self, "end_date") @property @pulumi.getter(name="expirationDate") def expiration_date(self) -> pulumi.Output[Optional[str]]: """ Subscription expiration date. The setting is for audit purposes only and the subscription is not automatically expired. The subscription lifecycle can be managed by using the `state` property. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard. """
@pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="notificationDate") def notification_date(self) -> pulumi.Output[Optional[str]]: """ Upcoming subscription expiration notification date. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard. """ return pulumi.get(self, "notification_date") @property @pulumi.getter(name="ownerId") def owner_id(self) -> pulumi.Output[Optional[str]]: """ The user resource identifier of the subscription owner. The value is a valid relative URL in the format of /users/{userId} where {userId} is a user identifier. """ return pulumi.get(self, "owner_id") @property @pulumi.getter(name="primaryKey") def primary_key(self) -> pulumi.Output[Optional[str]]: """ Subscription primary key. This property will not be filled on 'GET' operations! Use '/listSecrets' POST request to get the value. """ return pulumi.get(self, "primary_key") @property @pulumi.getter def scope(self) -> pulumi.Output[str]: """ Scope like /products/{productId} or /apis or /apis/{apiId}. """ return pulumi.get(self, "scope") @property @pulumi.getter(name="secondaryKey") def secondary_key(self) -> pulumi.Output[Optional[str]]: """ Subscription secondary key. This property will not be filled on 'GET' operations! Use '/listSecrets' POST request to get the value. """ return pulumi.get(self, "secondary_key") @property @pulumi.getter(name="startDate") def start_date(self) -> pulumi.Output[Optional[str]]: """ Subscription activation date. The setting is for audit purposes only and the subscription is not automatically activated. The subscription lifecycle can be managed by using the `state` property. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard. """ return pulumi.get(self, "start_date") @property @pulumi.getter def state(self) -> pulumi.Output[str]: """ Subscription state. Possible states are * active – the subscription is active, * suspended – the subscription is blocked, and the subscriber cannot call any APIs of the product, * submitted – the subscription request has been made by the developer, but has not yet been approved or rejected, * rejected – the subscription request has been denied by an administrator, * cancelled – the subscription has been cancelled by the developer or administrator, * expired – the subscription reached its expiration date and was deactivated. """ return pulumi.get(self, "state") @property @pulumi.getter(name="stateComment") def state_comment(self) -> pulumi.Output[Optional[str]]: """ Optional subscription comment added by an administrator when the state is changed to the 'rejected'. """ return pulumi.get(self, "state_comment") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type for API Management resource. """ return pulumi.get(self, "type") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
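A minimal provisioning sketch (the import path and all resource names here are illustrative assumptions; the five keyword arguments are the ones the constructor above checks as required):

import pulumi
# Hypothetical module path for the generated class shown above.
from subscription import Subscription

sub = Subscription(
    "example-subscription",
    display_name="Example subscription",
    resource_group_name="example-rg",
    scope="/products/starter",   # or /apis, or /apis/{apiId}
    service_name="example-apim",
    sid="example-sid",
)
pulumi.export("subscription_name", sub.name)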
builtin.rs
//! Some lints that are built in to the compiler. //! //! These are the built-in lints that are emitted direct in the main //! compiler code, rather than using their own custom pass. Those //! lints are all available in `rustc_lint::builtin`. use crate::lint::{LintPass, LateLintPass, LintArray}; use crate::session::Session; use errors::{Applicability, DiagnosticBuilder}; use syntax::ast; use syntax::source_map::Span; declare_lint! { pub EXCEEDING_BITSHIFTS, Deny, "shift exceeds the type's number of bits" } declare_lint! { pub CONST_ERR, Deny, "constant evaluation detected erroneous expression" } declare_lint! { pub UNUSED_IMPORTS, Warn, "imports that are never used" } declare_lint! { pub UNUSED_EXTERN_CRATES, Allow, "extern crates that are never used" } declare_lint! { pub UNUSED_QUALIFICATIONS, Allow, "detects unnecessarily qualified names" } declare_lint! { pub UNKNOWN_LINTS, Warn, "unrecognized lint attribute" } declare_lint! { pub UNUSED_VARIABLES, Warn, "detect variables which are not used in any way" } declare_lint! { pub UNUSED_ASSIGNMENTS, Warn, "detect assignments that will never be read" } declare_lint! { pub DEAD_CODE, Warn, "detect unused, unexported items" } declare_lint! { pub UNREACHABLE_CODE, Warn, "detects unreachable code paths", report_in_external_macro: true } declare_lint! { pub UNREACHABLE_PATTERNS, Warn, "detects unreachable patterns" } declare_lint! { pub UNUSED_MACROS, Warn, "detects macros that were not used" } declare_lint! { pub WARNINGS, Warn, "mass-change the level for lints which produce warnings" } declare_lint! { pub UNUSED_FEATURES, Warn, "unused features found in crate-level #[feature] directives" } declare_lint! { pub STABLE_FEATURES, Warn, "stable features found in #[feature] directive" } declare_lint! { pub UNKNOWN_CRATE_TYPES, Deny, "unknown crate type found in #[crate_type] directive" } declare_lint! { pub TRIVIAL_CASTS, Allow, "detects trivial casts which could be removed" } declare_lint! { pub TRIVIAL_NUMERIC_CASTS, Allow, "detects trivial casts of numeric types which could be removed" } declare_lint! { pub PRIVATE_IN_PUBLIC, Warn, "detect private items in public interfaces not caught by the old implementation" } declare_lint! { pub EXPORTED_PRIVATE_DEPENDENCIES, Warn, "public interface leaks type from a private dependency" } declare_lint! { pub PUB_USE_OF_PRIVATE_EXTERN_CRATE, Deny, "detect public re-exports of private extern crates" } declare_lint! { pub INVALID_TYPE_PARAM_DEFAULT, Deny, "type parameter default erroneously allowed in invalid location" } declare_lint! { pub RENAMED_AND_REMOVED_LINTS, Warn, "lints that have been renamed or removed" } declare_lint! { pub SAFE_EXTERN_STATICS, Deny, "safe access to extern statics was erroneously allowed" } declare_lint! { pub SAFE_PACKED_BORROWS, Warn, "safe borrows of fields of packed structs were erroneously allowed" } declare_lint! { pub PATTERNS_IN_FNS_WITHOUT_BODY, Warn, "patterns in functions without body were erroneously allowed" } declare_lint! { pub LEGACY_DIRECTORY_OWNERSHIP, Deny, "non-inline, non-`#[path]` modules (e.g., `mod foo;`) were erroneously allowed in some files \ not named `mod.rs`" } declare_lint! { pub LEGACY_CONSTRUCTOR_VISIBILITY, Deny, "detects use of struct constructors that would be invisible with new visibility rules" } declare_lint! { pub MISSING_FRAGMENT_SPECIFIER, Deny, "detects missing fragment specifiers in unused `macro_rules!` patterns" } declare_lint!
{ pub PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES, Deny, "detects parenthesized generic parameters in type and module names" } declare_lint! { pub LATE_BOUND_LIFETIME_ARGUMENTS, Warn, "detects generic lifetime arguments in path segments with late bound lifetime parameters" } declare_lint! { pub ORDER_DEPENDENT_TRAIT_OBJECTS, Deny, "trait-object types were treated as different depending on marker-trait order" } declare_lint! { pub DEPRECATED, Warn, "detects use of deprecated items", report_in_external_macro: true } declare_lint! { pub UNUSED_UNSAFE, Warn, "unnecessary use of an `unsafe` block" } declare_lint! { pub UNUSED_MUT, Warn, "detect mut variables which don't need to be mutable" } declare_lint! { pub UNCONDITIONAL_RECURSION, Warn, "functions that cannot return without calling themselves" } declare_lint! { pub SINGLE_USE_LIFETIMES, Allow, "detects lifetime parameters that are only used once" } declare_lint! { pub UNUSED_LIFETIMES, Allow, "detects lifetime parameters that are never used" } declare_lint! { pub TYVAR_BEHIND_RAW_POINTER, Warn, "raw pointer to an inference variable" } declare_lint! { pub ELIDED_LIFETIMES_IN_PATHS, Allow, "hidden lifetime parameters in types are deprecated" } declare_lint! { pub BARE_TRAIT_OBJECTS, Warn, "suggest using `dyn Trait` for trait objects" } declare_lint! { pub ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE, Allow, "fully qualified paths that start with a module name \ instead of `crate`, `self`, or an extern crate name" } declare_lint! { pub ILLEGAL_FLOATING_POINT_LITERAL_PATTERN, Warn, "floating-point literals cannot be used in patterns" } declare_lint! { pub UNSTABLE_NAME_COLLISIONS, Warn, "detects name collision with an existing but unstable method" } declare_lint! { pub IRREFUTABLE_LET_PATTERNS, Warn, "detects irrefutable patterns in if-let and while-let statements" } declare_lint! { pub UNUSED_LABELS, Allow, "detects labels that are never used" } declare_lint! { pub DUPLICATE_MACRO_EXPORTS, Deny, "detects duplicate macro exports" } declare_lint! { pub INTRA_DOC_LINK_RESOLUTION_FAILURE, Warn, "failures in resolving intra-doc link targets" } declare_lint! { pub MISSING_DOC_CODE_EXAMPLES, Allow, "detects publicly-exported items without code samples in their documentation" } declare_lint! { pub PRIVATE_DOC_TESTS, Allow, "detects code samples in docs of private items not documented by rustdoc" } declare_lint! { pub WHERE_CLAUSES_OBJECT_SAFETY, Warn, "checks the object safety of where clauses" } declare_lint! { pub PROC_MACRO_DERIVE_RESOLUTION_FALLBACK, Warn, "detects proc macro derives using inaccessible names from parent modules" } declare_lint! { pub MACRO_USE_EXTERN_CRATE, Allow, "the `#[macro_use]` attribute is now deprecated in favor of using macros \ via the module system" } declare_lint! { pub MACRO_EXPANDED_MACRO_EXPORTS_ACCESSED_BY_ABSOLUTE_PATHS, Deny, "macro-expanded `macro_export` macros from the current crate \ cannot be referred to by absolute paths" } declare_lint! { pub EXPLICIT_OUTLIVES_REQUIREMENTS, Allow, "outlives requirements can be inferred" } /// Some lints that are buffered from `libsyntax`. See `syntax::early_buffered_lints`. pub mod parser { declare_lint! { pub QUESTION_MARK_MACRO_SEP, Allow, "detects the use of `?` as a macro separator" } declare_lint! { pub ILL_FORMED_ATTRIBUTE_INPUT, Warn, "ill-formed attribute inputs that were previously accepted and used in practice" } } declare_lint! 
{ pub DEPRECATED_IN_FUTURE, Allow, "detects use of items that will be deprecated in a future version", report_in_external_macro: true } declare_lint! { pub AMBIGUOUS_ASSOCIATED_ITEMS, Deny, "ambiguous associated items" } declare_lint! { pub NESTED_IMPL_TRAIT, Warn, "nested occurrence of `impl Trait` type" } declare_lint! { pub MUTABLE_BORROW_RESERVATION_CONFLICT, Warn, "reservation of a two-phased borrow conflicts with other shared borrows" } declare_lint_pass! { /// Does nothing as a lint pass, but registers some `Lint`s /// that are used by other parts of the compiler. HardwiredLints => [ ILLEGAL_FLOATING_POINT_LITERAL_PATTERN, EXCEEDING_BITSHIFTS, UNUSED_IMPORTS, UNUSED_EXTERN_CRATES, UNUSED_QUALIFICATIONS, UNKNOWN_LINTS, UNUSED_VARIABLES, UNUSED_ASSIGNMENTS, DEAD_CODE, UNREACHABLE_CODE, UNREACHABLE_PATTERNS, UNUSED_MACROS, WARNINGS, UNUSED_FEATURES, STABLE_FEATURES, UNKNOWN_CRATE_TYPES, TRIVIAL_CASTS, TRIVIAL_NUMERIC_CASTS, PRIVATE_IN_PUBLIC, EXPORTED_PRIVATE_DEPENDENCIES, PUB_USE_OF_PRIVATE_EXTERN_CRATE, INVALID_TYPE_PARAM_DEFAULT, CONST_ERR, RENAMED_AND_REMOVED_LINTS, SAFE_EXTERN_STATICS, SAFE_PACKED_BORROWS, PATTERNS_IN_FNS_WITHOUT_BODY, LEGACY_DIRECTORY_OWNERSHIP, LEGACY_CONSTRUCTOR_VISIBILITY, MISSING_FRAGMENT_SPECIFIER, PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES, LATE_BOUND_LIFETIME_ARGUMENTS, ORDER_DEPENDENT_TRAIT_OBJECTS, DEPRECATED, UNUSED_UNSAFE, UNUSED_MUT, UNCONDITIONAL_RECURSION, SINGLE_USE_LIFETIMES, UNUSED_LIFETIMES, UNUSED_LABELS, TYVAR_BEHIND_RAW_POINTER, ELIDED_LIFETIMES_IN_PATHS, BARE_TRAIT_OBJECTS, ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE, UNSTABLE_NAME_COLLISIONS, IRREFUTABLE_LET_PATTERNS, DUPLICATE_MACRO_EXPORTS, INTRA_DOC_LINK_RESOLUTION_FAILURE, MISSING_DOC_CODE_EXAMPLES, PRIVATE_DOC_TESTS, WHERE_CLAUSES_OBJECT_SAFETY, PROC_MACRO_DERIVE_RESOLUTION_FALLBACK, MACRO_USE_EXTERN_CRATE, MACRO_EXPANDED_MACRO_EXPORTS_ACCESSED_BY_ABSOLUTE_PATHS, parser::QUESTION_MARK_MACRO_SEP, parser::ILL_FORMED_ATTRIBUTE_INPUT, DEPRECATED_IN_FUTURE, AMBIGUOUS_ASSOCIATED_ITEMS, NESTED_IMPL_TRAIT, MUTABLE_BORROW_RESERVATION_CONFLICT, ] } // this could be a closure, but then implementing derive traits // becomes hacky (and it gets allocated) #[derive(PartialEq, RustcEncodable, RustcDecodable, Debug)] pub enum BuiltinLintDiagnostics { Normal, BareTraitObject(Span, /* is_global */ bool), AbsPathWithModule(Span), DuplicatedMacroExports(ast::Ident, Span, Span), ProcMacroDeriveResolutionFallback(Span), MacroExpandedMacroExportsAccessedByAbsolutePaths(Span), ElidedLifetimesInPaths(usize, Span, bool, Span, String), UnknownCrateTypes(Span, String, String), UnusedImports(String, Vec<(Span, String)>), NestedImplTrait { outer_impl_trait_span: Span, inner_impl_trait_span: Span }, RedundantImport(Vec<(Span, bool)>, ast::Ident), } pub(crate) fn add_elided_lifetime_in_path_suggestion( sess: &Session, db: &mut DiagnosticBuilder<'_>, n: usize, path_span: Span, incl_angl_brckt: bool, insertion_span: Span, anon_lts: String, ) { let (replace_span, suggestion) = if incl_angl_brckt { (insertion_span, anon_lts) } else { // When possible, prefer a suggestion that replaces the whole // `Path<T>` expression with `Path<'_, T>`, rather than inserting `'_, ` // at a point (which makes for an ugly/confusing label) if let Ok(snippet) = sess.source_map().span_to_snippet(path_span) { // But our spans can get out of whack due to macros; if the place we think // we want to insert `'_` isn't even within the path expression's span, we // should bail out of making any suggestion rather than panicking on a // 
subtract-with-overflow or string-slice-out-of-bounds (!) // FIXME: can we do better? if insertion_span.lo().0 < path_span.lo().0 { return; } let insertion_index = (insertion_span.lo().0 - path_span.lo().0) as usize; if insertion_index > snippet.len() { return; } let (before, after) = snippet.split_at(insertion_index); (path_span, format!("{}{}{}", before, anon_lts, after)) } else { (insertion_span, anon_lts) } }; db.span_suggestion( replace_span, &format!("indicate the anonymous lifetime{}", if n >= 2 { "s" } else { "" }), suggestion, Applicability::MachineApplicable ); } impl BuiltinLintDiagnostics { pub fn run(self, sess: &Session, db: &mut DiagnosticBuilder<'_>) { match self { BuiltinLintDiagnostics::Normal => (), BuiltinLintDiagnostics::BareTraitObject(span, is_global) => { let (sugg, app) = match sess.source_map().span_to_snippet(span) { Ok(ref s) if is_global => (format!("dyn ({})", s), Applicability::MachineApplicable), Ok(s) => (format!("dyn {}", s), Applicability::MachineApplicable), Err(_) => ("dyn <type>".to_string(), Applicability::HasPlaceholders) }; db.span_suggestion(span, "use `dyn`", sugg, app); } BuiltinLintDiagnostics::AbsPathWithModule(span) => { let (sugg, app) = match sess.source_map().span_to_snippet(span) { Ok(ref s) => { // FIXME(Manishearth) ideally the emitting code // can tell us whether or not this is global let opt_colon = if s.trim_start().starts_with("::") { "" } else { "::" }; (format!("crate{}{}", opt_colon, s), Applicability::MachineApplicable) } Err(_) => ("crate::<path>".to_string(), Applicability::HasPlaceholders) }; db.span_suggestion(span, "use `crate`", sugg, app); } BuiltinLintDiagnostics::DuplicatedMacroExports(ident, earlier_span, later_span) => { db.span_label(later_span, format!("`{}` already exported", ident)); db.span_note(earlier_span, "previous macro export is now shadowed"); } BuiltinLintDiagnostics::ProcMacroDeriveResolutionFallback(span) => { db.span_label(span, "names from parent modules are not \ accessible without an explicit import"); } BuiltinLintDiagnostics::MacroExpandedMacroExportsAccessedByAbsolutePaths(span_def) => { db.span_note(span_def, "the macro is defined here"); } BuiltinLintDiagnostics::ElidedLifetimesInPaths( n, path_span, incl_angl_brckt, insertion_span, anon_lts ) => { add_elided_lifetime_in_path_suggestion( sess, db, n, path_span, incl_angl_brckt, insertion_span, anon_lts, ); } BuiltinLintDiagnostics::UnknownCrateTypes(span, note, sugg) => { db.span_suggestion(span, &note, sugg, Applicability::MaybeIncorrect); } BuiltinLintDiagnostics::UnusedImports(message, replaces) => { if !replaces.is_empty() { db.tool_only_multipart_suggestion( &message, replaces, Applicability::MachineApplicable, ); } } BuiltinLintDiagnostics::NestedImplTrait { outer_impl_trait_span, inner_impl_trait_span } => { db.span_label(outer_impl_trait_span, "outer `impl Trait`"); db.span_label(inner_impl_trait_span, "nested `impl Trait` here"); } BuiltinLintDiagnostics::RedundantImport(spans, ident) => { for (span, is_imported) in spans { let introduced = if is_imported { "imported" } else { "defined" }; db.span_label( span, format!("the item `{}` is already {} here", ident, introduced) ); } } } } }
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for HardwiredLints {}
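// A minimal standalone sketch (hypothetical helper, not a rustc API) of the snippet
// surgery that `add_elided_lifetime_in_path_suggestion` performs above: split the
// `Path<T>` source text at a byte offset and splice the anonymous lifetimes in,
// bailing out instead of panicking when the offset is out of range.
#[cfg(test)]
mod elided_lifetime_suggestion_sketch {
    fn splice_anon_lts(snippet: &str, insertion_index: usize, anon_lts: &str) -> Option<String> {
        // Same guard as above: never index past the end of the snippet.
        if insertion_index > snippet.len() {
            return None;
        }
        let (before, after) = snippet.split_at(insertion_index);
        Some(format!("{}{}{}", before, anon_lts, after))
    }

    #[test]
    fn splices_at_byte_offset() {
        // `Ref<T>` becomes `Ref<'_, T>` when splicing at the offset of `T`.
        assert_eq!(splice_anon_lts("Ref<T>", 4, "'_, ").as_deref(), Some("Ref<'_, T>"));
        // Out-of-range offsets yield no suggestion at all.
        assert_eq!(splice_anon_lts("Ref<T>", 99, "'_, "), None);
    }
}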
possible_values.rs
mod utils; use clap::{App, Arg, ErrorKind}; #[cfg(feature = "suggestions")] static PV_ERROR: &str = "error: 'slo' isn't a valid value for '--Option <option3>' \t[possible values: fast, slow] \tDid you mean 'slow'? USAGE: clap-test --Option <option3> For more information try --help"; #[cfg(not(feature = "suggestions"))] static PV_ERROR: &'static str = "error: 'slo' isn't a valid value for '--Option <option3>' \t[possible values: fast, slow] USAGE: clap-test --Option <option3> For more information try --help"; #[test] fn possible_values_of_positional() { let m = App::new("possible_values") .arg( Arg::with_name("positional") .index(1) .possible_value("test123"), ) .try_get_matches_from(vec!["myprog", "test123"]); assert!(m.is_ok()); let m = m.unwrap(); assert!(m.is_present("positional")); assert_eq!(m.value_of("positional"), Some("test123")); } #[test] fn possible_values_of_positional_fail() { let m = App::new("possible_values") .arg( Arg::with_name("positional") .index(1) .possible_value("test123"), ) .try_get_matches_from(vec!["myprog", "notest"]); assert!(m.is_err()); assert_eq!(m.unwrap_err().kind, ErrorKind::InvalidValue); } #[test] fn possible_values_of_positional_multiple() { let m = App::new("possible_values") .arg( Arg::with_name("positional") .index(1) .possible_value("test123") .possible_value("test321") .multiple(true), ) .try_get_matches_from(vec!["myprog", "test123", "test321"]); assert!(m.is_ok()); let m = m.unwrap(); assert!(m.is_present("positional")); assert_eq!( m.values_of("positional").unwrap().collect::<Vec<_>>(), vec!["test123", "test321"] ); } #[test] fn possible_values_of_positional_multiple_fail()
{ let m = App::new("possible_values") .arg( Arg::with_name("positional") .index(1) .possible_value("test123") .possible_value("test321") .multiple(true), ) .try_get_matches_from(vec!["myprog", "test123", "notest"]); assert!(m.is_err()); assert_eq!(m.unwrap_err().kind, ErrorKind::InvalidValue); }
#[test] fn possible_values_of_option() { let m = App::new("possible_values") .arg( Arg::with_name("option") .short('o') .long("--option") .takes_value(true) .possible_value("test123"), ) .try_get_matches_from(vec!["myprog", "--option", "test123"]); assert!(m.is_ok()); let m = m.unwrap(); assert!(m.is_present("option")); assert_eq!(m.value_of("option"), Some("test123")); } #[test] fn possible_values_of_option_fail() { let m = App::new("possible_values") .arg( Arg::with_name("option") .short('o') .long("--option") .takes_value(true) .possible_value("test123"), ) .try_get_matches_from(vec!["myprog", "--option", "notest"]); assert!(m.is_err()); assert_eq!(m.unwrap_err().kind, ErrorKind::InvalidValue); } #[test] fn possible_values_of_option_multiple() { let m = App::new("possible_values") .arg( Arg::with_name("option") .short('o') .long("--option") .takes_value(true) .possible_value("test123") .possible_value("test321") .multiple(true), ) .try_get_matches_from(vec!["", "--option", "test123", "--option", "test321"]); assert!(m.is_ok()); let m = m.unwrap(); assert!(m.is_present("option")); assert_eq!( m.values_of("option").unwrap().collect::<Vec<_>>(), vec!["test123", "test321"] ); } #[test] fn possible_values_of_option_multiple_fail() { let m = App::new("possible_values") .arg( Arg::with_name("option") .short('o') .long("--option") .takes_value(true) .possible_value("test123") .possible_value("test321") .multiple(true), ) .try_get_matches_from(vec!["", "--option", "test123", "--option", "notest"]); assert!(m.is_err()); assert_eq!(m.unwrap_err().kind, ErrorKind::InvalidValue); } #[test] fn possible_values_output() { assert!(utils::compare_output( utils::complex_app(), "clap-test -O slo", PV_ERROR, true )); } #[test] fn case_insensitive() { let m = App::new("pv") .arg( Arg::with_name("option") .short('o') .long("--option") .takes_value(true) .possible_value("test123") .possible_value("test321") .case_insensitive(true), ) .try_get_matches_from(vec!["pv", "--option", "TeSt123"]); assert!(m.is_ok()); assert!(m .unwrap() .value_of("option") .unwrap() .eq_ignore_ascii_case("test123")); } #[test] fn case_insensitive_fail() { let m = App::new("pv") .arg( Arg::with_name("option") .short('o') .long("--option") .takes_value(true) .possible_value("test123") .possible_value("test321"), ) .try_get_matches_from(vec!["pv", "--option", "TeSt123"]); assert!(m.is_err()); assert_eq!(m.unwrap_err().kind, ErrorKind::InvalidValue); } #[test] fn case_insensitive_multiple() { let m = App::new("pv") .arg( Arg::with_name("option") .short('o') .long("--option") .takes_value(true) .possible_value("test123") .possible_value("test321") .multiple(true) .case_insensitive(true), ) .try_get_matches_from(vec!["pv", "--option", "TeSt123", "teST123", "tESt321"]); assert!(m.is_ok()); assert_eq!( m.unwrap().values_of("option").unwrap().collect::<Vec<_>>(), &["TeSt123", "teST123", "tESt321"] ); } #[test] fn case_insensitive_multiple_fail() { let m = App::new("pv") .arg( Arg::with_name("option") .short('o') .long("--option") .takes_value(true) .possible_value("test123") .possible_value("test321") .multiple(true), ) .try_get_matches_from(vec!["pv", "--option", "test123", "teST123", "test321"]); assert!(m.is_err()); assert_eq!(m.unwrap_err().kind, ErrorKind::InvalidValue); }
sensor.py
"""Support for HERE travel time sensors.""" from datetime import datetime, timedelta import logging from typing import Callable, Dict, Optional, Union import herepy import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( ATTR_ATTRIBUTION, ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_MODE, CONF_MODE, CONF_NAME, CONF_UNIT_SYSTEM, CONF_UNIT_SYSTEM_IMPERIAL, CONF_UNIT_SYSTEM_METRIC, EVENT_HOMEASSISTANT_START, TIME_MINUTES, ) from homeassistant.core import HomeAssistant, State, callback from homeassistant.helpers import location import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.helpers.typing import DiscoveryInfoType import homeassistant.util.dt as dt _LOGGER = logging.getLogger(__name__) CONF_DESTINATION_LATITUDE = "destination_latitude" CONF_DESTINATION_LONGITUDE = "destination_longitude" CONF_DESTINATION_ENTITY_ID = "destination_entity_id" CONF_ORIGIN_LATITUDE = "origin_latitude" CONF_ORIGIN_LONGITUDE = "origin_longitude" CONF_ORIGIN_ENTITY_ID = "origin_entity_id" CONF_API_KEY = "api_key" CONF_TRAFFIC_MODE = "traffic_mode" CONF_ROUTE_MODE = "route_mode" CONF_ARRIVAL = "arrival" CONF_DEPARTURE = "departure" DEFAULT_NAME = "HERE Travel Time" TRAVEL_MODE_BICYCLE = "bicycle" TRAVEL_MODE_CAR = "car" TRAVEL_MODE_PEDESTRIAN = "pedestrian" TRAVEL_MODE_PUBLIC = "publicTransport" TRAVEL_MODE_PUBLIC_TIME_TABLE = "publicTransportTimeTable" TRAVEL_MODE_TRUCK = "truck" TRAVEL_MODE = [ TRAVEL_MODE_BICYCLE, TRAVEL_MODE_CAR, TRAVEL_MODE_PEDESTRIAN, TRAVEL_MODE_PUBLIC, TRAVEL_MODE_PUBLIC_TIME_TABLE, TRAVEL_MODE_TRUCK, ] TRAVEL_MODES_PUBLIC = [TRAVEL_MODE_PUBLIC, TRAVEL_MODE_PUBLIC_TIME_TABLE] TRAVEL_MODES_VEHICLE = [TRAVEL_MODE_CAR, TRAVEL_MODE_TRUCK] TRAVEL_MODES_NON_VEHICLE = [TRAVEL_MODE_BICYCLE, TRAVEL_MODE_PEDESTRIAN] TRAFFIC_MODE_ENABLED = "traffic_enabled" TRAFFIC_MODE_DISABLED = "traffic_disabled" ROUTE_MODE_FASTEST = "fastest" ROUTE_MODE_SHORTEST = "shortest" ROUTE_MODE = [ROUTE_MODE_FASTEST, ROUTE_MODE_SHORTEST] ICON_BICYCLE = "mdi:bike" ICON_CAR = "mdi:car" ICON_PEDESTRIAN = "mdi:walk" ICON_PUBLIC = "mdi:bus" ICON_TRUCK = "mdi:truck" UNITS = [CONF_UNIT_SYSTEM_METRIC, CONF_UNIT_SYSTEM_IMPERIAL] ATTR_DURATION = "duration" ATTR_DISTANCE = "distance" ATTR_ROUTE = "route" ATTR_ORIGIN = "origin" ATTR_DESTINATION = "destination" ATTR_UNIT_SYSTEM = CONF_UNIT_SYSTEM ATTR_TRAFFIC_MODE = CONF_TRAFFIC_MODE ATTR_DURATION_IN_TRAFFIC = "duration_in_traffic" ATTR_ORIGIN_NAME = "origin_name" ATTR_DESTINATION_NAME = "destination_name" SCAN_INTERVAL = timedelta(minutes=5) NO_ROUTE_ERROR_MESSAGE = "HERE could not find a route based on the input" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_API_KEY): cv.string, vol.Inclusive( CONF_DESTINATION_LATITUDE, "destination_coordinates" ): cv.latitude, vol.Inclusive( CONF_DESTINATION_LONGITUDE, "destination_coordinates" ): cv.longitude, vol.Exclusive(CONF_DESTINATION_LATITUDE, "destination"): cv.latitude, vol.Exclusive(CONF_DESTINATION_ENTITY_ID, "destination"): cv.entity_id, vol.Inclusive(CONF_ORIGIN_LATITUDE, "origin_coordinates"): cv.latitude, vol.Inclusive(CONF_ORIGIN_LONGITUDE, "origin_coordinates"): cv.longitude, vol.Exclusive(CONF_ORIGIN_LATITUDE, "origin"): cv.latitude, vol.Exclusive(CONF_ORIGIN_ENTITY_ID, "origin"): cv.entity_id, vol.Optional(CONF_DEPARTURE): cv.time, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_MODE, default=TRAVEL_MODE_CAR): vol.In(TRAVEL_MODE), vol.Optional(CONF_ROUTE_MODE, 
default=ROUTE_MODE_FASTEST): vol.In(ROUTE_MODE), vol.Optional(CONF_TRAFFIC_MODE, default=False): cv.boolean, vol.Optional(CONF_UNIT_SYSTEM): vol.In(UNITS), } ) PLATFORM_SCHEMA = vol.All( cv.has_at_least_one_key(CONF_DESTINATION_LATITUDE, CONF_DESTINATION_ENTITY_ID), cv.has_at_least_one_key(CONF_ORIGIN_LATITUDE, CONF_ORIGIN_ENTITY_ID), cv.key_value_schemas( CONF_MODE, { None: PLATFORM_SCHEMA, TRAVEL_MODE_BICYCLE: PLATFORM_SCHEMA, TRAVEL_MODE_CAR: PLATFORM_SCHEMA, TRAVEL_MODE_PEDESTRIAN: PLATFORM_SCHEMA, TRAVEL_MODE_PUBLIC: PLATFORM_SCHEMA, TRAVEL_MODE_TRUCK: PLATFORM_SCHEMA, TRAVEL_MODE_PUBLIC_TIME_TABLE: PLATFORM_SCHEMA.extend( { vol.Exclusive(CONF_ARRIVAL, "arrival_departure"): cv.time, vol.Exclusive(CONF_DEPARTURE, "arrival_departure"): cv.time, } ), }, ), ) async def async_setup_platform( hass: HomeAssistant, config: Dict[str, Union[str, bool]], async_add_entities: Callable, discovery_info: Optional[DiscoveryInfoType] = None, ) -> None: """Set up the HERE travel time platform.""" api_key = config[CONF_API_KEY] here_client = herepy.RoutingApi(api_key) if not await hass.async_add_executor_job( _are_valid_client_credentials, here_client ): _LOGGER.error( "Invalid credentials. This error is returned if the specified token was invalid or no contract could be found for this token." ) return if config.get(CONF_ORIGIN_LATITUDE) is not None: origin = f"{config[CONF_ORIGIN_LATITUDE]},{config[CONF_ORIGIN_LONGITUDE]}" origin_entity_id = None else: origin = None origin_entity_id = config[CONF_ORIGIN_ENTITY_ID] if config.get(CONF_DESTINATION_LATITUDE) is not None: destination = ( f"{config[CONF_DESTINATION_LATITUDE]},{config[CONF_DESTINATION_LONGITUDE]}" ) destination_entity_id = None else: destination = None destination_entity_id = config[CONF_DESTINATION_ENTITY_ID] travel_mode = config[CONF_MODE] traffic_mode = config[CONF_TRAFFIC_MODE] route_mode = config[CONF_ROUTE_MODE] name = config[CONF_NAME] units = config.get(CONF_UNIT_SYSTEM, hass.config.units.name) arrival = config.get(CONF_ARRIVAL) departure = config.get(CONF_DEPARTURE) here_data = HERETravelTimeData( here_client, travel_mode, traffic_mode, route_mode, units, arrival, departure ) sensor = HERETravelTimeSensor( name, origin, destination, origin_entity_id, destination_entity_id, here_data ) async_add_entities([sensor]) def _are_valid_client_credentials(here_client: herepy.RoutingApi) -> bool: """Check if the provided credentials are correct using defaults.""" known_working_origin = [38.9, -77.04833] known_working_destination = [39.0, -77.1] try: here_client.car_route( known_working_origin, known_working_destination, [ herepy.RouteMode[ROUTE_MODE_FASTEST], herepy.RouteMode[TRAVEL_MODE_CAR], herepy.RouteMode[TRAFFIC_MODE_DISABLED], ], ) except herepy.InvalidCredentialsError: return False return True class HERETravelTimeSensor(Entity): """Representation of a HERE travel time sensor.""" def __init__( self, name: str, origin: str, destination: str, origin_entity_id: str, destination_entity_id: str, here_data: "HERETravelTimeData", ) -> None: """Initialize the sensor.""" self._name = name self._origin_entity_id = origin_entity_id self._destination_entity_id = destination_entity_id self._here_data = here_data self._unit_of_measurement = TIME_MINUTES self._attrs = { ATTR_UNIT_SYSTEM: self._here_data.units, ATTR_MODE: self._here_data.travel_mode, ATTR_TRAFFIC_MODE: self._here_data.traffic_mode, } if self._origin_entity_id is None: self._here_data.origin = origin if self._destination_entity_id is None: self._here_data.destination = destination async 
def async_added_to_hass(self) -> None: """Delay the sensor update to avoid entity not found warnings.""" @callback def delayed_sensor_update(event): """Update sensor after Home Assistant started.""" self.async_schedule_update_ha_state(True) self.hass.bus.async_listen_once( EVENT_HOMEASSISTANT_START, delayed_sensor_update ) @property def state(self) -> Optional[str]: """Return the state of the sensor.""" if self._here_data.traffic_mode: if self._here_data.traffic_time is not None: return str(round(self._here_data.traffic_time / 60)) if self._here_data.base_time is not None: return str(round(self._here_data.base_time / 60)) return None @property def name(self) -> str: """Get the name of the sensor.""" return self._name @property def device_state_attributes( self, ) -> Optional[Dict[str, Union[None, float, str, bool]]]:
"""Return the state attributes.""" if self._here_data.base_time is None: return None res = self._attrs if self._here_data.attribution is not None: res[ATTR_ATTRIBUTION] = self._here_data.attribution res[ATTR_DURATION] = self._here_data.base_time / 60 res[ATTR_DISTANCE] = self._here_data.distance res[ATTR_ROUTE] = self._here_data.route res[ATTR_DURATION_IN_TRAFFIC] = self._here_data.traffic_time / 60 res[ATTR_ORIGIN] = self._here_data.origin res[ATTR_DESTINATION] = self._here_data.destination res[ATTR_ORIGIN_NAME] = self._here_data.origin_name res[ATTR_DESTINATION_NAME] = self._here_data.destination_name return res
@property def unit_of_measurement(self) -> str: """Return the unit this state is expressed in.""" return self._unit_of_measurement @property def icon(self) -> str: """Icon to use in the frontend depending on travel_mode.""" if self._here_data.travel_mode == TRAVEL_MODE_BICYCLE: return ICON_BICYCLE if self._here_data.travel_mode == TRAVEL_MODE_PEDESTRIAN: return ICON_PEDESTRIAN if self._here_data.travel_mode in TRAVEL_MODES_PUBLIC: return ICON_PUBLIC if self._here_data.travel_mode == TRAVEL_MODE_TRUCK: return ICON_TRUCK return ICON_CAR async def async_update(self) -> None: """Update Sensor Information.""" # Convert device_trackers to HERE friendly location if self._origin_entity_id is not None: self._here_data.origin = await self._get_location_from_entity( self._origin_entity_id ) if self._destination_entity_id is not None: self._here_data.destination = await self._get_location_from_entity( self._destination_entity_id ) await self.hass.async_add_executor_job(self._here_data.update) async def _get_location_from_entity(self, entity_id: str) -> Optional[str]: """Get the location from the entity state or attributes.""" entity = self.hass.states.get(entity_id) if entity is None: _LOGGER.error("Unable to find entity %s", entity_id) return None # Check if the entity has location attributes if location.has_location(entity): return self._get_location_from_attributes(entity) # Check if device is in a zone zone_entity = self.hass.states.get(f"zone.{entity.state}") if location.has_location(zone_entity): _LOGGER.debug( "%s is in %s, getting zone location", entity_id, zone_entity.entity_id ) return self._get_location_from_attributes(zone_entity) # Check if state is valid coordinate set if self._entity_state_is_valid_coordinate_set(entity.state): return entity.state _LOGGER.error( "The state of %s is not a valid set of coordinates: %s", entity_id, entity.state, ) return None @staticmethod def _entity_state_is_valid_coordinate_set(state: str) -> bool: """Check that the given string is a valid set of coordinates.""" schema = vol.Schema(cv.gps) try: coordinates = state.split(",") schema(coordinates) return True except vol.MultipleInvalid: return False @staticmethod def _get_location_from_attributes(entity: State) -> str: """Get the lat/long string from an entity's attributes.""" attr = entity.attributes return f"{attr.get(ATTR_LATITUDE)},{attr.get(ATTR_LONGITUDE)}" class HERETravelTimeData: """HERETravelTime data object.""" def __init__( self, here_client: herepy.RoutingApi, travel_mode: str, traffic_mode: bool, route_mode: str, units: str, arrival: datetime, departure: datetime, ) -> None: """Initialize herepy.""" self.origin = None self.destination = None self.travel_mode = travel_mode self.traffic_mode = traffic_mode self.route_mode = route_mode self.arrival = arrival self.departure = departure self.attribution = None self.traffic_time = None self.distance = None self.route = None self.base_time = None self.origin_name = None self.destination_name = None self.units = units self._client = here_client self.combine_change = True def update(self) -> None: """Get the latest data from HERE.""" if self.traffic_mode: traffic_mode = TRAFFIC_MODE_ENABLED else: traffic_mode = TRAFFIC_MODE_DISABLED if self.destination is not None and self.origin is not None: # Convert location to HERE friendly location destination = self.destination.split(",") origin = self.origin.split(",") arrival = self.arrival if arrival is not None: arrival = convert_time_to_isodate(arrival) departure = self.departure if departure is not None:
departure = convert_time_to_isodate(departure) if departure is None and arrival is None: departure = "now" _LOGGER.debug( "Requesting route for origin: %s, destination: %s, route_mode: %s, mode: %s, traffic_mode: %s, arrival: %s, departure: %s", origin, destination, herepy.RouteMode[self.route_mode], herepy.RouteMode[self.travel_mode], herepy.RouteMode[traffic_mode], arrival, departure, ) try: response = self._client.public_transport_timetable( origin, destination, self.combine_change, [ herepy.RouteMode[self.route_mode], herepy.RouteMode[self.travel_mode], herepy.RouteMode[traffic_mode], ], arrival=arrival, departure=departure, ) except herepy.NoRouteFoundError: # Better error message for cryptic no route error codes _LOGGER.error(NO_ROUTE_ERROR_MESSAGE) return _LOGGER.debug("Raw response is: %s", response.response) # pylint: disable=no-member source_attribution = response.response.get("sourceAttribution") if source_attribution is not None: self.attribution = self._build_hass_attribution(source_attribution) # pylint: disable=no-member route = response.response["route"] summary = route[0]["summary"] waypoint = route[0]["waypoint"] self.base_time = summary["baseTime"] if self.travel_mode in TRAVEL_MODES_VEHICLE: self.traffic_time = summary["trafficTime"] else: self.traffic_time = self.base_time distance = summary["distance"] if self.units == CONF_UNIT_SYSTEM_IMPERIAL: # Convert to miles. self.distance = distance / 1609.344 else: # Convert to kilometers self.distance = distance / 1000 # pylint: disable=no-member self.route = response.route_short self.origin_name = waypoint[0]["mappedRoadName"] self.destination_name = waypoint[1]["mappedRoadName"] @staticmethod def _build_hass_attribution(source_attribution: Dict) -> Optional[str]: """Build a hass frontend ready string out of the sourceAttribution.""" suppliers = source_attribution.get("supplier") if suppliers is not None: supplier_titles = [] for supplier in suppliers: title = supplier.get("title") if title is not None: supplier_titles.append(title) joined_supplier_titles = ",".join(supplier_titles) attribution = f"With the support of {joined_supplier_titles}. All information is provided without warranty of any kind." return attribution def convert_time_to_isodate(timestr: str) -> str: """Take a string like 08:00:00 and combine it with the current date.""" combined = datetime.combine(dt.start_of_local_day(), dt.parse_time(timestr)) if combined < datetime.now(): combined = combined + timedelta(days=1) return combined.isoformat()
ar.min.js
/*! * Web Experience Toolkit (WET) / Boîte à outils de l'expérience Web (BOEW) * wet-boew.github.io/wet-boew/License-en.html / wet-boew.github.io/wet-boew/Licence-fr.html * v4.0.5 - 2014-11-14 *
*/!function(a){"use strict";a.i18nDict={"lang-code":"ar","lang-native":"العربية",all:"جميع",tphp:"أعلى الصفحة",load:" تحميل ...",process:"تجهيز ...",srch:"البحث","no-match":"لا نتائج تذكر لل",matches:{mixin:"العثور على [MIXIN] مباريات"},current:"(الحالي)",hide:"إخفاء",err:"خطأ",colon:":",hyphen:" - ","full-stop":".","comma-space":"، ",space:"&#32;",start:"بداية",stop:"توقف",back:"ظهر",cancel:"إلغاء","min-ago":"منذ دقيقة واحدة","coup-mins":"بضع دقائق قبل","mins-ago":{mixin:"منذ [MIXIN] دقيقة"},"hour-ago":"قبل ساعة","hours-ago":{mixin:"منذ [MIXIN] ساعات"},"days-ago":{mixin:"منذ [MIXIN] أيام"},yesterday:"أمس",nxt:"التالي","nxt-r":"مفتاح السهم الأيمن - التالي",prv:"سابق","prv-l":"مفتاح السهم الأيسر - سابق",first:"الأول",last:"آخر","srch-menus":"البحث والقوائم",email:"البريد الإلكتروني","menu-close":"إغلاق القائمة","overlay-close":"إغلاق تراكب","esc-key":"مفتاح الهروب",show:"عرض","tab-rot":{off:"ايقاف الدوران علامة التبويب",on:"بدء دوران التبويب"},"tab-list":"علامة التبويب قائمة","tab-pnl-end1":"نهاية هذا الفريق التبويب.","tab-pnl-end2":"العودة إلى قائمة التبويب","tab-pnl-end3":"أو الاستمرار في بقية الصفحة.",play:"لعب",pause:"وقفة",open:"فتح",close:"أغلق",volume:"جهارة الصوت",mute:{on:"كتم",off:"إلغاء كتمه"},cc:{off:"أغلقت إخفاء السفلية",on:"أغلقت تظهر السفلية"},"cc-err":"خطأ في تحميل تعليق مغلق",adesc:{on:"تمكين الوصف السمعي",off:"تعطيل الوصف السمعي"},pos:"الوظيفة الحالية:",dur:"إجمالي الوقت:","shr-txt":"مشاركة","shr-pg":" هذه الصفحة","shr-vid":" هذا الفيديو","shr-aud":" هذا الملف الصوتي","shr-hnt":" مع {s} ","shr-disc":"وأعرب عن تأييد أي أي منتجات أو خدمات أو ضمنية.","frm-nosubmit":"لا يمكن أن تقدم على شكل ل ","errs-fnd":" تم العثور على أخطاء.","err-fnd":" تم العثور على خطأ.","date-show":"اختيار تاريخ من التقويم لحقل:","date-sel":"مختار",days:["الأحد","يوم الاثنين","الثلاثاء","الأربعاء","الخميس","الجمعة","السبت"],mnths:["يناير","فبراير","مسيرة","أبريل","قد","يونيو","يوليو","أغسطس","سبتمبر","أكتوبر","نوفمبر","ديسمبر"],cal:"تقويم",currDay:"(اليوم الحالي)","cal-goToLnk":'<span class="wb-inv">انتقل إلى الشهر </span>من العام',"cal-goToTtl":"انتقل إلى الشهر من العام","cal-goToMnth":"الشهر:","cal-goToYr":"العام:","cal-goToBtn":"تذهب",prvMnth:"الشهر الماضي: ",nxtMnth:"الشهر المقبل: ","lb-curr":"البند %curr% من %total%","lb-xhr-err":"فشل هذا المحتوى ليتم تحميلها.","lb-img-err":"فشلت هذه الصورة ليتم تحميلها.","tbl-txt":"جدول","tbl-dtls":"الرسم البياني. التفاصيل في الجدول التالي.","st-to-msg-bgn":"سوف تنتهي جلسة العمل الخاصة بك تلقائيا في على #min# دقيقة #sec# ثانية.","st-to-msg-end":'اختر "متابعة الجلسة" لتمديد جلسة العمل الخاصة بك.',"st-msgbx-ttl":"الدورة مهلة الإنذار","st-alrdy-to-msg":"عذرا انتهت صلاحية جلسة العمل الخاصة بك بالفعل. يرجى تسجيل الدخول مرة أخرى.","st-btn-cont":"تستمر الدورة","st-btn-end":"إنهاء جلسة الآن","td-toggle":"تبديل جميع","td-open":"توسيع الكل","td-close":"طي الكل","td-ttl-open":"توسيع كافة أقسام محتوى","td-ttl-close":"انهيار جميع قطاعات المحتوى",sortAsc:": تفعيل لنوع تصاعدي",sortDesc:": تفعيل لفرز تنازلي",emptyTbl:"لا تتوفر بيانات في الجدول",infoEntr:"عرض _START_ إلى _END_ من _TOTAL_ مقالات",infoEmpty:"عرض 0 إلى 0 من 0 مقالات",infoFilt:"(تمت تصفيتها في الفترة من _MAX_ مجموع المقالات)",info1000:",",lenMenu:"عرض _MENU_ مقالات",filter:"تصفية العناصر","geo-mapctrl":"@geo-mapctrl@","geo-zmin":"تكبير","geo-zmout":"تصغير","geo-zmwrld":"التكبير لتعيين حد","geo-zmfeat":"تكبير إلى العنصر","geo-sclln":"خريطة نطاق","geo-msepos":"خطوط الطول والعرض من مؤشر الماوس","geo-ariamap":"خريطة الكائن. 
أوصاف الميزات الخريطة هي في الجدول أدناه.","geo-ally":"<strong>مستخدمي لوحة المفاتيح:</strong> عندما الخريطة هي في التركيز، استخدم مفاتيح الأسهم لتحريك الخريطة ومفاتيح زائد وناقص لتكبير. فإن مفاتيح الأسهم لا تحريك الخريطة عند التكبير إلى حد الخريطة.","geo-allyttl":"تعليمات: خريطة الملاحة","geo-tgllyr":"تبديل عرض طبقة","geo-hdnlyr":"حاليا يتم إخفاء هذه الطبقة.","geo-bmapurl":"http://geoappext.nrcan.gc.ca/arcgis/rest/services/BaseMaps/CBMT_CBCT_GEOM_3978/MapServer/WMTS/tile/1.0.0/BaseMaps_CBMT3978/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.jpg","geo-bmapttl":"BaseMaps_CBMT3978","geo-bmapurltxt":"http://geoappext.nrcan.gc.ca/arcgis/rest/services/BaseMaps/CBMT_TXT_3978/MapServer/WMTS/tile/1.0.0/BaseMaps_CBMT3978/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.jpg","geo-attrlnk":"http://geogratis.gc.ca/geogratis/CBM_CBC?lang=en","geo-attrttl":"GeoGratis - كندا قاعدة الخريطة (الإنجليزية أو الفرنسية فقط)","geo-sel":"حدد","geo-lblsel":"تحقق لتحديد عنصر على الخريطة","geo-locurl-geogratis":"http://geogratis.gc.ca/services/geolocation/en/locate","geo-loc-placeholder":"تشير إلى اسم الموقع، والرمز البريدي والعنوان (البريدي)، وعدد من NTS ...","geo-loc-label":"موقع","geo-aoi-north":"شمال","geo-aoi-east":"شرق","geo-aoi-south":"جنوب","geo-aoi-west":"غرب","geo-aoi-instructions":'تحدد مساحة على الخريطة أو أدخل التفاصيل أدناه ثم انقر على زر "رسم".',"geo-aoi-btndraw":"رسم","geo-aoi-btnclear":"نزع","geo-geoloc-btn":"التكبير لموقعك الحالي","geo-geoloc-fail":"فشل الموقع. يرجى التأكد من أن يتم تمكين خدمات الموقع.","geo-geoloc-uncapable":"غير معتمد التعريب بواسطة المتصفح الخاص بك.","wb-disable":"إصدار HTML الأساسي","wb-enable":"الإصدار القياسي","tmpl-signin":"تسجيل الدخول"}}(wb),wb.doc.one("formLanguages.wb",function(){!function(a){a.extend(a.validator.messages,{required:"هذا الحقل إلزامي",remote:"يرجى تصحيح هذا الحقل للمتابعة",email:"رجاء إدخال عنوان بريد إلكتروني صحيح",url:"رجاء إدخال عنوان موقع إلكتروني صحيح",date:"رجاء إدخال تاريخ صحيح",dateISO:"رجاء إدخال تاريخ صحيح (ISO)",number:"رجاء إدخال عدد بطريقة صحيحة",digits:"رجاء إدخال أرقام فقط",creditcard:"رجاء إدخال رقم بطاقة ائتمان صحيح",equalTo:"رجاء إدخال نفس القيمة",accept:"رجاء إدخال ملف بامتداد موافق عليه",maxlength:a.validator.format("الحد الأقصى لعدد الحروف هو {0}"),minlength:a.validator.format("الحد الأدنى لعدد الحروف هو {0}"),rangelength:a.validator.format("عدد الحروف يجب أن يكون بين {0} و {1}"),range:a.validator.format("رجاء إدخال عدد قيمته بين {0} و {1}"),max:a.validator.format("رجاء إدخال عدد أقل من أو يساوي (0}"),min:a.validator.format("رجاء إدخال عدد أكبر من أو يساوي (0}")})}(jQuery)});
declarations.rs
use std::str; use nom::{ bytes::complete::tag, character::complete::char, error::{FromExternalError, ParseError}, multi::separated_list1, IResult, }; use crate::{ comments::{space_or_comment0, space_or_comment1}, predicates::types::{pred_par_type, PredParType}, primitive_literals::identifier, }; #[derive(PartialEq, Clone, Debug)] pub struct PredicateItem { pub id: String, pub parameters: Vec<(PredParType, String)>, } pub fn predicate_item<'a, E: ParseError<&'a str>>( input: &'a str, ) -> IResult<&'a str, PredicateItem, E> where E: FromExternalError<&'a str, std::num::ParseIntError> + FromExternalError<&'a str, std::num::ParseFloatError>, { let (input, _) = space_or_comment0(input)?; let (input, _) = tag("predicate")(input)?; let (input, _) = space_or_comment1(input)?; let (input, id) = identifier(input)?; let (input, _) = char('(')(input)?; let (input, parameters) = separated_list1(char(','), pred_par_type_ident_pair)(input)?; let (input, _) = char(')')(input)?; let (input, _) = space_or_comment0(input)?; let (input, _) = char(';')(input)?; let (input, _) = space_or_comment0(input)?; Ok((input, PredicateItem { id, parameters })) } pub fn pred_par_type_ident_pair<'a, E: ParseError<&'a str>>( input: &'a str, ) -> IResult<&'a str, (PredParType, String), E> where E: FromExternalError<&'a str, std::num::ParseIntError> + FromExternalError<&'a str, std::num::ParseFloatError>, { let (input, _) = space_or_comment0(input)?; let (input, pred_par_type) = pred_par_type(input)?; let (input, _) = space_or_comment0(input)?; let (input, _) = char(':')(input)?; let (input, _) = space_or_comment0(input)?; let (input, ident) = identifier(input)?; let (input, _) = space_or_comment0(input)?; Ok((input, (pred_par_type, ident))) } #[test] fn
test_predicate_item() { use crate::predicates::types::BasicPredParType; use nom::error::VerboseError; use std::str; assert_eq!( predicate_item::<VerboseError<&str>>("predicate float_03({1.0,3.3}:c);"), Ok(( "", PredicateItem { id: "float_03".to_string(), parameters: vec![( PredParType::Basic(BasicPredParType::FloatInSet(vec![1.0, 3.3])), "c".to_string() )] } )) ); } #[test] #[should_panic] fn test_predicate_item_2() { use nom::error::VerboseError; use std::str; predicate_item::<VerboseError<&str>>("predicate float_01(set of float:c);").unwrap(); }
block.rs
use crate::header::{Header, HeaderBuilder}; use crate::transaction::{ProposalShortId, Transaction}; use crate::uncle::{uncles_hash, UncleBlock}; use crate::Capacity; use ckb_hash::new_blake2b; use ckb_merkle_tree::merkle_root; use ckb_occupied_capacity::Result as CapacityResult; use numext_fixed_hash::H256; use serde_derive::{Deserialize, Serialize}; use std::borrow::ToOwned; use std::collections::HashSet; use std::ops::Deref; fn cal_transactions_root(vec: &[Transaction]) -> H256 { merkle_root( &vec.iter() .map(|tx| tx.hash().to_owned()) .collect::<Vec<_>>(), ) } fn cal_witnesses_root(vec: &[Transaction]) -> H256 { merkle_root( &vec.iter() .map(|tx| tx.witness_hash().to_owned()) .collect::<Vec<_>>(), ) } pub(crate) fn cal_proposals_hash(vec: &[ProposalShortId]) -> H256 { if vec.is_empty() { H256::zero() } else { let mut ret = [0u8; 32]; let mut blake2b = new_blake2b(); for id in vec.iter() { blake2b.update(id.deref()); } blake2b.finalize(&mut ret); ret.into() } } #[derive(Clone, Serialize, Deserialize, Eq, Debug)] pub struct Block { header: Header, uncles: Vec<UncleBlock>, transactions: Vec<Transaction>, proposals: Vec<ProposalShortId>, } impl Block { pub fn new( header: Header, uncles: Vec<UncleBlock>, transactions: Vec<Transaction>, proposals: Vec<ProposalShortId>, ) -> Block { Block { header, uncles, transactions, proposals, } } pub fn header(&self) -> &Header { &self.header } pub fn is_genesis(&self) -> bool { self.header.is_genesis() } pub fn transactions(&self) -> &[Transaction] { &self.transactions } pub fn cellbase(&self) -> &Transaction { &self.transactions.get(0).expect("get cellbase transaction") } pub fn proposals(&self) -> &[ProposalShortId] { &self.proposals } pub fn uncles(&self) -> &[UncleBlock] { &self.uncles } pub fn cal_uncles_hash(&self) -> H256 { uncles_hash(&self.uncles) } pub fn union_proposal_ids_iter(&self) -> impl Iterator<Item = &ProposalShortId> { self.proposals() .iter() .chain(self.uncles.iter().flat_map(|u| u.proposals())) } pub fn union_proposal_ids(&self) -> HashSet<ProposalShortId> { self.union_proposal_ids_iter().cloned().collect() } pub fn cal_witnesses_root(&self) -> H256 { cal_witnesses_root(self.transactions()) } pub fn cal_transactions_root(&self) -> H256 { cal_transactions_root(self.transactions()) } pub fn cal_proposals_hash(&self) -> H256 { cal_proposals_hash(self.proposals()) } pub fn serialized_size(&self, proof_size: usize) -> usize { Header::serialized_size(proof_size) + self .uncles .iter() .map(|u| u.serialized_size(proof_size)) .sum::<usize>() + 4 + self.proposals.len() * ProposalShortId::serialized_size() + 4 + self .transactions() .iter() .map(Transaction::serialized_size) .sum::<usize>() + 4 } pub fn outputs_capacity(&self) -> CapacityResult<Capacity> { self.transactions .iter() .try_fold(Capacity::zero(), |capacity, tx| { tx.outputs_capacity().and_then(|c| capacity.safe_add(c)) }) } } impl ::std::hash::Hash for Block { fn hash<H>(&self, state: &mut H) where H: ::std::hash::Hasher, { state.write(&self.header.hash().as_bytes()); state.finish(); } } impl PartialEq for Block { fn eq(&self, other: &Block) -> bool { self.header().hash() == other.header().hash() } } #[derive(Default)] pub struct BlockBuilder { header_builder: HeaderBuilder, uncles: Vec<UncleBlock>, transactions: Vec<Transaction>, proposals: Vec<ProposalShortId>, } impl BlockBuilder { pub fn from_block(block: Block) -> Self { let Block { header, uncles, transactions, proposals, } = block; Self { header_builder: HeaderBuilder::from_header(header), uncles, transactions, 
proposals, } } pub fn from_header_builder(header_builder: HeaderBuilder) -> Self { Self { header_builder, uncles: Vec::new(), transactions: Vec::new(), proposals: Vec::new(), } } pub fn header_builder(mut self, header_builder: HeaderBuilder) -> Self { self.header_builder = header_builder; self } pub fn header<T>(mut self, header: T) -> Self where T: Into<Header>, { self.header_builder = HeaderBuilder::from_header(header.into()); self } pub fn uncle<T>(mut self, uncle: T) -> Self where T: Into<UncleBlock>, { self.uncles.push(uncle.into()); self } pub fn uncles<I, T>(mut self, uncles: I) -> Self where I: IntoIterator<Item = T>, T: Into<UncleBlock>, { self.uncles.extend(uncles.into_iter().map(Into::into)); self } pub fn transaction<T>(mut self, transaction: T) -> Self where T: Into<Transaction>, { self.transactions.push(transaction.into()); self } pub fn transactions<I, T>(mut self, transactions: I) -> Self where I: IntoIterator<Item = T>, T: Into<Transaction>, { self.transactions .extend(transactions.into_iter().map(Into::into)); self } pub fn proposal<T>(mut self, proposal_short_id: T) -> Self where T: Into<ProposalShortId>, { self.proposals.push(proposal_short_id.into()); self } pub fn proposals<I, T>(mut self, proposal_short_ids: I) -> Self where I: IntoIterator<Item = T>, T: Into<ProposalShortId>, { self.proposals .extend(proposal_short_ids.into_iter().map(Into::into)); self } /// # Warning /// /// For testing purpose only, this method is used to construct a incorrect Block. pub unsafe fn build_unchecked(self) -> Block { let Self { header_builder, uncles, transactions, proposals,
} = self; Block {
header: header_builder.build(), uncles, transactions, proposals, } } pub fn build(self) -> Block { let Self { header_builder, uncles, transactions, proposals, } = self; let transactions_root = cal_transactions_root(&transactions); let witnesses_root = cal_witnesses_root(&transactions); let proposals_hash = cal_proposals_hash(&proposals); let uncles_hash = uncles_hash(&uncles); let header = header_builder .transactions_root(transactions_root) .witnesses_root(witnesses_root) .proposals_hash(proposals_hash) .uncles_hash(uncles_hash) .uncles_count(uncles.len() as u32) .build(); Block { header, uncles, transactions, proposals, } } } #[cfg(test)] mod tests { use super::*; use numext_fixed_hash::H256; #[test] fn test_cal_proposals_hash() { let proposal1 = ProposalShortId::new([1; 10]); let proposal2 = ProposalShortId::new([2; 10]); let proposals = [proposal1, proposal2]; let id = cal_proposals_hash(&proposals); assert_eq!( id, H256([ 0xd1, 0x67, 0x0e, 0x45, 0xaf, 0x1d, 0xeb, 0x9c, 0xc0, 0x09, 0x51, 0xd7, 0x1c, 0x09, 0xce, 0x80, 0x93, 0x2e, 0x7d, 0xdf, 0x9f, 0xb1, 0x51, 0xd7, 0x44, 0x43, 0x6b, 0xd0, 0x4a, 0xc4, 0xa5, 0x62 ]) ); } }
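// A small usage sketch of `BlockBuilder`: `default()` comes from the derived
// `Default`, and the header fields are whatever `HeaderBuilder`'s defaults
// produce, so the result is illustrative rather than a valid chain block.
#[cfg(test)]
mod builder_sketch {
    use super::*;

    #[test]
    fn build_collects_proposals_and_seals_a_header() {
        let block = BlockBuilder::default()
            .proposal(ProposalShortId::new([1u8; 10]))
            .build();
        // `build()` recomputes the transaction/witness roots, the proposals
        // hash and the uncles hash before constructing the header.
        assert_eq!(block.proposals().len(), 1);
        assert!(block.transactions().is_empty());
    }
}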
file_upload.py
""" File upload page using a png file """ from selenium.webdriver.common.by import By from pages.base_page import BasePage class
FileUpload(BasePage): FILE_UP = (By.ID, 'file-upload-field') def upload_file(self): file_up = self.driver.find_element(*self.FILE_UP) file_up.send_keys('C:/Users/anton/PycharmProjects/Automation_testing/exercices_todo/blue.png')
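# A hypothetical usage sketch: it assumes BasePage's constructor takes the
# webdriver and stores it as self.driver, and the URL is a placeholder for
# whichever page hosts the 'file-upload-field' input.
#
# from selenium import webdriver
#
# driver = webdriver.Chrome()
# driver.get('http://localhost:8000/upload')
# FileUpload(driver).upload_file()
# driver.quit()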
css_formatter.rs
/// Utility functions to output css strings. pub struct CssFormatter; impl CssFormatter { /// Output the value in pixels. pub fn px(x:i32) -> String
{ format!("{}{}", x, if x != 0 { "px" } else { "" }) }
/// Output the position. pub fn position(x: i32, y: i32) -> String { format!("{} {}", CssFormatter::px(x), CssFormatter::px(y)) } }
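// Expected outputs of the helpers above; zero is left unitless so that
// shorthand such as `margin: 0` stays valid CSS.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn formats_pixels_and_positions() {
        assert_eq!(CssFormatter::px(10), "10px");
        assert_eq!(CssFormatter::px(0), "0");
        assert_eq!(CssFormatter::px(-4), "-4px");
        assert_eq!(CssFormatter::position(3, 4), "3px 4px");
    }
}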
test_locks.py
import json from unittest import TestCase from time import sleep from cs3api4lab.tests.share_test_base import ShareTestBase from traitlets.config import LoggingConfigurable import urllib.parse class TestLocks(ShareTestBase, TestCase): einstein_id = '4c510ada-c86b-4815-8820-42cdf82c3d51' einstein_idp = 'cernbox.cern.ch' marie_id = 'f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c' marie_idp = 'cesnet.cz' richard_id = '932b4540-8d16-481e-8ef4-588e4b6b151c' richard_idp = 'example.org' receiver_role = 'viewer' receiver_grantee_type = 'user' file_path = '/home/test_locks.txt' shared_file_path = '/reva/einstein/test_locks.txt' storage_id = '123e4567-e89b-12d3-a456-426655440000' share_id = None conflict_name = None def test_lock_created_when_file_written(self): self.file_name = self.file_path + self.get_random_suffix() try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') file_ref = self.storage_logic.get_unified_file_ref(self.file_name, '/') file_info = self.storage_logic._stat_internal(file_ref).info self.assertTrue(file_info.arbitrary_metadata.metadata) self.assertIn("lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51", file_info.arbitrary_metadata.metadata) lock = json.loads(urllib.parse.unquote(file_info.arbitrary_metadata.metadata["lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51"])) self.assertEquals(lock['username'], 'einstein') self.assertEquals(lock['idp'], 'cernbox.cern.ch') self.assertEquals(lock['opaque_id'], '4c510ada-c86b-4815-8820-42cdf82c3d51') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) def test_lock_created_when_file_read(self): self.file_name = self.file_path + self.get_random_suffix() try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] for chunk in self.file_api.read_file(self.file_name): continue file_ref = self.storage_logic.get_unified_file_ref(self.file_name, '/') file_info = self.storage_logic._stat_internal(file_ref).info self.assertTrue(file_info.arbitrary_metadata.metadata) self.assertIn("lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51", file_info.arbitrary_metadata.metadata) lock = json.loads(urllib.parse.unquote(file_info.arbitrary_metadata.metadata["lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51"])) self.assertEquals(lock['username'], 'einstein') self.assertEquals(lock['idp'], 'cernbox.cern.ch') self.assertEquals(lock['opaque_id'], '4c510ada-c86b-4815-8820-42cdf82c3d51') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) def test_write_file_locked_conflict_created(self): suffix = self.get_random_suffix() self.file_name = self.file_path + suffix shared_name = self.shared_file_path + suffix try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') self.conflict_name = self.richard_file_api.write_file(shared_name, "richard_content") lock_stat = self.richard_file_api.stat(self.conflict_name) self.assertEqual(lock_stat['filepath'], self.conflict_name) content = self.read_file_content(self.richard_file_api, self.conflict_name) self.assertEqual(content, 'richard_content', 'File ' + 
self.file_name + ' should contain the string: ' + 'richard_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) if self.conflict_name: self.remove_test_file('richard', self.conflict_name) def test_write_dir_file_locked(self):
suffix = self.get_random_suffix() self.file_name = '/home/testdir/test_locks.txt' + suffix shared_name = '/reva/einstein/testdir/test_locks.txt' + suffix try: try: self.file_api.create_directory('/home/testdir') except: pass #ignore already existing directory created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') self.conflict_name = self.richard_file_api.write_file(shared_name, "richard_content") lock_stat = self.richard_file_api.stat(self.conflict_name) self.assertEqual(lock_stat['filepath'], self.conflict_name) content = self.read_file_content(self.richard_file_api, self.conflict_name) self.assertEqual(content, 'richard_content', 'File ' + self.file_name + ' should contain the string: ' + 'richard_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) if self.conflict_name: self.remove_test_file('richard', self.conflict_name)
def test_write_file_lock_expired(self): suffix = self.get_random_suffix() self.file_name = self.file_path + suffix shared_name = self.shared_file_path + suffix try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') sleep(12) self.richard_file_api.write_file(shared_name, "richard_content") content = self.read_file_content(self.richard_file_api, shared_name) self.assertEqual(content, 'richard_content', 'File ' + self.file_name + ' should contain the string: ' + 'richard_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) def test_write_by_lock_owner_file_locked(self): self.file_name = self.file_path + self.get_random_suffix() try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') self.file_api.write_file(self.file_name, 'new_content') content = self.read_file_content(self.file_api, self.file_name) self.assertEqual(content, 'new_content', 'File ' + self.file_name + ' should contain the string: ' + 'new_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name)
coreapi.py
""" sentry.coreapi ~~~~~~~~~~~~~~ :copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ # TODO: We should make the API a class, and UDP/HTTP just inherit from it # This will make it so we can more easily control logging with various # metadata (rather than generic log messages which arent useful). from datetime import datetime import base64 import logging import time import uuid import zlib from django.utils.encoding import smart_str from sentry.conf import settings from sentry.exceptions import InvalidInterface, InvalidData, InvalidTimestamp from sentry.models import Project, ProjectKey, TeamMember, Team from sentry.plugins import plugins from sentry.tasks.store import store_event from sentry.utils import is_float, json from sentry.utils.auth import get_signature, parse_auth_header from sentry.utils.imports import import_string from sentry.utils.queue import maybe_delay logger = logging.getLogger('sentry.errors.coreapi') RESERVED_FIELDS = ( 'project', 'event_id', 'message', 'checksum', 'culprit', 'level', 'time_spent', 'logger', 'server_name', 'site', 'timestamp', 'extra', 'modules', ) REQUIRED_FIELDS = ( 'message', ) class APIError(Exception): http_status = 400 msg = 'Invalid request' def __init__(self, msg=None): if msg: self.msg = msg class APIUnauthorized(APIError): http_status = 401 msg = 'Unauthorized' class APIForbidden(APIError): http_status = 403 class APITimestampExpired(APIError): http_status = 410 def extract_auth_vars(request): if request.META.get('HTTP_X_SENTRY_AUTH', '').startswith('Sentry'): # Auth version 3.0 (same as 2.0, diff header) return parse_auth_header(request.META['HTTP_X_SENTRY_AUTH']) elif request.META.get('HTTP_AUTHORIZATION', '').startswith('Sentry'): # Auth version 2.0 return parse_auth_header(request.META['HTTP_AUTHORIZATION']) else: return None def project_from_auth_vars(auth_vars, data, require_signature=True): api_key = auth_vars.get('sentry_key') if api_key: try: pk = ProjectKey.objects.get_from_cache(public_key=api_key) except ProjectKey.DoesNotExist: raise APIForbidden('Invalid signature') project = Project.objects.get_from_cache(pk=pk.project_id) secret_key = pk.secret_key if pk.user: try: team = Team.objects.get_from_cache(pk=project.team_id) except Team.DoesNotExist: raise APIUnauthorized('Member does not have access to project') try: tm = TeamMember.objects.get(team=team, user=pk.user, is_active=True) except TeamMember.DoesNotExist: raise APIUnauthorized('Member does not have access to project') if not pk.user.is_active: raise APIUnauthorized('Account is not active') result = plugins.first('has_perm', tm.user, 'create_event', project) if result is False: raise APIUnauthorized() else: project = None secret_key = settings.KEY signature = auth_vars.get('sentry_signature') timestamp = auth_vars.get('sentry_timestamp') if signature and timestamp: validate_hmac(data, signature, timestamp, secret_key) elif require_signature: raise APIUnauthorized() return project def validate_hmac(message, signature, timestamp, secret_key): try: timestamp_float = float(timestamp) except ValueError: raise APIError('Invalid timestamp') if timestamp_float < time.time() - 3600: # 1 hour raise APITimestampExpired('Message has expired') sig_hmac = get_signature(message, timestamp, secret_key) if sig_hmac != signature: raise APIForbidden('Invalid signature') def project_from_api_key_and_id(api_key, project_id): """ Given a public api key and a project id returns a project instance or throws APIUnauthorized. 
""" try: pk = ProjectKey.objects.get_from_cache(public_key=api_key) except ProjectKey.DoesNotExist: raise APIUnauthorized('Invalid api key') if str(project_id).isdigit(): if str(pk.project_id) != str(project_id): raise APIUnauthorized('Invalid project id') else: if str(pk.project.slug) != str(project_id): raise APIUnauthorized('Invalid project id') project = Project.objects.get_from_cache(pk=pk.project_id) if pk.user: team = Team.objects.get_from_cache(pk=project.team_id) try: tm = TeamMember.objects.get(team=team, user=pk.user, is_active=True) except TeamMember.DoesNotExist: raise APIUnauthorized('Member does not have access to project') if not pk.user.is_active: raise APIUnauthorized('Account is not active') tm.project = project result = plugins.first('has_perm', tm.user, 'create_event', project) if result is False: raise APIUnauthorized() return project def project_from_id(request): """ Given a request returns a project instance or throws APIUnauthorized. """ if not request.user.is_active: raise APIUnauthorized('Account is not active') try: project = Project.objects.get_from_cache(pk=request.GET['project_id']) except Project.DoesNotExist: raise APIUnauthorized('Invalid project') try: team = Team.objects.get_from_cache(pk=project.team_id) except Project.DoesNotExist: raise APIUnauthorized('Member does not have access to project') try: TeamMember.objects.get( user=request.user, team=team, is_active=True, ) except TeamMember.DoesNotExist: raise APIUnauthorized('Member does not have access to project') result = plugins.first('has_perm', request.user, 'create_event', project) if result is False: raise APIUnauthorized() return project def decode_and_decompress_data(encoded_data): try: try: return base64.b64decode(encoded_data).decode('zlib') except zlib.error: return base64.b64decode(encoded_data) except Exception, e: # This error should be caught as it suggests that there's a # bug somewhere in the client's code. logger.exception('Bad data received') raise APIForbidden('Bad data decoding request (%s, %s)' % ( e.__class__.__name__, e)) def safely_load_json_string(json_string): try: obj = json.loads(json_string) except Exception, e: # This error should be caught as it suggests that there's a # bug somewhere in the client's code. logger.exception('Bad data received') raise APIForbidden('Bad data reconstructing object (%s, %s)' % ( e.__class__.__name__, e)) # XXX: ensure keys are coerced to strings return dict((smart_str(k), v) for k, v in obj.iteritems()) def ensure_valid_project_id(desired_project, data): # Confirm they're using either the master key, or their specified project # matches with the signed project. if desired_project: if str(data.get('project', '')) not in [str(desired_project.pk), desired_project.slug]: raise APIForbidden('Invalid credentials') data['project'] = desired_project.pk elif not desired_project: data['project'] = 1 def process_data_timestamp(data): if is_float(data['timestamp']): try: data['timestamp'] = datetime.fromtimestamp(float(data['timestamp'])) except: logger.exception('Failed reading timestamp') del data['timestamp'] elif not isinstance(data['timestamp'], datetime): if '.' in data['timestamp']: format = '%Y-%m-%dT%H:%M:%S.%f' else:
format = '%Y-%m-%dT%H:%M:%S'
if 'Z' in data['timestamp']: # support UTC marker, but not other timestamps format += 'Z' try: data['timestamp'] = datetime.strptime(data['timestamp'], format) except: raise InvalidTimestamp('Invalid value for timestamp: %r' % data['timestamp']) return data def validate_data(project, data, client=None): for k in REQUIRED_FIELDS: if not data.get(k): raise InvalidData('Missing required parameter: %r' % k) ensure_valid_project_id(project, data) if 'event_id' not in data: data['event_id'] = uuid.uuid4().hex if 'timestamp' in data: try: process_data_timestamp(data) except InvalidTimestamp: # Log the error, remove the timestamp, and continue logger.error('Client %r passed an invalid value for timestamp %r' % ( client or '<unknown client>', data['timestamp'], )) del data['timestamp'] if data.get('modules') and type(data['modules']) != dict: raise InvalidData('Invalid type for \'modules\': must be a mapping') for k, v in data.iteritems(): if k in RESERVED_FIELDS: continue if '.' not in k: raise InvalidInterface('%r is not a valid interface name' % k) try: interface = import_string(k) except (ImportError, AttributeError), e: raise InvalidInterface('%r is not a valid interface name: %s' % (k, e)) try: data[k] = interface(**v).serialize() except Exception, e: raise InvalidData('Unable to validate interface, %r: %s' % (k, e)) level = data.get('level') or settings.DEFAULT_LOG_LEVEL if isinstance(level, basestring) and not level.isdigit(): # assume it's something like 'warn' data['level'] = settings.LOG_LEVEL_REVERSE_MAP[level] return data def insert_data_to_database(data): maybe_delay(store_event, data=data)
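# Illustrative inputs for process_data_timestamp above (hypothetical values):
# a numeric epoch is converted directly, ISO 8601 strings are parsed with or
# without fractional seconds, and a trailing 'Z' (UTC marker) is tolerated.
#
# >>> process_data_timestamp({'timestamp': '2012-01-01T10:30:45Z'})['timestamp']
# datetime.datetime(2012, 1, 1, 10, 30, 45)
# >>> process_data_timestamp({'timestamp': '2012-01-01T10:30:45.500000'})['timestamp']
# datetime.datetime(2012, 1, 1, 10, 30, 45, 500000)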
133_uni_tech_sydney.py
import requests import urllib.request import time import urllib import re import csv import sys from bs4 import BeautifulSoup def uni_tech_sydney(): url = "https://www.uts.edu.au/about/faculty-engineering-and-information-technology/computer-science/school-computer-science-staff" headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'} r = requests.get(url, headers=headers) # get the soup by parsing the html text of the response r soup = BeautifulSoup(r.text, "html5lib") # print(soup.prettify) # file initialization to write file_name = sys.argv[0] # file_name = file_name[4:] txt_file = file_name.replace(".py", ".txt") f = open(txt_file, "w") csv_file = file_name.replace(".py", ".csv") f2 = open(csv_file, "w") csvwriter = csv.writer(f2)
overall_file = "all_emails.csv" f3 = open(overall_file, "a") csvwriter2 = csv.writer(f3) u_name = "University of Technology, Sydney" country = "Australia" garbage_emails = [] var = [f, csvwriter, csvwriter2, u_name, country, garbage_emails] # d gives the array of all profs on the dept homepage dd = soup.find('table', {'class':"staff-list"}) d = dd.find_all('tr') #iterating for every prof for i in d: td = i.find('td', {'class':'table--cell-width-l'}) if td is None: continue a = td.find('a') if a is None: continue link = "https://www.uts.edu.au"+a.get('href') name = (a.get_text()).strip() name = " ".join(name.split()) name = name.split(',') name = name[1]+' '+name[0] # print(name, link) # check if link is valid or not try: prof_resp = requests.get(link, headers=headers) except: continue email = "Not Found" print(name, email, link) filterandgetEmail(var, garbage_emails, name, link, email, prof_resp) f.close() f2.close() f3.close() print("Finished") def filterandgetEmail(var, garbage_emails, name, link, email, prof_resp): f = var[0] csvwriter = var[1] csvwriter2 = var[2] u_name = var[3] country = var[4] keyword_list = ['Computer Architecture','hardware and system architecture', 'hardware and architecture', 'Computerarchitectuur', 'embedded system', 'computer organization','VLSI Design', 'Computer and System', 'multiprocessor architecture'] flag = 1 prof_soup = BeautifulSoup(prof_resp.text, "html.parser") # print(prof_soup) research_text = prof_soup.text for pattern in keyword_list: if re.search(pattern, research_text, re.IGNORECASE): flag = 0 if email != 'Not Found': f.write(link + '\n' + name + "\t"+ email + "\n") csvwriter.writerow([u_name, country, name, email, link]) csvwriter2.writerow([u_name, country, name, email, link]) else: new_emails = set(re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}", prof_resp.text)) for eemail in garbage_emails: if eemail in new_emails: new_emails.remove(eemail) if len(new_emails) == 0: email = "Email Not Found" f.write(link + '\n' + name + "\t"+ email + "\n") csvwriter.writerow([u_name, country, name, email, link]) csvwriter2.writerow([u_name, country, name, email, link]) else: # f.write(link + '\n' + name) for email in new_emails: f.write(link + '\n' + name + '\t\t' + email + '\n') csvwriter.writerow([u_name, country, name, email, link]) csvwriter2.writerow([u_name, country, name, email, link]) # f.write("\n") f.write(pattern) f.write('\n\n') break if __name__ == '__main__': uni_tech_sydney()
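# Quick sanity check of the e-mail regex used in filterandgetEmail (made-up input):
# >>> import re
# >>> re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}",
# ...            "Contact First.Last@uts.edu.au or admin@example.org")
# ['First.Last@uts.edu.au', 'admin@example.org']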
imageify.pipe.js
"use strict"; var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function (k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var core_1 = require('@angular/core'); var ImageifyPipe = (function () { function
ImageifyPipe() { } ImageifyPipe.prototype.transform = function (value, args) { if (!value) return ''; var pokemonName = value .replace('♀', 'f') .replace('♂', 'm') .replace(/\W+/g, "") .toLocaleLowerCase(); var url = "public/assets/img/pokemons/" + pokemonName + ".jpg"; return url; }; ImageifyPipe = __decorate([ core_1.Pipe({ name: 'imageify' }), __metadata('design:paramtypes', []) ], ImageifyPipe); return ImageifyPipe; }()); exports.ImageifyPipe = ImageifyPipe; //# sourceMappingURL=imageify.pipe.js.map
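// Examples of the normalization above (hypothetical pokemon names):
//   new ImageifyPipe().transform('Nidoran♀')   -> 'public/assets/img/pokemons/nidoranf.jpg'
//   new ImageifyPipe().transform("Farfetch'd") -> 'public/assets/img/pokemons/farfetchd.jpg'
//   new ImageifyPipe().transform('')           -> ''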
s2_put_skeleton_txts_to_a_single_txt.py
#!/usr/bin/env python # coding: utf-8 ''' Read multiple skeletons txts and save them into a single txt. If an image doesn't have a skeleton, discard it. If an image label is not in `CLASSES`, discard it. Input: `skeletons/00001.txt` ~ `skeletons/xxxxx.txt` from `SRC_DETECTED_SKELETONS_FOLDER`. Output: `skeletons_info.txt`. The filepath is `DST_ALL_SKELETONS_TXT`. ''' import numpy as np import simplejson import collections if True: # Include project path import sys import os ROOT = os.path.dirname(os.path.abspath(__file__))+"/../" CURR_PATH = os.path.dirname(os.path.abspath(__file__))+"/" sys.path.append(ROOT) # import utils.lib_feature_proc # This is not needed, # because this script only transfers (part of) the data from many txts to a single txt, # without doing any data analysis. import utils.lib_commons as lib_commons def par(path): # Pre-Append ROOT to the path if it's not absolute return ROOT + path if (path and path[0] != "/") else path # -- Settings cfg_all = lib_commons.read_yaml(ROOT + "config/config.yaml") cfg = cfg_all["s2_put_skeleton_txts_to_a_single_txt.py"] CLASSES = np.array(cfg_all["classes"]) SKELETON_FILENAME_FORMAT = cfg_all["skeleton_filename_format"] SRC_DETECTED_SKELETONS_FOLDER = par(cfg["input"]["detected_skeletons_folder"]) DST_ALL_SKELETONS_TXT = par(cfg["output"]["all_skeletons_txt"]) IDX_PERSON = 0 # Only use the skeleton of the 0th person in each image IDX_ACTION_LABEL = 3 # [1, 7, 54, "jump", "jump_03-02-12-34-01-795/00240.jpg"] # -- Helper function def read_skeletons_from_ith_txt(i): ''' Arguments: i {int}: the ith skeleton txt. Zero-based index. If there are multiple people, then there are multiple skeletons' data in this txt. Return: skeletons_in_ith_txt {list of list}: Length of each skeleton data is supposed to be 59 = 5 image info + 54 xyz positions. ''' filename = SRC_DETECTED_SKELETONS_FOLDER + \ SKELETON_FILENAME_FORMAT.format(i) skeletons_in_ith_txt = lib_commons.read_listlist(filename) return skeletons_in_ith_txt def
(filepaths):
    '''
    Find a non-empty txt file, and then get the length of one skeleton data.
    The data length should be 59, where:
    59 = 5 + 54.
        5: [cnt_action, cnt_clip, cnt_image, action_label, filepath]
            See utils.lib_io.get_training_imgs_info for more details
        54: 18 joints * 3 xyz positions
    '''
    for i in range(len(filepaths)):
        skeletons = read_skeletons_from_ith_txt(i)
        if len(skeletons):
            skeleton = skeletons[IDX_PERSON]
            data_size = len(skeleton)
            assert(data_size == 59)  # MODIFIED
            return data_size
    raise RuntimeError(f"No valid txt under: {SRC_DETECTED_SKELETONS_FOLDER}.")


# -- Main
if __name__ == "__main__":
    '''
    Read multiple skeleton txts and save them into a single txt.
    '''

    # -- Get skeleton filenames
    filepaths = lib_commons.get_filenames(SRC_DETECTED_SKELETONS_FOLDER,
                                          use_sort=True, with_folder_path=True)
    num_skeletons = len(filepaths)

    # -- Check data length of one skeleton
    data_length = get_length_of_one_skeleton_data(filepaths)
    print(f"Data length of one skeleton is {data_length}")

    # -- Read in skeletons and push to all_skeletons
    all_skeletons = []
    labels_cnt = collections.defaultdict(int)
    for i in range(num_skeletons):

        # Read skeletons from a txt
        skeletons = read_skeletons_from_ith_txt(i)
        if not skeletons:  # If empty, discard this image.
            continue
        skeleton = skeletons[IDX_PERSON]
        label = skeleton[IDX_ACTION_LABEL]
        if label not in CLASSES:  # If invalid label, discard this image.
            continue
        labels_cnt[label] += 1

        # Push to result
        all_skeletons.append(skeleton)

        # Print
        if i == 1 or i % 100 == 0:
            print("{}/{}".format(i, num_skeletons))

    # -- Save to txt
    with open(DST_ALL_SKELETONS_TXT, 'w') as f:
        simplejson.dump(all_skeletons, f)

    print(f"There are {len(all_skeletons)} skeleton data.")
    print(f"They are saved to {DST_ALL_SKELETONS_TXT}")
    print("Number of each action: ")
    for label in CLASSES:
        print(f"    {label}: {labels_cnt[label]}")
get_length_of_one_skeleton_data
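Each row that survives the filtering is 5 metadata fields followed by 18 joints * 3 coordinates. A sketch of how one such row can be unpacked, following the field names given in the docstrings above (the sample row is hypothetical):

def unpack_skeleton_row(row):
    # Layout per the docstring: 5 info fields + 18 joints * 3 coordinates = 59 values.
    assert len(row) == 59
    cnt_action, cnt_clip, cnt_image, action_label, filepath = row[:5]
    joints = [tuple(row[5 + 3 * j: 5 + 3 * (j + 1)]) for j in range(18)]
    return action_label, filepath, joints

# Hypothetical row: metadata followed by zeroed joint coordinates.
row = [1, 7, 54, "jump", "jump_03-02-12-34-01-795/00240.jpg"] + [0.0] * 54
label, path, joints = unpack_skeleton_row(row)
print(label, len(joints))  # jump 18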
global.d.ts
import { E2EApi } from './e2eApi';
}
declare global { const e2e: E2EApi;
index.ts
export * from './domain/area'; export * from './domain/base_model'; export * from './domain/brand'; export * from './domain/color'; export * from './domain/customer'; export * from './domain/documents'; export * from './domain/image'; export * from './domain/inventory'; export * from './domain/order_detail'; export * from './domain/order_status'; export * from './domain/order'; export * from './domain/product'; export * from './domain/product_category'; export * from './domain/product_supplier'; export * from './domain/purchase_order_item'; export * from './domain/purchase_order'; export * from './domain/sale_order_item';
export * from './domain/stock'; export * from './domain/location'; export * from './domain/supplier'; export * from './domain/tag'; export * from './domain/transaction_history'; export * from './domain/transaction'; // export common export * from './common'; // export constants export * from './constants';
export * from './domain/sale_order'; export * from './domain/size'; export * from './domain/staff';
multicast.rs
use super::{Notifier as NotifierTrait, Result, ResultExt}; use crate::config; use crate::config::ValueExt; use crate::protocol::{Packet, Event}; use std::net::{UdpSocket, IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs}; pub struct Notifier { bind_addr: SocketAddr, addr: SocketAddr } impl NotifierTrait for Notifier { fn from_config (notifier: &config::NotifierConfig) -> Result<Self> where Self: Sized {
// Get addr and bind_addr let addr = config .get_as_str_or_invalid_key ("notifier.multicast.addr") .chain_err (|| "failed to find an address for the notifier 'multicast'")? .to_socket_addrs() .chain_err (|| "failed to parse 'notifier.multicast.addr' as a socket address")? .find (|&addr| addr.is_ipv4() && addr.ip().is_multicast()) .chain_err (|| "failed to find an IPv4 multicast address for 'notifier.multicast.addr'")?; let bind_addr = config .get_as_str_or_invalid_key ("notifier.multicast.bind_addr") .chain_err (|| "failed to find a bind address for the notifier 'multicast'")? .to_socket_addrs() .chain_err (|| "failed to parse 'notifier.multicast.bind_addr' as a socket address")? .find (|&addr| addr.is_ipv4()) .chain_err (|| "failed to find an IPv4 address for 'notifier.multicast.bind_addr'")?; trace!(target: "notifier::multicast", "initialized, addr = {}, bind_addr = {}", addr, bind_addr); Ok(Self { addr, bind_addr }) } fn notify (&mut self, event: Event) -> Result<()> { let socket = UdpSocket::bind (self.bind_addr) .chain_err (|| format!("failed to bind to {}", self.bind_addr))?; let mut vec: Vec<u8> = Vec::new(); Packet::Event(event).write (&mut vec) .chain_err (|| format!("failed to write event packet '{}' to a local buffer", event))?; socket.send_to (&vec, self.addr) .chain_err (|| format!("failed to send event packet '{}' to {}", event, self.addr))?; debug!(target: "notifier::multicast", "successfully notified event \"{}\"", event); Ok(()) } fn listen(&mut self, on_event: &dyn Fn(Event, Option<SocketAddr>) -> ()) -> Result<()> { let any = Ipv4Addr::new (0, 0, 0, 0); let socket = UdpSocket::bind (self.bind_addr) .chain_err (|| format!("failed to bind to {}", self.bind_addr))?; socket .join_multicast_v4 (match self.addr.ip() { IpAddr::V4(ref ip) => ip, IpAddr::V6(..) => panic!("Got IPv6 address when expecting IPv4") }, &any) .chain_err (|| format!("failed to join multicast group '{}'", self.addr))?; let mut buf = vec![0; 3]; // for now only support 2-byte packets loop { let (number_of_bytes, src_addr) = socket.recv_from (&mut buf) .chain_err (|| "failed to receive data from multicast socket")?; let mut slice = &buf[..number_of_bytes]; match Packet::read (&mut slice) { Ok(packet) => { if let Packet::Event(event) = packet { debug!(target: "notifier::multicast", "received event \"{}\"", event); on_event(event, Some(src_addr)) } }, Err(error) => warn!(target: "notifier::multicast", "can't decode incoming packet: {}", error) } } } }
let config = notifier.config.as_ref() .chain_err (|| config::ErrorKind::MissingOption ("notifier.multicast")) .chain_err (|| "the notifier 'multicast' requires to be configured")?;
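The notifier is plain UDP multicast: send datagrams at the group address, or bind, join the group, and receive. The same flow in Python's socket API, as a rough sketch (the group and port are made-up values):

import socket
import struct

GROUP, PORT = "239.255.0.1", 5007  # hypothetical multicast group and port

def send(payload: bytes):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(payload, (GROUP, PORT))

def listen():
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("", PORT))
    # Equivalent of join_multicast_v4 with INADDR_ANY as the interface.
    mreq = struct.pack("4s4s", socket.inet_aton(GROUP), socket.inet_aton("0.0.0.0"))
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    while True:
        data, src = sock.recvfrom(3)  # the Rust code above also reads tiny packets
        print(f"got {data!r} from {src}")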
__init__.py
''' Copyright 2018 Riverstone Software, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. '''
You may obtain a copy of the License at
https.rs
use std::net::SocketAddr; use crate::name_server::RuntimeProvider; use crate::tls::CLIENT_CONFIG; use proto::xfer::{DnsExchange, DnsExchangeConnect}; use proto::TokioTime; use trust_dns_https::{HttpsClientConnect, HttpsClientStream, HttpsClientStreamBuilder}; use crate::config::TlsClientConfig; #[allow(clippy::type_complexity)] pub(crate) fn new_https_stream<R>( socket_addr: SocketAddr, dns_name: String, client_config: Option<TlsClientConfig>, ) -> DnsExchangeConnect<HttpsClientConnect<R::Tcp>, HttpsClientStream, TokioTime> where R: RuntimeProvider, { let client_config = client_config.map_or_else( || CLIENT_CONFIG.clone(), |TlsClientConfig(client_config)| client_config, ); let https_builder = HttpsClientStreamBuilder::with_client_config(client_config); DnsExchange::connect(https_builder.build::<R::Tcp>(socket_addr, dns_name)) } #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use tokio::runtime::Runtime; use crate::config::{ResolverConfig, ResolverOpts}; use crate::TokioAsyncResolver; fn https_test(config: ResolverConfig)
#[test] fn test_cloudflare_https() { https_test(ResolverConfig::cloudflare_https()) } }
{ //env_logger::try_init().ok(); let mut io_loop = Runtime::new().unwrap(); let resolver = TokioAsyncResolver::new(config, ResolverOpts::default(), io_loop.handle().clone()) .expect("failed to create resolver"); let response = io_loop .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } // check if there is another connection created let response = io_loop .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } }
sync.rs
#![feature(test)] mod sync { extern crate random_access_disk as rad; extern crate tempdir; extern crate test; use self::tempdir::TempDir; use self::test::Bencher; #[bench] fn write_hello_world(b: &mut Bencher) {
file.write(0, b"hello").unwrap(); file.write(5, b" world").unwrap(); }); } #[bench] fn read_hello_world(b: &mut Bencher) { let dir = TempDir::new("random-access-disk").unwrap(); let mut file = rad::Sync::new(dir.path().join("2.db")); file.write(0, b"hello").unwrap(); file.write(5, b" world").unwrap(); b.iter(|| { let _text = file.read(0, 11).unwrap(); }); } #[bench] fn read_write_hello_world(b: &mut Bencher) { let dir = TempDir::new("random-access-disk").unwrap(); let mut file = rad::Sync::new(dir.path().join("3.db")); b.iter(|| { file.write(0, b"hello").unwrap(); file.write(5, b" world").unwrap(); let _text = file.read(0, 11).unwrap(); }); } }
let dir = TempDir::new("random-access-disk").unwrap(); let mut file = rad::Sync::new(dir.path().join("1.db")); b.iter(|| {
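The benches boil down to timing positional writes and reads against a scratch file. A comparable micro-benchmark sketch using os.pwrite and os.pread (POSIX-only):

import os
import tempfile
import time

def bench(n=10_000):
    with tempfile.TemporaryDirectory() as d:
        fd = os.open(os.path.join(d, "1.db"), os.O_RDWR | os.O_CREAT)
        start = time.perf_counter()
        for _ in range(n):
            os.pwrite(fd, b"hello", 0)   # write at offset 0
            os.pwrite(fd, b" world", 5)  # write at offset 5
            assert os.pread(fd, 11, 0) == b"hello world"
        os.close(fd)
        print(f"{n} iterations in {time.perf_counter() - start:.3f}s")

bench()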
TypeChecker.py
import inspect import typing from abc import ABC import builtins def get_builtins(): return list(filter(lambda x: not x.startswith('_'), dir(builtins))) class ITypeChecker(ABC): def is_class(self, obj): if inspect.isclass(obj) and not self.is_primitive(obj): return True return False def is_primitive(self, obj): builtins_list = list(filter(lambda x: not x.startswith('_'), dir(builtins))) return obj.__name__ in builtins_list def is_generic(self, class_type): pass def is_base_generic(self, class_type): pass # python 3.7 if hasattr(typing, '_GenericAlias'): class TypeChecker(ITypeChecker): def is_generic(self, class_type): return self._is_generic(class_type) def is_base_generic(self, class_type): return self._is_base_generic(class_type) def _is_generic(self, cls): if isinstance(cls, typing._GenericAlias): return True if isinstance(cls, typing._SpecialForm): return cls not in {typing.Any} return False def _is_base_generic(self, cls): if isinstance(cls, typing._GenericAlias): if cls.__origin__ in {typing.Generic, typing._Protocol}: return False if isinstance(cls, typing._VariadicGenericAlias): return True return len(cls.__parameters__) > 0 if isinstance(cls, typing._SpecialForm): return cls._name in {'ClassVar', 'Union', 'Optional'} return False elif hasattr(typing, '_Union'): class TypeChecker(ITypeChecker): # python 3.6 def is_generic(self, class_type): return self._is_generic(class_type) def is_base_generic(self, class_type): return self._is_base_generic(class_type) def _is_generic(self, cls): if isinstance(cls, (typing.GenericMeta, typing._Union, typing._Optional, typing._ClassVar)):
def _is_base_generic(self, cls): if isinstance(cls, (typing.GenericMeta, typing._Union)): return cls.__args__ in {None, ()} if isinstance(cls, typing._Optional): return True return False
return True return False
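The branching above exists because typing's internals changed between Python 3.6 and 3.7. On 3.8+, typing.get_origin and __parameters__ cover roughly the same checks; a sketch:

import typing

T = typing.TypeVar("T")

def is_generic(tp) -> bool:
    # Anything with an origin is a generic alias (List[int], Optional[str], ...).
    return typing.get_origin(tp) is not None

def is_base_generic(tp) -> bool:
    # A "base" generic still has unbound type parameters, e.g. List or List[T].
    return bool(getattr(tp, "__parameters__", ()))

print(is_generic(typing.List[int]), is_generic(int))                        # True False
print(is_base_generic(typing.List[T]), is_base_generic(typing.List[int]))   # True False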
main_v2.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { anyhow::{Context as _, Error}, async_trait::async_trait, fdio, fidl::endpoints::{DiscoverableService, Proxy}, fidl_fuchsia_bluetooth_bredr::ProfileMarker, fidl_fuchsia_bluetooth_snoop::SnoopMarker, fidl_fuchsia_io::DirectoryProxy, fuchsia_async as fasync, fuchsia_component::{client, server}, futures::{future, StreamExt}, log::{error, info, warn}, }; mod config; const BT_GAP_CHILD_NAME: &str = "bt-gap"; const BT_RFCOMM_CHILD_NAME: &str = "bt-rfcomm"; #[async_trait] trait ComponentClientAdapter { async fn open_childs_exposed_directory( &mut self, child_name: String, ) -> Result<DirectoryProxy, Error>; } struct ComponentClient; #[async_trait] impl ComponentClientAdapter for ComponentClient { async fn open_childs_exposed_directory( &mut self, child_name: String, ) -> Result<DirectoryProxy, Error> { client::open_childs_exposed_directory(child_name, None).await } } /// Open the directory of the child which will underlie the Profile service. bt-rfcomm is the /// preferred Profile service provider if present, but if it is not we fall back to bt-gap. // // TODO(fxbug.dev/71315): A single bt-rfcomm instance won't function correctly in the presence
// modify bt-rfcomm to accommodate this issue.
async fn open_childs_service_directory<C: ComponentClientAdapter>(
    component_client: &mut C,
) -> Result<DirectoryProxy, Error> {
    let underlying_profile_svc =
        component_client.open_childs_exposed_directory(BT_RFCOMM_CHILD_NAME.to_owned()).await;
    match underlying_profile_svc {
        Err(e) => {
            warn!(
                "failed to bind bt-rfcomm child ({:?}), falling back to bt-gap's Profile service",
                e
            );
            component_client.open_childs_exposed_directory(BT_GAP_CHILD_NAME.to_owned()).await
        }
        dir => {
            info!("successfully opened bt-rfcomm svc directory");
            dir
        }
    }
}

#[fuchsia::component(logging = true)]
fn main() -> Result<(), Error> {
    info!("starting bt-init...");

    let mut executor = fasync::LocalExecutor::new().context("error creating executor")?;
    let cfg = config::Config::load().context("error loading config")?;

    // Start bt-snoop service before anything else and hold onto the connection until bt-init exits.
    let snoop_connection;
    if cfg.autostart_snoop() {
        info!("starting snoop service...");
        snoop_connection = client::connect_to_protocol::<SnoopMarker>();
        if let Err(e) = snoop_connection {
            warn!("failed to start snoop service: {}", e);
        } else {
            info!("snoop service started successfully");
        }
    }

    let run_bluetooth = async move {
        let underlying_profile_svc = open_childs_service_directory(&mut ComponentClient).await?;
        let mut fs = server::ServiceFs::new();
        fs.dir("svc").add_service_at(ProfileMarker::SERVICE_NAME, |chan| {
            Some((ProfileMarker::SERVICE_NAME, chan))
        });

        fs.take_and_serve_directory_handle()?;
        info!("initialization complete, begin serving {}", ProfileMarker::SERVICE_NAME);

        let outer_fs = fs.for_each(move |(name, chan)| {
            // Forward only the Profile service; anything else is unexpected.
            if name != ProfileMarker::SERVICE_NAME {
                error!(
                    "Received unexpected service {} when we only expect to serve {}",
                    name,
                    ProfileMarker::SERVICE_NAME
                );
                return future::ready(());
            }
            let _ =
                fdio::service_connect_at(underlying_profile_svc.as_channel().as_ref(), name, chan)
                    .map_err(|e| warn!("error passing {} handle to service: {:?}", name, e));
            future::ready(())
        });
        Ok::<(), Error>(outer_fs.await)
    };

    executor
        .run_singlethreaded(run_bluetooth)
        .context("bt-init encountered an error during execution")
}

#[cfg(test)]
mod tests {
    use super::*;
    use {
        anyhow::format_err, fuchsia_async::Channel as AsyncChannel, fuchsia_zircon as zx,
        std::collections::HashSet,
    };

    struct MockComponentClient {
        pub children_to_fail_for: HashSet<String>,
        pub bt_gap_channel: Option<zx::Channel>,
        pub bt_rfcomm_channel: Option<zx::Channel>,
    }

    impl MockComponentClient {
        fn new() -> Self {
            Self {
                children_to_fail_for: HashSet::new(),
                bt_gap_channel: None,
                bt_rfcomm_channel: None,
            }
        }
    }

    #[async_trait]
    impl ComponentClientAdapter for MockComponentClient {
        async fn open_childs_exposed_directory(
            &mut self,
            child_name: String,
        ) -> Result<DirectoryProxy, Error> {
            if self.children_to_fail_for.contains(&child_name) {
                return Err(format_err!("couldn't open {}'s directory :/", &child_name));
            }
            let (local, client) = zx::Channel::create()?;
            match child_name.as_str() {
                BT_RFCOMM_CHILD_NAME => self.bt_rfcomm_channel = Some(local),
                BT_GAP_CHILD_NAME => self.bt_gap_channel = Some(local),
                _ => panic!("MockComponentClient received unexpected child name: {}", child_name),
            }
            Ok(DirectoryProxy::from_channel(AsyncChannel::from_channel(client).unwrap()))
        }
    }

    fn assert_channels_connected(writer: &zx::Channel, reader: &zx::Channel) {
        let expected_bytes = [1, 2, 3, 4, 5];
        writer.write(&expected_bytes, &mut []).unwrap();
        let mut bytes = zx::MessageBuf::new();
        reader.read(&mut bytes).unwrap();
assert_eq!(&expected_bytes, bytes.bytes()); } #[fuchsia::test] async fn test_open_rfcomm_works() { let mut mock_client = MockComponentClient::new(); // If opening bt-rfcomm's directory works, the directory should be connected to bt-rfcomm. let profile_svc = open_childs_service_directory(&mut mock_client).await.unwrap(); assert!(mock_client.bt_rfcomm_channel.is_some()); assert_channels_connected( mock_client.bt_rfcomm_channel.unwrap().as_ref(), profile_svc.as_channel().as_ref(), ); } #[fuchsia::test] async fn test_open_rfcomm_fails() { // If opening bt-rfcomm's directory fails, the directory should be connected to bt-gap. let mut mock_client = MockComponentClient::new(); mock_client.children_to_fail_for.insert(BT_RFCOMM_CHILD_NAME.to_owned()); let profile_svc = open_childs_service_directory(&mut mock_client).await.unwrap(); assert!(mock_client.bt_rfcomm_channel.is_none()); assert!(mock_client.bt_gap_channel.is_some()); assert_channels_connected( mock_client.bt_gap_channel.unwrap().as_ref(), profile_svc.as_channel().as_ref(), ); } #[fuchsia::test] async fn test_open_rfcomm_and_gap_fail() { // If opening both bt-gap and bt-rfcomm's directory fail, opening the profile service should fail. let mut mock_client = MockComponentClient::new(); mock_client.children_to_fail_for.insert(BT_RFCOMM_CHILD_NAME.to_owned()); mock_client.children_to_fail_for.insert(BT_GAP_CHILD_NAME.to_owned()); assert!(open_childs_service_directory(&mut mock_client).await.is_err()); } }
// of multiple bt-host devices during its lifetime. When handling this is a priority, we will // likely need to either launch an instance of bt-rfcomm per-bt-host (e.g. inside bt-gap), or
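Stripped of the Fuchsia specifics, open_childs_service_directory is a try-the-preferred-child-then-fall-back pattern, roughly this (the names are illustrative):

def open_profile_dir(open_child):
    # Prefer bt-rfcomm; fall back to bt-gap if it cannot be opened.
    try:
        return open_child("bt-rfcomm")
    except OSError as e:
        print(f"failed to bind bt-rfcomm child ({e}), falling back to bt-gap")
        return open_child("bt-gap")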
stddef.rs
use std::ffi::{c_void, CStr, CString}; use std::os::raw::c_char; use auxtools::*; #[cfg(windows)] static STDDEF_FN_SYMBOL: &[u8] = b"?StdDefDM@DungBuilder@@QAEPADXZ\0"; #[cfg(unix)] static STDDEF_FN_SYMBOL: &[u8] = b"_ZN11DungBuilder8StdDefDMEv\0"; static mut STDDEF: Option<&'static str> = None; #[init(full)] fn stddef_init() -> Result<(), String> { let stddef_fn: extern "C" fn(*const c_void) -> *const c_char; #[cfg(windows)] { use winapi::um::libloaderapi; unsafe {
if libloaderapi::GetModuleHandleExA(0, core_str.as_ptr(), &mut module) == 0 { return Err("Couldn't get module handle for BYONDCORE".into()); } let symbol = libloaderapi::GetProcAddress(module, STDDEF_FN_SYMBOL.as_ptr() as *const c_char); if symbol.is_null() { return Err("Couldn't find STDDEF_FN in BYONDCORE".into()); } stddef_fn = std::mem::transmute(symbol); } } #[cfg(unix)] { use libc::{dlopen, dlsym, RTLD_LAZY}; unsafe { let module = dlopen(CString::new(BYONDCORE).unwrap().as_ptr(), RTLD_LAZY); if module.is_null() { return Err("Couldn't get module handle for BYONDCORE".into()); } let symbol = dlsym(module, STDDEF_FN_SYMBOL.as_ptr() as *const c_char); if symbol.is_null() { return Err("Couldn't find STDDEF_FN in BYONDCORE".into()); } stddef_fn = std::mem::transmute(symbol); } } unsafe { match CStr::from_ptr(stddef_fn(std::ptr::null())).to_str() { Ok(str) => STDDEF = Some(str), Err(e) => { return Err(format!("Couldn't convert STDDEF from CStr: {}", e)); } } } Ok(()) } pub fn get_stddef() -> Option<&'static str> { unsafe { STDDEF } }
let mut module = std::ptr::null_mut(); let core_str = CString::new(BYONDCORE).unwrap();
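The same load-library-and-resolve-mangled-symbol dance can be prototyped with ctypes; a sketch only (the library path is hypothetical and this is untested against a real byondcore):

import ctypes

def get_stddef(libpath="libbyond.so"):  # hypothetical library path
    lib = ctypes.CDLL(libpath)  # dlopen
    # dlsym by the Itanium-mangled name used above.
    fn = getattr(lib, "_ZN11DungBuilder8StdDefDMEv")
    fn.restype = ctypes.c_char_p
    fn.argtypes = [ctypes.c_void_p]
    result = fn(None)
    return result.decode() if result is not None else None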
event_track_quiz.py
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import http from odoo.addons.website_event_track.controllers.event_track import EventTrackController from odoo.http import request class WebsiteEventTrackQuiz(EventTrackController): # QUIZZES IN PAGE # ---------------------------------------------------------- @http.route('/event_track/quiz/submit', type="json", auth="public", website=True) def event_track_quiz_submit(self, event_id, track_id, answer_ids):
@http.route('/event_track/quiz/reset', type="json", auth="user", website=True) def quiz_reset(self, event_id, track_id): track = self._fetch_track(track_id) event_track_visitor = track._get_event_track_visitors(force_create=True) event_track_visitor.write({ 'quiz_completed': False, 'quiz_points': 0, }) def _get_quiz_answers_details(self, track, answer_ids): # TDE FIXME: lost sudo questions_count = request.env['event.quiz.question'].sudo().search_count([('quiz_id', '=', track.sudo().quiz_id.id)]) user_answers = request.env['event.quiz.answer'].sudo().search([('id', 'in', answer_ids)]) if len(user_answers.mapped('question_id')) != questions_count: return {'error': 'quiz_incomplete'} return { 'user_answers': user_answers, 'points': sum([ answer.awarded_points for answer in user_answers.filtered(lambda answer: answer.is_correct) ]) }
track = self._fetch_track(track_id) event_track_visitor = track._get_event_track_visitors(force_create=True) visitor_sudo = event_track_visitor.visitor_id if event_track_visitor.quiz_completed: return {'error': 'track_quiz_done'} answers_details = self._get_quiz_answers_details(track, answer_ids) if answers_details.get('error'): return answers_details event_track_visitor.write({ 'quiz_completed': True, 'quiz_points': answers_details['points'], }) result = { 'answers': { answer.question_id.id: { 'is_correct': answer.is_correct, 'comment': answer.comment } for answer in answers_details['user_answers'] }, 'quiz_completed': event_track_visitor.quiz_completed, 'quiz_points': answers_details['points'] } if visitor_sudo and request.httprequest.cookies.get('visitor_uuid', '') != visitor_sudo.access_token: result['visitor_uuid'] = visitor_sudo.access_token return result
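The scoring rule in _get_quiz_answers_details is: reject unless every question was answered, otherwise sum awarded_points over the correct answers. The same rule over plain dicts (the data here is hypothetical):

def grade_quiz(questions_count, user_answers):
    # user_answers: list of dicts {question_id, is_correct, awarded_points}
    if len({a["question_id"] for a in user_answers}) != questions_count:
        return {"error": "quiz_incomplete"}
    points = sum(a["awarded_points"] for a in user_answers if a["is_correct"])
    return {"user_answers": user_answers, "points": points}

answers = [
    {"question_id": 1, "is_correct": True, "awarded_points": 10},
    {"question_id": 2, "is_correct": False, "awarded_points": 10},
]
print(grade_quiz(2, answers)["points"])  # 10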
scalar.rs
//! Scalar field arithmetic modulo n = 115792089210356248762697446949407573529996955224135760342422259061068512044369 pub mod blinding; use crate::{FieldBytes, NistP256}; use core::{ convert::TryInto, ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}, }; use elliptic_curve::{ ff::{Field, PrimeField}, generic_array::arr, rand_core::RngCore, subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}, util::{adc64, mac64, sbb64}, }; #[cfg(feature = "digest")] use ecdsa_core::{generic_array::typenum::U32, hazmat::FromDigest, signature::digest::Digest}; #[cfg(feature = "zeroize")] use crate::SecretKey; #[cfg(feature = "zeroize")] use elliptic_curve::zeroize::Zeroize; /// The number of 64-bit limbs used to represent a [`Scalar`]. const LIMBS: usize = 4; type U256 = [u64; LIMBS]; /// Constant representing the modulus /// n = FFFFFFFF 00000000 FFFFFFFF FFFFFFFF BCE6FAAD A7179E84 F3B9CAC2 FC632551 // One way to calculate the modulus is with `GP/PARI`: // ``` // p = (2^224) * (2^32 - 1) + 2^192 + 2^96 - 1 // b = 41058363725152142129326129780047268409114441015993725554835256314039467401291 // E = ellinit([Mod(-3, p), Mod(b, p)]) // default(parisize, 120000000) // n = ellsea(E) // isprime(n) // ``` pub const MODULUS: U256 = [ 0xf3b9_cac2_fc63_2551, 0xbce6_faad_a717_9e84, 0xffff_ffff_ffff_ffff, 0xffff_ffff_0000_0000, ]; const MODULUS_SHR1: U256 = [ 0x79dc_e561_7e31_92a8, 0xde73_7d56_d38b_cf42, 0x7fff_ffff_ffff_ffff, 0x7fff_ffff_8000_0000, ]; /// MU = floor(2^512 / n) /// = 115792089264276142090721624801893421302707618245269942344307673200490803338238 /// = 0x100000000fffffffffffffffeffffffff43190552df1a6c21012ffd85eedf9bfe pub const MU: [u64; 5] = [ 0x012f_fd85_eedf_9bfe, 0x4319_0552_df1a_6c21, 0xffff_fffe_ffff_ffff, 0x0000_0000_ffff_ffff, 0x0000_0000_0000_0001, ]; /// Non-zero scalar value. pub type NonZeroScalar = elliptic_curve::NonZeroScalar<NistP256>; /// NIST P-256 field element serialized as bits. pub type ScalarBits = elliptic_curve::ScalarBits<NistP256>; /// An element in the finite field modulo n. // The internal representation is as little-endian ordered u64 words. #[derive(Clone, Copy, Debug, Default)] #[cfg_attr(docsrs, doc(cfg(feature = "arithmetic")))] pub struct Scalar(pub(crate) U256); impl Field for Scalar { fn random(mut rng: impl RngCore) -> Self { let mut bytes = FieldBytes::default(); // Generate a uniformly random scalar using rejection sampling, // which produces a uniformly random distribution of scalars. // // This method is not constant time, but should be secure so long as // rejected RNG outputs are unrelated to future ones (which is a // necessary property of a `CryptoRng`). // // With an unbiased RNG, the probability of failing to complete after 4 // iterations is vanishingly small. loop { rng.fill_bytes(&mut bytes); if let Some(scalar) = Scalar::from_repr(bytes) { return scalar; } } } fn zero() -> Self { Scalar::zero() } fn one() -> Self { Scalar::one() } fn is_zero(&self) -> bool { self.is_zero().into() } #[must_use] fn square(&self) -> Self { Scalar::square(self) } #[must_use] fn double(&self) -> Self { self.add(self) } fn invert(&self) -> CtOption<Self> { Scalar::invert(self) } // TODO(tarcieri); stub! 
See: https://github.com/RustCrypto/elliptic-curves/issues/170 fn sqrt(&self) -> CtOption<Self> { todo!("see RustCrypto/elliptic-curves#170"); } } impl PrimeField for Scalar { type Repr = FieldBytes; #[cfg(target_pointer_width = "32")] type ReprBits = [u32; 8]; #[cfg(target_pointer_width = "64")] type ReprBits = [u64; 4]; const NUM_BITS: u32 = 256; const CAPACITY: u32 = 255; const S: u32 = 4; /// Attempts to parse the given byte array as an SEC1-encoded scalar. /// /// Returns None if the byte array does not contain a big-endian integer in the range /// [0, p). fn from_repr(bytes: FieldBytes) -> Option<Self> { let mut w = [0u64; LIMBS]; // Interpret the bytes as a big-endian integer w. w[3] = u64::from_be_bytes(bytes[0..8].try_into().unwrap()); w[2] = u64::from_be_bytes(bytes[8..16].try_into().unwrap()); w[1] = u64::from_be_bytes(bytes[16..24].try_into().unwrap()); w[0] = u64::from_be_bytes(bytes[24..32].try_into().unwrap()); // If w is in the range [0, n) then w - n will overflow, resulting in a borrow // value of 2^64 - 1. let (_, borrow) = sbb64(w[0], MODULUS[0], 0); let (_, borrow) = sbb64(w[1], MODULUS[1], borrow); let (_, borrow) = sbb64(w[2], MODULUS[2], borrow); let (_, borrow) = sbb64(w[3], MODULUS[3], borrow); let is_some = (borrow as u8) & 1; CtOption::new(Scalar(w), Choice::from(is_some)).into() } fn to_repr(&self) -> FieldBytes { self.to_bytes() } fn to_le_bits(&self) -> ScalarBits { self.into() } fn is_odd(&self) -> bool { self.0[0] as u8 == 1 } #[cfg(target_pointer_width = "32")] fn char_le_bits() -> ScalarBits { [ 0xfc63_2551, 0xf3b9_cac2, 0xa717_9e84, 0xbce6_faad, 0xffff_ffff, 0xffff_ffff, 0x0000_0000, 0xffff_ffff, ] .into() } #[cfg(target_pointer_width = "64")] fn char_le_bits() -> ScalarBits { MODULUS.into() } fn multiplicative_generator() -> Self { 7u64.into() } fn root_of_unity() -> Self { Scalar::from_repr(arr![u8; 0xff, 0xc9, 0x7f, 0x06, 0x2a, 0x77, 0x09, 0x92, 0xba, 0x80, 0x7a, 0xce, 0x84, 0x2a, 0x3d, 0xfc, 0x15, 0x46, 0xca, 0xd0, 0x04, 0x37, 0x8d, 0xaf, 0x05, 0x92, 0xd7, 0xfb, 0xb4, 0x1e, 0x66, 0x00, ]) .unwrap() } } impl From<u64> for Scalar { fn from(k: u64) -> Self { Scalar([k, 0, 0, 0]) } } impl PartialEq for Scalar { fn eq(&self, other: &Self) -> bool { self.ct_eq(other).into() } } impl Eq for Scalar {} impl PartialOrd for Scalar { fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> { Some(self.cmp(other)) } } /// Returns sign of left - right fn cmp_vartime(left: &U256, right: &U256) -> i32 { use core::cmp::Ordering::*; // since we're little-endian, need to reverse iterate for (l, r) in left.iter().rev().zip(right.iter().rev()) { match l.cmp(r) { Less => return -1, Greater => return 1, Equal => continue, } } 0 } fn shr1(u256: &mut U256) { let mut bit: u64 = 0; for digit in u256.iter_mut().rev() { let new_digit = (bit << 63) | (*digit >> 1); bit = *digit & 1; *digit = new_digit; } } impl Ord for Scalar { fn cmp(&self, other: &Self) -> core::cmp::Ordering { use core::cmp::Ordering::*; match cmp_vartime(&self.0, &other.0) { -1 => Less, 0 => Equal, 1 => Greater, _ => unreachable!(), } } } #[cfg(feature = "digest")] #[cfg_attr(docsrs, doc(cfg(feature = "digest")))] impl FromDigest<NistP256> for Scalar { /// Convert the output of a digest algorithm into a [`Scalar`] reduced /// modulo n. fn from_digest<D>(digest: D) -> Self where D: Digest<OutputSize = U32>, { Self::from_bytes_reduced(&digest.finalize()) } } impl Scalar { /// Returns the zero scalar. pub const fn zero() -> Scalar { Scalar([0, 0, 0, 0]) } /// Returns the multiplicative identity. 
pub const fn one() -> Scalar { Scalar([1, 0, 0, 0]) } /// Parses the given byte array as a scalar. /// /// Subtracts the modulus when the byte array is larger than the modulus. pub fn from_bytes_reduced(bytes: &FieldBytes) -> Self { Self::sub_inner( u64::from_be_bytes(bytes[24..32].try_into().unwrap()), u64::from_be_bytes(bytes[16..24].try_into().unwrap()), u64::from_be_bytes(bytes[8..16].try_into().unwrap()), u64::from_be_bytes(bytes[0..8].try_into().unwrap()), 0, MODULUS[0], MODULUS[1], MODULUS[2], MODULUS[3], 0, ) } /// Returns the SEC1 encoding of this scalar. pub fn to_bytes(&self) -> FieldBytes { let mut ret = FieldBytes::default(); ret[0..8].copy_from_slice(&self.0[3].to_be_bytes()); ret[8..16].copy_from_slice(&self.0[2].to_be_bytes()); ret[16..24].copy_from_slice(&self.0[1].to_be_bytes()); ret[24..32].copy_from_slice(&self.0[0].to_be_bytes()); ret } /// Determine if this `Scalar` is zero. /// /// # Returns /// /// If zero, return `Choice(1)`. Otherwise, return `Choice(0)`. pub fn is_zero(&self) -> Choice { self.ct_eq(&Scalar::zero()) } /// Returns self + rhs mod n pub const fn add(&self, rhs: &Self) -> Self { // Bit 256 of n is set, so addition can result in five words. let (w0, carry) = adc64(self.0[0], rhs.0[0], 0); let (w1, carry) = adc64(self.0[1], rhs.0[1], carry); let (w2, carry) = adc64(self.0[2], rhs.0[2], carry); let (w3, w4) = adc64(self.0[3], rhs.0[3], carry); // Attempt to subtract the modulus, to ensure the result is in the field. Self::sub_inner( w0, w1, w2, w3, w4, MODULUS[0], MODULUS[1], MODULUS[2], MODULUS[3], 0, ) } /// Returns 2*self. pub const fn double(&self) -> Self { self.add(self) } /// Returns self - rhs mod n pub const fn subtract(&self, rhs: &Self) -> Self { Self::sub_inner( self.0[0], self.0[1], self.0[2], self.0[3], 0, rhs.0[0], rhs.0[1], rhs.0[2], rhs.0[3], 0, ) } #[inline] #[allow(clippy::too_many_arguments)] const fn sub_inner( l0: u64, l1: u64, l2: u64, l3: u64, l4: u64, r0: u64, r1: u64, r2: u64, r3: u64, r4: u64, ) -> Self { let (w0, borrow) = sbb64(l0, r0, 0); let (w1, borrow) = sbb64(l1, r1, borrow); let (w2, borrow) = sbb64(l2, r2, borrow); let (w3, borrow) = sbb64(l3, r3, borrow); let (_, borrow) = sbb64(l4, r4, borrow); // If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise // borrow = 0x000...000. Thus, we use it as a mask to conditionally add the // modulus. 
let (w0, carry) = adc64(w0, MODULUS[0] & borrow, 0); let (w1, carry) = adc64(w1, MODULUS[1] & borrow, carry); let (w2, carry) = adc64(w2, MODULUS[2] & borrow, carry); let (w3, _) = adc64(w3, MODULUS[3] & borrow, carry); Scalar([w0, w1, w2, w3]) } /// Barrett Reduction /// /// The general algorithm is: /// ```text /// p = n = order of group /// b = 2^64 = 64bit machine word /// k = 4 /// a \in [0, 2^512] /// mu := floor(b^{2k} / p) /// q1 := floor(a / b^{k - 1}) /// q2 := q1 * mu /// q3 := <- floor(a / b^{k - 1}) /// r1 := a mod b^{k + 1} /// r2 := q3 * m mod b^{k + 1} /// r := r1 - r2 /// /// if r < 0: r := r + b^{k + 1} /// while r >= p: do r := r - p (at most twice) /// ``` /// /// References: /// - Handbook of Applied Cryptography, Chapter 14 /// Algorithm 14.42 /// http://cacr.uwaterloo.ca/hac/about/chap14.pdf /// /// - Efficient and Secure Elliptic Curve Cryptography Implementation of Curve P-256 /// Algorithm 6) Barrett Reduction modulo p /// https://csrc.nist.gov/csrc/media/events/workshop-on-elliptic-curve-cryptography-standards/documents/papers/session6-adalier-mehmet.pdf #[inline] #[allow(clippy::too_many_arguments)] const fn barrett_reduce( a0: u64, a1: u64, a2: u64, a3: u64, a4: u64, a5: u64, a6: u64, a7: u64, ) -> Self { let q1: [u64; 5] = [a3, a4, a5, a6, a7]; const fn q1_times_mu_shift_five(q1: &[u64; 5]) -> [u64; 5] { // Schoolbook multiplication. let (_w0, carry) = mac64(0, q1[0], MU[0], 0); let (w1, carry) = mac64(0, q1[0], MU[1], carry); let (w2, carry) = mac64(0, q1[0], MU[2], carry); let (w3, carry) = mac64(0, q1[0], MU[3], carry); let (w4, w5) = mac64(0, q1[0], MU[4], carry); let (_w1, carry) = mac64(w1, q1[1], MU[0], 0); let (w2, carry) = mac64(w2, q1[1], MU[1], carry); let (w3, carry) = mac64(w3, q1[1], MU[2], carry); let (w4, carry) = mac64(w4, q1[1], MU[3], carry); let (w5, w6) = mac64(w5, q1[1], MU[4], carry); let (_w2, carry) = mac64(w2, q1[2], MU[0], 0); let (w3, carry) = mac64(w3, q1[2], MU[1], carry); let (w4, carry) = mac64(w4, q1[2], MU[2], carry); let (w5, carry) = mac64(w5, q1[2], MU[3], carry); let (w6, w7) = mac64(w6, q1[2], MU[4], carry); let (_w3, carry) = mac64(w3, q1[3], MU[0], 0); let (w4, carry) = mac64(w4, q1[3], MU[1], carry); let (w5, carry) = mac64(w5, q1[3], MU[2], carry); let (w6, carry) = mac64(w6, q1[3], MU[3], carry); let (w7, w8) = mac64(w7, q1[3], MU[4], carry); let (_w4, carry) = mac64(w4, q1[4], MU[0], 0); let (w5, carry) = mac64(w5, q1[4], MU[1], carry); let (w6, carry) = mac64(w6, q1[4], MU[2], carry); let (w7, carry) = mac64(w7, q1[4], MU[3], carry); let (w8, w9) = mac64(w8, q1[4], MU[4], carry); // let q2 = [_w0, _w1, _w2, _w3, _w4, w5, w6, w7, w8, w9]; [w5, w6, w7, w8, w9] } let q3 = q1_times_mu_shift_five(&q1); let r1: [u64; 5] = [a0, a1, a2, a3, a4]; const fn q3_times_n_keep_five(q3: &[u64; 5]) -> [u64; 5] { // Schoolbook multiplication. 
let (w0, carry) = mac64(0, q3[0], MODULUS[0], 0); let (w1, carry) = mac64(0, q3[0], MODULUS[1], carry); let (w2, carry) = mac64(0, q3[0], MODULUS[2], carry); let (w3, carry) = mac64(0, q3[0], MODULUS[3], carry); let (w4, _) = mac64(0, q3[0], 0, carry); let (w1, carry) = mac64(w1, q3[1], MODULUS[0], 0); let (w2, carry) = mac64(w2, q3[1], MODULUS[1], carry); let (w3, carry) = mac64(w3, q3[1], MODULUS[2], carry); let (w4, _) = mac64(w4, q3[1], MODULUS[3], carry); let (w2, carry) = mac64(w2, q3[2], MODULUS[0], 0); let (w3, carry) = mac64(w3, q3[2], MODULUS[1], carry); let (w4, _) = mac64(w4, q3[2], MODULUS[2], carry); let (w3, carry) = mac64(w3, q3[3], MODULUS[0], 0); let (w4, _) = mac64(w4, q3[3], MODULUS[1], carry); let (w4, _) = mac64(w4, q3[4], MODULUS[0], 0); [w0, w1, w2, w3, w4] } let r2: [u64; 5] = q3_times_n_keep_five(&q3); #[inline] #[allow(clippy::too_many_arguments)] const fn sub_inner_five(l: [u64; 5], r: [u64; 5]) -> [u64; 5] { let (w0, borrow) = sbb64(l[0], r[0], 0); let (w1, borrow) = sbb64(l[1], r[1], borrow); let (w2, borrow) = sbb64(l[2], r[2], borrow); let (w3, borrow) = sbb64(l[3], r[3], borrow); let (w4, _borrow) = sbb64(l[4], r[4], borrow); // If underflow occurred on the final limb - don't care (= add b^{k+1}). [w0, w1, w2, w3, w4] } let r: [u64; 5] = sub_inner_five(r1, r2); #[inline] #[allow(clippy::too_many_arguments)] const fn subtract_n_if_necessary(r0: u64, r1: u64, r2: u64, r3: u64, r4: u64) -> [u64; 5] { let (w0, borrow) = sbb64(r0, MODULUS[0], 0); let (w1, borrow) = sbb64(r1, MODULUS[1], borrow); let (w2, borrow) = sbb64(r2, MODULUS[2], borrow); let (w3, borrow) = sbb64(r3, MODULUS[3], borrow); let (w4, borrow) = sbb64(r4, 0, borrow); // If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise // borrow = 0x000...000. Thus, we use it as a mask to conditionally add the // modulus. let (w0, carry) = adc64(w0, MODULUS[0] & borrow, 0); let (w1, carry) = adc64(w1, MODULUS[1] & borrow, carry); let (w2, carry) = adc64(w2, MODULUS[2] & borrow, carry); let (w3, carry) = adc64(w3, MODULUS[3] & borrow, carry); let (w4, _carry) = adc64(w4, 0, carry); [w0, w1, w2, w3, w4] } // Result is in range (0, 3*n - 1), // and 90% of the time, no subtraction will be needed. let r = subtract_n_if_necessary(r[0], r[1], r[2], r[3], r[4]); let r = subtract_n_if_necessary(r[0], r[1], r[2], r[3], r[4]); Scalar([r[0], r[1], r[2], r[3]]) } /// Returns self * rhs mod n pub const fn mul(&self, rhs: &Self) -> Self { // Schoolbook multiplication. let (w0, carry) = mac64(0, self.0[0], rhs.0[0], 0); let (w1, carry) = mac64(0, self.0[0], rhs.0[1], carry); let (w2, carry) = mac64(0, self.0[0], rhs.0[2], carry); let (w3, w4) = mac64(0, self.0[0], rhs.0[3], carry); let (w1, carry) = mac64(w1, self.0[1], rhs.0[0], 0); let (w2, carry) = mac64(w2, self.0[1], rhs.0[1], carry); let (w3, carry) = mac64(w3, self.0[1], rhs.0[2], carry); let (w4, w5) = mac64(w4, self.0[1], rhs.0[3], carry); let (w2, carry) = mac64(w2, self.0[2], rhs.0[0], 0); let (w3, carry) = mac64(w3, self.0[2], rhs.0[1], carry); let (w4, carry) = mac64(w4, self.0[2], rhs.0[2], carry); let (w5, w6) = mac64(w5, self.0[2], rhs.0[3], carry); let (w3, carry) = mac64(w3, self.0[3], rhs.0[0], 0); let (w4, carry) = mac64(w4, self.0[3], rhs.0[1], carry); let (w5, carry) = mac64(w5, self.0[3], rhs.0[2], carry); let (w6, w7) = mac64(w6, self.0[3], rhs.0[3], carry); Scalar::barrett_reduce(w0, w1, w2, w3, w4, w5, w6, w7) } /// Returns self * self mod p pub const fn square(&self) -> Self { // Schoolbook multiplication. 
self.mul(self) } /// Returns `self^by`, where `by` is a little-endian integer exponent. /// /// **This operation is variable time with respect to the exponent.** If the exponent /// is fixed, this operation is effectively constant time. pub fn pow_vartime(&self, by: &[u64; 4]) -> Self { let mut res = Self::one(); for e in by.iter().rev() { for i in (0..64).rev() { res = res.square(); if ((*e >> i) & 1) == 1 { res *= self; } } } res } /// Returns the multiplicative inverse of self, if self is non-zero pub fn invert(&self) -> CtOption<Self> { // We need to find b such that b * a ≡ 1 mod p. As we are in a prime // field, we can apply Fermat's Little Theorem: // // a^p ≡ a mod p // a^(p-1) ≡ 1 mod p // a^(p-2) * a ≡ 1 mod p // // Thus inversion can be implemented with a single exponentiation. // // This is `n - 2`, so the top right two digits are `4f` instead of `51`. let inverse = self.pow_vartime(&[ 0xf3b9_cac2_fc63_254f, 0xbce6_faad_a717_9e84, 0xffff_ffff_ffff_ffff, 0xffff_ffff_0000_0000, ]); CtOption::new(inverse, !self.is_zero()) } /// Is integer representing equivalence class odd pub fn is_odd(&self) -> Choice { ((self.0[0] & 1) as u8).into() } /// Is integer representing equivalence class even pub fn is_even(&self) -> Choice { !self.is_odd() } fn shr1(&mut self) { shr1(&mut self.0); } /// Faster inversion using Stein's algorithm pub fn invert_vartime(&self) -> CtOption<Self> { // https://link.springer.com/article/10.1007/s13389-016-0135-4 let mut u = *self; // currently an invalid scalar let mut v = Scalar(MODULUS); #[allow(non_snake_case)] let mut A = Self::one(); #[allow(non_snake_case)] let mut C = Self::zero(); while !bool::from(u.is_zero()) { // u-loop while bool::from(u.is_even()) { u.shr1(); if bool::from(A.is_even()) { A.shr1(); } else { A.shr1(); A += Scalar(MODULUS_SHR1); A += Self::one(); } } // v-loop while bool::from(v.is_even()) { v.shr1(); if bool::from(C.is_even()) { C.shr1(); } else { C.shr1(); C += Scalar(MODULUS_SHR1); C += Self::one(); } } // sub-step if u >= v { u -= &v; A -= &C; } else { v -= &u; C -= &A; } } CtOption::new(C, !self.is_zero()) } } impl Add<Scalar> for Scalar { type Output = Scalar; fn add(self, other: Scalar) -> Scalar { Scalar::add(&self, &other) } } impl Add<&Scalar> for &Scalar { type Output = Scalar; fn add(self, other: &Scalar) -> Scalar { Scalar::add(self, other) } } impl Add<&Scalar> for Scalar { type Output = Scalar; fn add(self, other: &Scalar) -> Scalar { Scalar::add(&self, other) } } impl AddAssign<Scalar> for Scalar { fn add_assign(&mut self, rhs: Scalar) { *self = Scalar::add(self, &rhs); } } impl AddAssign<&Scalar> for Scalar { fn add_assign(&mut self, rhs: &Scalar) { *self = Scalar::add(self, rhs); } } impl Sub<Scalar> for Scalar { type Output = Scalar; fn sub(self, other: Scalar) -> Scalar { Scalar::subtract(&self, &other) } } impl Sub<&Scalar> for &Scalar { type Output = Scalar; fn sub(self, other: &Scalar) -> Scalar { Scalar::subtract(self, other) } } impl Sub<&Scalar> for Scalar { type Output = Scalar; fn sub(self, other: &Scalar) -> Scalar { Scalar::subtract(&self, other) } } impl SubAssign<Scalar> for Scalar { fn sub_assign(&mut self, rhs: Scalar) { *self = Scalar::subtract(self, &rhs); } } impl SubAssign<&Scalar> for Scalar { fn sub_assign(&mut self, rhs: &Scalar) { *self = Scalar::subtract(self, rhs); } } impl Mul<Scalar> for Scalar { type Output = Scalar; fn mul(self, other: Scalar) -> Scalar { Scalar::mul(&self, &other) } } impl Mul<&Scalar> for &Scalar { type Output = Scalar; fn mul(self, other: &Scalar) -> Scalar { 
Scalar::mul(self, other) } } impl Mul<&Scalar> for Scalar { type Output = Scalar; fn mul(self, other: &Scalar) -> Scalar { Scalar::mul(&self, other) } } impl MulAssign<Scalar> for Scalar { fn mul_assign(&mut self, rhs: Scalar) {
MulAssign<&Scalar> for Scalar { fn mul_assign(&mut self, rhs: &Scalar) { *self = Scalar::mul(self, rhs); } } impl Neg for Scalar { type Output = Scalar; fn neg(self) -> Scalar { Scalar::zero() - self } } impl<'a> Neg for &'a Scalar { type Output = Scalar; fn neg(self) -> Scalar { Scalar::zero() - self } } impl ConditionallySelectable for Scalar { fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { Scalar([ u64::conditional_select(&a.0[0], &b.0[0], choice), u64::conditional_select(&a.0[1], &b.0[1], choice), u64::conditional_select(&a.0[2], &b.0[2], choice), u64::conditional_select(&a.0[3], &b.0[3], choice), ]) } } impl ConstantTimeEq for Scalar { fn ct_eq(&self, other: &Self) -> Choice { self.0[0].ct_eq(&other.0[0]) & self.0[1].ct_eq(&other.0[1]) & self.0[2].ct_eq(&other.0[2]) & self.0[3].ct_eq(&other.0[3]) } } #[cfg(target_pointer_width = "32")] impl From<&Scalar> for ScalarBits { fn from(scalar: &Scalar) -> ScalarBits { let mut output = [0u32; 8]; for (input, output) in scalar.0.iter().zip(output.chunks_mut(2)) { output[0] = (input & 0xFFFFFFFF) as u32; output[1] = (input >> 32) as u32; } output.into() } } #[cfg(target_pointer_width = "64")] impl From<&Scalar> for ScalarBits { fn from(scalar: &Scalar) -> ScalarBits { scalar.0.into() } } impl From<Scalar> for FieldBytes { fn from(scalar: Scalar) -> Self { scalar.to_bytes() } } impl From<&Scalar> for FieldBytes { fn from(scalar: &Scalar) -> Self { scalar.to_bytes() } } #[cfg(feature = "zeroize")] impl From<&SecretKey> for Scalar { fn from(secret_key: &SecretKey) -> Scalar { **secret_key.secret_scalar() } } #[cfg(feature = "zeroize")] impl Zeroize for Scalar { fn zeroize(&mut self) { self.0.as_mut().zeroize() } } #[cfg(test)] mod tests { use super::Scalar; use crate::FieldBytes; use elliptic_curve::ff::PrimeField; #[cfg(feature = "zeroize")] use crate::SecretKey; #[test] fn from_to_bytes_roundtrip() { let k: u64 = 42; let mut bytes = FieldBytes::default(); bytes[24..].copy_from_slice(k.to_be_bytes().as_ref()); let scalar = Scalar::from_repr(bytes).unwrap(); assert_eq!(bytes, scalar.to_bytes()); } #[test] // Basic tests that multiplication works. fn multiply() { let one = Scalar::one(); let two = one + &one; let three = two + &one; let six = three + &three; assert_eq!(six, two * &three); let minus_two = -two; let minus_three = -three; assert_eq!(two, -minus_two); assert_eq!(minus_three * &minus_two, minus_two * &minus_three); assert_eq!(six, minus_two * &minus_three); } #[test] // Basic tests that scalar inversion works. 
fn invert() { let one = Scalar::one(); let three = one + &one + &one; let inv_three = three.invert().unwrap(); // println!("1/3 = {:x?}", &inv_three); assert_eq!(three * &inv_three, one); let minus_three = -three; // println!("-3 = {:x?}", &minus_three); let inv_minus_three = minus_three.invert().unwrap(); assert_eq!(inv_minus_three, -inv_three); // println!("-1/3 = {:x?}", &inv_minus_three); assert_eq!(three * &inv_minus_three, -one); } // Tests that a Scalar can be safely converted to a SecretKey and back #[test] #[cfg(feature = "zeroize")] fn from_ec_secret() { let scalar = Scalar::one(); let secret = SecretKey::from_bytes(scalar.to_bytes()).unwrap(); let rederived_scalar = Scalar::from(&secret); assert_eq!(scalar.0, rederived_scalar.0); } #[test] #[cfg(target_pointer_width = "32")] fn scalar_into_scalarbits() { use super::ScalarBits; let minus_one = ScalarBits::from([ 0xfc63_2550, 0xf3b9_cac2, 0xa717_9e84, 0xbce6_faad, 0xffff_ffff, 0xffff_ffff, 0x0000_0000, 0xffff_ffff, ]); let scalar_bits = ScalarBits::from(&-Scalar::from(1)); assert_eq!(minus_one, scalar_bits); } }
*self = Scalar::mul(self, &rhs); } } impl
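Barrett reduction trades the division in a mod n for two multiplications by the precomputed mu = floor(b^(2k) / n). The algorithm from the comment above, checked with Python integers against the group order n defined at the top of the file (the test value a is arbitrary):

def barrett_reduce(a, n, b=2**64, k=4):
    assert 0 <= a < b**(2 * k) and b**(k - 1) <= n < b**k
    mu = b**(2 * k) // n              # precomputed once per modulus
    q1 = a // b**(k - 1)
    q3 = (q1 * mu) // b**(k + 1)
    r = a % b**(k + 1) - (q3 * n) % b**(k + 1)
    if r < 0:
        r += b**(k + 1)
    while r >= n:                     # at most two subtractions
        r -= n
    return r

n = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551
a = (2**255 + 12345) ** 2             # a 512-bit-ish product, like w0..w7 above
assert barrett_reduce(a, n) == a % n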
tencent.py
# -*- coding: utf-8 -*-
import scrapy
import requests
from ..items import TencentItem
import json


class TencentSpider(scrapy.Spider):
    name = 'tencent'
    allowed_domains = ['careers.tencent.com']
    one_url = 'https://careers.tencent.com/tencentcareer/api/post/Query?timestamp=1608216394591&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=&attrId=&keyword={}&pageIndex={}&pageSize=10&language=zh-cn&area=cn'
    two_url = 'https://careers.tencent.com/tencentcareer/api/post/ByPostId?timestamp=1608216394591&postId={}&language=zh-cn'
    keyword = input('Enter a keyword: ')
    # URL of the first page for a given category
    start_urls = [one_url.format(keyword, 1)]

    def get_total(self):
        """Get the total number of pages for a given category"""
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'}
        html = requests.get(url=self.one_url.format(self.keyword, 1), headers=headers).json()
        count = html['Data']['Count']
        total = count // 10 if count % 10 == 0 else count // 10 + 1
        return total

    def parse(self, response):
        """Generate the first-level page URLs for every page and hand them to the scheduler queue
" total = self.get_total() for index in range(1, total + 1): page_url = self.one_url.format(self.keyword, index) # dont_filter: 让交给调度器的此请求不参与去重 yield scrapy.Request(url=page_url, dont_filter=True, callback=self.parse_one_page) def parse_one_page(self, response): """一级页面解析: 提取postId的值,用于拼接职位详情页的地址""" one_html = json.loads(response.text) for one_job_dict in one_html['Data']['Posts']: post_id = one_job_dict['PostId'] job_info_url = self.two_url.format(post_id) # 把详情页的Url地址交给调度器入队列 yield scrapy.Request(url=job_info_url, callback=self.parse_two_page) def parse_two_page(self, response): # 获取响应内容,并转为python数据类型 html = json.loads(response.text) # 提取具体数据 item = TencentItem() item['job_name'] = html['Data']['RecruitPostName'] item['job_type'] = html['Data']['CategoryName'] item['job_duty'] = html['Data']['Responsibility'] item['job_require'] = html['Data']['Requirement'] item['job_add'] = html['Data']['LocationName'] item['job_time'] = html['Data']['LastUpdateTime'] # 至此,一条完整数据提取完成,交给项目管道去处理 yield item
入队列""
CNTK_distributed.py
import numpy as np import os import sys import cntk from cntk.layers import Convolution2D, MaxPooling, Dense, Dropout from utils import * import argparse from cntk.train.distributed import Communicator, mpi_communicator # Hyperparams EPOCHS = 1 BATCHSIZE = 64 * 4 LR = 0.01 MOMENTUM = 0.9 N_CLASSES = 10 def create_basic_model(input, out_dims):
def init_model(m): progress_writers = [cntk.logging.ProgressPrinter( freq=int(BATCHSIZE / 2), rank=cntk.train.distributed.Communicator.rank(), num_epochs=EPOCHS)] # Loss (dense labels); check if support for sparse labels loss = cntk.cross_entropy_with_softmax(m, labels) # Momentum SGD # https://github.com/Microsoft/CNTK/blob/master/Manual/Manual_How_to_use_learners.ipynb # unit_gain=False: momentum_direction = momentum*old_momentum_direction + gradient # if unit_gain=True then ...(1-momentum)*gradient local_learner = cntk.momentum_sgd(m.parameters, lr=cntk.learning_rate_schedule(LR, cntk.UnitType.minibatch) , momentum=cntk.momentum_schedule(MOMENTUM), unit_gain=False) distributed_learner = cntk.train.distributed.data_parallel_distributed_learner(local_learner) trainer = cntk.Trainer(m, (loss, cntk.classification_error(m, labels)), [distributed_learner], progress_writers) return trainer, distributed_learner parser = argparse.ArgumentParser() parser.add_argument('--input_dir') #parser.add_argument('--output_dir') print(sys.argv) args = parser.parse_args() # Data into format for library x_train, x_test, y_train, y_test = cifar_for_library(download_dir=args.input_dir, channel_first=True, one_hot=True) # CNTK format y_train = y_train.astype(np.float32) y_test = y_test.astype(np.float32) print(x_train.shape, x_test.shape, y_train.shape, y_test.shape) print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype) # Placeholders features = cntk.input_variable((3, 32, 32), np.float32) labels = cntk.input_variable(N_CLASSES, np.float32) # Load symbol sym = create_basic_model(features, N_CLASSES) def save_model(model, learner, file_name): if learner.communicator().is_main(): model.save(file_name) trainer, learner = init_model(sym) for j in range(EPOCHS): for data, label in yield_mb(x_train, y_train, BATCHSIZE, shuffle=True): trainer.train_minibatch({features: data, labels: label}) # Log (this is just last batch in epoch, not average of batches) eval_error = trainer.previous_minibatch_evaluation_average print("Epoch %d | Accuracy: %.6f" % (j+1, (1-eval_error))) z = cntk.softmax(sym) save_model(sym, learner, "{}/cifar_final.model".format(args.input_dir)) n_samples = (y_test.shape[0]//BATCHSIZE)*BATCHSIZE y_guess = np.zeros(n_samples, dtype=np.int) y_truth = np.argmax(y_test[:n_samples], axis=-1) c = 0 for data, label in yield_mb(x_test, y_test, BATCHSIZE): predicted_label_probs = z.eval({features : data}) y_guess[c*BATCHSIZE:(c+1)*BATCHSIZE] = np.argmax(predicted_label_probs, axis=-1) c += 1 print("Accuracy: ", sum(y_guess == y_truth)/len(y_guess)) cntk.train.distributed.Communicator.finalize()
with cntk.layers.default_options(init=cntk.glorot_uniform(), activation=cntk.relu): net = cntk.layers.Convolution((5,5), 32, pad=True)(input) net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net) net = cntk.layers.Convolution((5,5), 32, pad=True)(net) net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net) net = cntk.layers.Convolution((5,5), 64, pad=True)(net) net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net) net = cntk.layers.Dense(64)(net) net = cntk.layers.Dense(out_dims, activation=None)(net) return net
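The training loop depends on yield_mb from utils to shuffle and batch the arrays; a minimal version of such a generator (a guess at its behaviour, not the actual utils code):

import numpy as np

def yield_mb(X, y, batchsize, shuffle=False):
    # Drop the last partial batch, matching the (len // BATCHSIZE) * BATCHSIZE sizing above.
    if shuffle:
        idx = np.random.permutation(len(X))
        X, y = X[idx], y[idx]
    for i in range(len(X) // batchsize):
        s = slice(i * batchsize, (i + 1) * batchsize)
        yield X[s], y[s]

X = np.arange(20, dtype=np.float32).reshape(10, 2)
y = np.arange(10)
for data, label in yield_mb(X, y, 4, shuffle=True):
    print(data.shape, label.shape)  # (4, 2) (4,)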
cloudproviderconfig.go
package manifests import ( "fmt" "io/ioutil" "path/filepath" "github.com/ghodss/yaml" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig" icopenstack "github.com/openshift/installer/pkg/asset/installconfig/openstack" "github.com/openshift/installer/pkg/asset/manifests/azure" gcpmanifests "github.com/openshift/installer/pkg/asset/manifests/gcp" openstackmanifests "github.com/openshift/installer/pkg/asset/manifests/openstack" vspheremanifests "github.com/openshift/installer/pkg/asset/manifests/vsphere" awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" equinixtypes "github.com/openshift/installer/pkg/types/equinixmetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) var ( cloudProviderConfigFileName = filepath.Join(manifestDir, "cloud-provider-config.yaml") ) const ( cloudProviderConfigDataKey = "config" cloudProviderConfigCABundleDataKey = "ca-bundle.pem" ) // CloudProviderConfig generates the cloud-provider-config.yaml files. type CloudProviderConfig struct { ConfigMap *corev1.ConfigMap File *asset.File } var _ asset.WritableAsset = (*CloudProviderConfig)(nil) // Name returns a human friendly name for the asset. func (*CloudProviderConfig) Name() string { return "Cloud Provider Config" } // Dependencies returns all of the dependencies directly needed to generate // the asset. func (*CloudProviderConfig) Dependencies() []asset.Asset { return []asset.Asset{ &installconfig.InstallConfig{}, &installconfig.ClusterID{}, // PlatformCredsCheck just checks the creds (and asks, if needed) // We do not actually use it in this asset directly, hence // it is put in the dependencies but not fetched in Generate &installconfig.PlatformCredsCheck{}, } } // Generate generates the CloudProviderConfig. func (cpc *CloudProviderConfig) Generate(dependencies asset.Parents) error { installConfig := &installconfig.InstallConfig{} clusterID := &installconfig.ClusterID{} dependencies.Get(installConfig, clusterID) cm := &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ APIVersion: corev1.SchemeGroupVersion.String(), Kind: "ConfigMap", }, ObjectMeta: metav1.ObjectMeta{ Namespace: "openshift-config", Name: "cloud-provider-config", }, Data: map[string]string{}, } switch installConfig.Config.Platform.Name() { case libvirttypes.Name, nonetypes.Name, baremetaltypes.Name, ovirttypes.Name, equinixtypes.Name: // TODO(displague) What should Equinix Metal do? return nil case awstypes.Name: // Store the additional trust bundle in the ca-bundle.pem key if the cluster is being installed on a C2S region. 
trustBundle := installConfig.Config.AdditionalTrustBundle if trustBundle == "" || !awstypes.C2SRegions.Has(installConfig.Config.AWS.Region) { return nil } cm.Data[cloudProviderConfigCABundleDataKey] = trustBundle case openstacktypes.Name: cloud, err := icopenstack.GetSession(installConfig.Config.Platform.OpenStack.Cloud) if err != nil { return errors.Wrap(err, "failed to get cloud config for openstack") } cm.Data[cloudProviderConfigDataKey] = openstackmanifests.CloudProviderConfig(cloud.CloudConfig) // Get the ca-cert-bundle key if there is a value for cacert in clouds.yaml if caPath := cloud.CloudConfig.CACertFile; caPath != "" { caFile, err := ioutil.ReadFile(caPath) if err != nil { return errors.Wrap(err, "failed to read clouds.yaml ca-cert from disk") } cm.Data[cloudProviderConfigCABundleDataKey] = string(caFile) } case azuretypes.Name: session, err := installConfig.Azure.Session() if err != nil {
} nsg := fmt.Sprintf("%s-nsg", clusterID.InfraID) nrg := installConfig.Config.Azure.ClusterResourceGroupName(clusterID.InfraID) if installConfig.Config.Azure.NetworkResourceGroupName != "" { nrg = installConfig.Config.Azure.NetworkResourceGroupName } vnet := fmt.Sprintf("%s-vnet", clusterID.InfraID) if installConfig.Config.Azure.VirtualNetwork != "" { vnet = installConfig.Config.Azure.VirtualNetwork } subnet := fmt.Sprintf("%s-worker-subnet", clusterID.InfraID) if installConfig.Config.Azure.ComputeSubnet != "" { subnet = installConfig.Config.Azure.ComputeSubnet } azureConfig, err := azure.CloudProviderConfig{ CloudName: installConfig.Config.Azure.CloudName, ResourceGroupName: installConfig.Config.Azure.ClusterResourceGroupName(clusterID.InfraID), GroupLocation: installConfig.Config.Azure.Region, ResourcePrefix: clusterID.InfraID, SubscriptionID: session.Credentials.SubscriptionID, TenantID: session.Credentials.TenantID, NetworkResourceGroupName: nrg, NetworkSecurityGroupName: nsg, VirtualNetworkName: vnet, SubnetName: subnet, }.JSON() if err != nil { return errors.Wrap(err, "could not create cloud provider config") } cm.Data[cloudProviderConfigDataKey] = azureConfig case gcptypes.Name: subnet := fmt.Sprintf("%s-worker-subnet", clusterID.InfraID) if installConfig.Config.GCP.ComputeSubnet != "" { subnet = installConfig.Config.GCP.ComputeSubnet } gcpConfig, err := gcpmanifests.CloudProviderConfig(clusterID.InfraID, installConfig.Config.GCP.ProjectID, subnet) if err != nil { return errors.Wrap(err, "could not create cloud provider config") } cm.Data[cloudProviderConfigDataKey] = gcpConfig case vspheretypes.Name: folderPath := installConfig.Config.Platform.VSphere.Folder if len(folderPath) == 0 { dataCenter := installConfig.Config.Platform.VSphere.Datacenter folderPath = fmt.Sprintf("/%s/vm/%s", dataCenter, clusterID.InfraID) } vsphereConfig, err := vspheremanifests.CloudProviderConfig( folderPath, installConfig.Config.Platform.VSphere, ) if err != nil { return errors.Wrap(err, "could not create cloud provider config") } cm.Data[cloudProviderConfigDataKey] = vsphereConfig default: return errors.New("invalid Platform") } cmData, err := yaml.Marshal(cm) if err != nil { return errors.Wrapf(err, "failed to create %s manifest", cpc.Name()) } cpc.ConfigMap = cm cpc.File = &asset.File{ Filename: cloudProviderConfigFileName, Data: cmData, } return nil } // Files returns the files generated by the asset. func (cpc *CloudProviderConfig) Files() []*asset.File { if cpc.File != nil { return []*asset.File{cpc.File} } return []*asset.File{} } // Load loads the already-rendered files back from disk. func (cpc *CloudProviderConfig) Load(f asset.FileFetcher) (bool, error) { return false, nil }
return errors.Wrap(err, "could not get azure session")
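The Azure branch above follows a default-then-override pattern: resource names such as `<infraID>-nsg` and `<infraID>-vnet` are derived from the cluster's InfraID, and explicit install-config values replace them when set. A minimal Python sketch of that pattern (the helper and its default resource-group name are made up for illustration; in the real code the default group comes from ClusterResourceGroupName):

def azure_names(infra_id, network_rg='', vnet='', compute_subnet=''):
    # Derive defaults from the InfraID, then let explicit
    # install-config values take precedence (hypothetical helper).
    return {
        'nsg': '%s-nsg' % infra_id,
        'network_rg': network_rg or '%s-rg' % infra_id,
        'vnet': vnet or '%s-vnet' % infra_id,
        'subnet': compute_subnet or '%s-worker-subnet' % infra_id,
    }

print(azure_names('demo-x7k2p', vnet='shared-vnet'))
# {'nsg': 'demo-x7k2p-nsg', 'network_rg': 'demo-x7k2p-rg',
#  'vnet': 'shared-vnet', 'subnet': 'demo-x7k2p-worker-subnet'}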
identity.go
// Copyright 2019-2020 Authors of Cilium // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package identitybackend import ( "context" "fmt" "reflect" "strconv" "strings" "github.com/cilium/cilium/pkg/allocator" "github.com/cilium/cilium/pkg/idpool" "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" "github.com/cilium/cilium/pkg/k8s/informer" "github.com/cilium/cilium/pkg/k8s/types" "github.com/cilium/cilium/pkg/kvstore" "github.com/cilium/cilium/pkg/labels" "github.com/cilium/cilium/pkg/logging" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/sirupsen/logrus" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/tools/cache" ) var ( log = logging.DefaultLogger.WithField(logfields.LogSubsys, "crd-allocator") ) func NewCRDBackend(c CRDBackendConfiguration) (allocator.Backend, error) { return &crdBackend{CRDBackendConfiguration: c}, nil } type CRDBackendConfiguration struct { NodeName string Store cache.Store Client clientset.Interface KeyType allocator.AllocatorKey } type crdBackend struct { CRDBackendConfiguration } func (c *crdBackend) DeleteAllKeys(ctx context.Context) { } // sanitizeK8sLabels strips the 'k8s:' prefix in the labels generated by // AllocatorKey.GetAsMap (when the key is k8s labels). In the CRD identity case // we map the labels directly to the ciliumidentity CRD instance, and // kubernetes does not allow ':' in the name of the label. These labels are not // the canonical labels of the identity, but used to ease interaction with the // CRD object. func
(old map[string]string) (selected, skipped map[string]string) {
	k8sPrefix := labels.LabelSourceK8s + ":"
	skipped = make(map[string]string, len(old))
	selected = make(map[string]string, len(old))
	for k, v := range old {
		if !strings.HasPrefix(k, k8sPrefix) {
			skipped[k] = v
			continue // skip non-k8s labels
		}
		k = strings.TrimPrefix(k, k8sPrefix) // k8s: is redundant
		selected[k] = v
	}

	return selected, skipped
}

// AllocateID will create an identity CRD, thus creating the identity for this
// key->ID mapping.
// Note: the lock field is not supported with the k8s CRD allocator.
func (c *crdBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) error {
	selectedLabels, skippedLabels := sanitizeK8sLabels(key.GetAsMap())
	log.WithField(logfields.Labels, skippedLabels).Info("Skipped non-kubernetes labels when labelling ciliumidentity. All labels will still be used in identity determination")

	identity := &v2.CiliumIdentity{
		ObjectMeta: metav1.ObjectMeta{
			Name:   id.String(),
			Labels: selectedLabels,
		},
		SecurityLabels: key.GetAsMap(),
	}

	_, err := c.Client.CiliumV2().CiliumIdentities().Create(ctx, identity, metav1.CreateOptions{})
	return err
}

func (c *crdBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
	return c.AllocateID(ctx, id, key)
}

// AcquireReference acquires a reference to the identity.
func (c *crdBackend) AcquireReference(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
	// For CiliumIdentity-based allocation, the reference counting is
	// handled via CiliumEndpoint. Any CiliumEndpoint referring to a
	// CiliumIdentity will keep the CiliumIdentity alive. No action is
	// needed to acquire the reference here.
	return nil
}

func (c *crdBackend) RunLocksGC(_ context.Context, _ map[string]kvstore.Value) (map[string]kvstore.Value, error) {
	return nil, nil
}

func (c *crdBackend) RunGC(ctx context.Context, staleKeysPrevRound map[string]uint64) (map[string]uint64, error) {
	return nil, nil
}

// UpdateKey refreshes the reference indicating that this node is using this
// key->ID mapping. It assumes that the identity already exists but will
// recreate it if reliablyMissing is true.
// Note: the lock field is not supported with the k8s CRD allocator.
func (c *crdBackend) UpdateKey(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool) error {
	err := c.AcquireReference(ctx, id, key, nil)
	if err == nil {
		log.WithFields(logrus.Fields{
			logfields.Identity: id,
			logfields.Labels:   key,
		}).Debug("Acquired reference for identity")
		return nil
	}

	// The CRD (aka the master key) is missing. Try to recover by recreating it
	// if reliablyMissing is set.
	log.WithError(err).WithFields(logrus.Fields{
		logfields.Identity: id,
		logfields.Labels:   key,
	}).Warning("Unable to update CRD identity information with a reference for this node")

	if reliablyMissing {
		// Recreate a missing master key
		if err = c.AllocateID(ctx, id, key); err != nil {
			return fmt.Errorf("unable to recreate missing CRD identity %q->%q: %s", key, id, err)
		}
	}

	return nil
}

func (c *crdBackend) UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error {
	return c.UpdateKey(ctx, id, key, reliablyMissing)
}

// Lock does not return a lock object. Locking is not supported with the k8s
// CRD allocator. It is here to meet interface requirements.
func (c *crdBackend) Lock(ctx context.Context, key allocator.AllocatorKey) (kvstore.KVLocker, error) {
	return &crdLock{}, nil
}

type crdLock struct{}

// Unlock does not unlock a lock object. Locking is not supported with the k8s
// CRD allocator. It is here to meet interface requirements.
func (c *crdLock) Unlock(ctx context.Context) error {
	return nil
}

// Comparator does nothing. Locking is not supported with the k8s
// CRD allocator. It is here to meet interface requirements.
func (c *crdLock) Comparator() interface{} {
	return nil
}

// get returns the first identity found for the given set of labels, as there
// may be duplicate identity entries for the same set of labels.
func (c *crdBackend) get(ctx context.Context, key allocator.AllocatorKey) *types.Identity {
	if c.Store == nil {
		return nil
	}

	for _, identityObject := range c.Store.List() {
		identity, ok := identityObject.(*types.Identity)
		if !ok {
			return nil
		}

		if reflect.DeepEqual(identity.SecurityLabels, key.GetAsMap()) {
			return identity
		}
	}

	return nil
}

// Get returns the first ID which is allocated to a key in the identity CRDs in
// kubernetes.
// Note: the lock field is not supported with the k8s CRD allocator.
func (c *crdBackend) Get(ctx context.Context, key allocator.AllocatorKey) (idpool.ID, error) {
	identity := c.get(ctx, key)
	if identity == nil {
		return idpool.NoID, nil
	}

	id, err := strconv.ParseUint(identity.Name, 10, 64)
	if err != nil {
		return idpool.NoID, fmt.Errorf("unable to parse value '%s': %s", identity.Name, err)
	}

	return idpool.ID(id), nil
}

func (c *crdBackend) GetIfLocked(ctx context.Context, key allocator.AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) {
	return c.Get(ctx, key)
}

// getById fetches the identities from the local store. Returns a nil `err` and
// false `exists` if an Identity is not found for the given `id`.
func (c *crdBackend) getById(ctx context.Context, id idpool.ID) (idty *types.Identity, exists bool, err error) {
	if c.Store == nil {
		return nil, false, fmt.Errorf("store is not available yet")
	}

	identityTemplate := &types.Identity{
		CiliumIdentity: &v2.CiliumIdentity{
			ObjectMeta: metav1.ObjectMeta{
				Name: id.String(),
			},
		},
	}

	obj, exists, err := c.Store.Get(identityTemplate)
	if err != nil {
		return nil, exists, err
	}
	if !exists {
		return nil, exists, nil
	}

	identity, ok := obj.(*types.Identity)
	if !ok {
		return nil, false, fmt.Errorf("invalid object")
	}
	return identity, true, nil
}

// GetByID returns the key associated with an ID. Returns nil if no key is
// associated with the ID.
// Note: the lock field is not supported with the k8s CRD allocator.
func (c *crdBackend) GetByID(ctx context.Context, id idpool.ID) (allocator.AllocatorKey, error) {
	identity, exists, err := c.getById(ctx, id)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, nil
	}

	return c.KeyType.PutKeyFromMap(identity.SecurityLabels), nil
}

// Release dissociates this node from using the identity bound to the given ID.
// When an identity has no references it may be garbage collected.
func (c *crdBackend) Release(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (err error) {
	// For CiliumIdentity-based allocation, the reference counting is
	// handled via CiliumEndpoint. Any CiliumEndpoint referring to a
	// CiliumIdentity will keep the CiliumIdentity alive. No action is
	// needed to release the reference here.
return nil } func (c *crdBackend) ListAndWatch(ctx context.Context, handler allocator.CacheMutations, stopChan chan struct{}) { c.Store = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) identityInformer := informer.NewInformerWithStore( cache.NewListWatchFromClient(c.Client.CiliumV2().RESTClient(), "ciliumidentities", v1.NamespaceAll, fields.Everything()), &v2.CiliumIdentity{}, 0, cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { if identity, ok := obj.(*types.Identity); ok { if id, err := strconv.ParseUint(identity.Name, 10, 64); err == nil { handler.OnAdd(idpool.ID(id), c.KeyType.PutKeyFromMap(identity.SecurityLabels)) } } }, UpdateFunc: func(oldObj, newObj interface{}) { if identity, ok := newObj.(*types.Identity); ok { if id, err := strconv.ParseUint(identity.Name, 10, 64); err == nil { handler.OnModify(idpool.ID(id), c.KeyType.PutKeyFromMap(identity.SecurityLabels)) } } }, DeleteFunc: func(obj interface{}) { // The delete event is sometimes for items with unknown state that are // deleted anyway. if deleteObj, isDeleteObj := obj.(cache.DeletedFinalStateUnknown); isDeleteObj { obj = deleteObj.Obj } if identity, ok := obj.(*types.Identity); ok { if id, err := strconv.ParseUint(identity.Name, 10, 64); err == nil { handler.OnDelete(idpool.ID(id), c.KeyType.PutKeyFromMap(identity.SecurityLabels)) } } else { log.Debugf("Ignoring unknown delete event %#v", obj) } }, }, types.ConvertToIdentity, c.Store, ) go func() { if ok := cache.WaitForCacheSync(stopChan, identityInformer.HasSynced); ok { handler.OnListDone() } }() identityInformer.Run(stopChan) } func (c *crdBackend) Status() (string, error) { return "OK", nil } func (c *crdBackend) Encode(v string) string { return v }
sanitizeK8sLabels
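The sanitizeK8sLabels helper above partitions a label map on the `k8s:` source prefix: prefixed keys are trimmed and mapped onto the CRD, everything else is skipped and only logged. A small Python sketch of the same partitioning logic (the sample labels are made up for illustration):

K8S_PREFIX = "k8s:"

def sanitize_k8s_labels(old):
    """Partition labels: strip the 'k8s:' source prefix from kubernetes
    labels and collect everything else separately."""
    selected, skipped = {}, {}
    for k, v in old.items():
        if not k.startswith(K8S_PREFIX):
            skipped[k] = v  # non-k8s label, not mapped onto the CRD
            continue
        selected[k[len(K8S_PREFIX):]] = v  # 'k8s:' is redundant in the CRD
    return selected, skipped

print(sanitize_k8s_labels({"k8s:app": "web", "reserved:host": ""}))
# -> ({'app': 'web'}, {'reserved:host': ''})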
builder.rs
use crate::prelude::*; use crate::utils::get_iter_capacity; use arrow::array::{ArrayBuilder, ArrayDataBuilder, ArrayRef}; use arrow::datatypes::{ArrowPrimitiveType, Field, ToByteSlice}; use arrow::{ array::{Array, ArrayData, LargeListBuilder, PrimitiveArray, PrimitiveBuilder, StringBuilder}, buffer::Buffer, memory, util::bit_util, }; use std::iter::FromIterator; use std::marker::PhantomData; use std::mem::ManuallyDrop; use std::ops::{Deref, DerefMut}; use std::sync::Arc; pub struct PrimitiveChunkedBuilder<T> where T: ArrowPrimitiveType, { pub builder: PrimitiveBuilder<T>, capacity: usize, field: Field, } impl<T> PrimitiveChunkedBuilder<T> where T: ArrowPrimitiveType, { pub fn new(name: &str, capacity: usize) -> Self { PrimitiveChunkedBuilder { builder: PrimitiveBuilder::<T>::new(capacity), capacity, field: Field::new(name, T::get_data_type(), true), } } /// Appends a value of type `T` into the builder pub fn append_value(&mut self, v: T::Native) { self.builder.append_value(v).expect("could not append"); } /// Appends a null slot into the builder pub fn append_null(&mut self) { self.builder.append_null().expect("could not append"); } /// Appends an `Option<T>` into the builder pub fn append_option(&mut self, v: Option<T::Native>) { self.builder.append_option(v).expect("could not append"); } pub fn finish(mut self) -> ChunkedArray<T> { let arr = Arc::new(self.builder.finish()); let len = arr.len(); ChunkedArray { field: Arc::new(self.field), chunks: vec![arr], chunk_id: vec![len], phantom: PhantomData, } } } impl<T: ArrowPrimitiveType> Deref for PrimitiveChunkedBuilder<T> { type Target = PrimitiveBuilder<T>; fn deref(&self) -> &Self::Target { &self.builder } } impl<T: ArrowPrimitiveType> DerefMut for PrimitiveChunkedBuilder<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.builder } } pub type BooleanChunkedBuilder = PrimitiveChunkedBuilder<BooleanType>; pub struct Utf8ChunkedBuilder { pub builder: StringBuilder, capacity: usize, field: Field, } impl Utf8ChunkedBuilder { pub fn new(name: &str, capacity: usize) -> Self { Utf8ChunkedBuilder { builder: StringBuilder::new(capacity), capacity, field: Field::new(name, ArrowDataType::Utf8, true), } } /// Appends a value of type `T` into the builder pub fn append_value<S: AsRef<str>>(&mut self, v: S) { self.builder .append_value(v.as_ref()) .expect("could not append"); } /// Appends a null slot into the builder pub fn append_null(&mut self) { self.builder.append_null().expect("could not append"); } pub fn append_option<S: AsRef<str>>(&mut self, opt: Option<S>) { match opt { Some(s) => self.append_value(s.as_ref()), None => self.append_null(), } } pub fn finish(mut self) -> Utf8Chunked { let arr = Arc::new(self.builder.finish()); let len = arr.len(); ChunkedArray { field: Arc::new(self.field), chunks: vec![arr], chunk_id: vec![len], phantom: PhantomData, } } } impl Deref for Utf8ChunkedBuilder { type Target = StringBuilder; fn deref(&self) -> &Self::Target { &self.builder } } impl DerefMut for Utf8ChunkedBuilder { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.builder } } pub fn build_primitive_ca_with_opt<T>(s: &[Option<T::Native>], name: &str) -> ChunkedArray<T> where T: ArrowPrimitiveType, T::Native: Copy, { let mut builder = PrimitiveChunkedBuilder::new(name, s.len()); for opt in s { builder.append_option(*opt); } let ca = builder.finish(); ca } fn set_null_bits( mut builder: ArrayDataBuilder, null_bit_buffer: Option<Buffer>, null_count: usize, len: usize, ) -> ArrayDataBuilder { if null_count > 0 { let null_bit_buffer = 
            null_bit_buffer.expect("implementation error. Should not be None if null_count > 0");
        debug_assert!(null_count == len - bit_util::count_set_bits(null_bit_buffer.data()));
        builder = builder
            .null_count(null_count)
            .null_bit_buffer(null_bit_buffer);
    }
    builder
}

/// Take an existing slice and a null bitmap and construct an arrow array.
pub fn build_with_existing_null_bitmap_and_slice<T>(
    null_bit_buffer: Option<Buffer>,
    null_count: usize,
    values: &[T::Native],
) -> PrimitiveArray<T>
where
    T: ArrowPrimitiveType,
{
    let len = values.len();
    // See:
    // https://docs.rs/arrow/0.16.0/src/arrow/array/builder.rs.html#314
    // TODO: make implementation for aligned owned vector for zero copy creation.
    let builder = ArrayData::builder(T::get_data_type())
        .len(len)
        .add_buffer(Buffer::from(values.to_byte_slice()));

    let builder = set_null_bits(builder, null_bit_buffer, null_count, len);
    let data = builder.build();
    PrimitiveArray::<T>::from(data)
}

/// Get the null count and the null bitmap of the arrow array
pub fn get_bitmap<T: Array + ?Sized>(arr: &T) -> (usize, Option<Buffer>) {
    let data = arr.data();
    (
        data.null_count(),
        data.null_bitmap().as_ref().map(|bitmap| {
            let buff = bitmap.buffer_ref();
            buff.clone()
        }),
    )
}

// Used in polars/src/chunked_array/apply.rs:24 to collect from aligned vecs and null bitmaps
impl<T> FromIterator<(AlignedVec<T::Native>, (usize, Option<Buffer>))> for ChunkedArray<T>
where
    T: PolarsNumericType,
{
    fn from_iter<I: IntoIterator<Item = (AlignedVec<T::Native>, (usize, Option<Buffer>))>>(
        iter: I,
    ) -> Self {
        let mut chunks = vec![];

        for (values, (null_count, opt_buffer)) in iter {
            let arr = aligned_vec_to_primitive_array::<T>(values, opt_buffer, null_count);
            chunks.push(Arc::new(arr) as ArrayRef)
        }
        ChunkedArray::new_from_chunks("from_iter", chunks)
    }
}

/// Returns the nearest number that is `>=` `num` and is a multiple of 64
#[inline]
pub fn round_upto_multiple_of_64(num: usize) -> usize {
    round_upto_power_of_2(num, 64)
}

/// Returns the nearest multiple of `factor` that is `>=` `num`. Here `factor` must
/// be a power of 2.
fn round_upto_power_of_2(num: usize, factor: usize) -> usize {
    debug_assert!(factor > 0 && (factor & (factor - 1)) == 0);
    (num + (factor - 1)) & !(factor - 1)
}

/// Take an owned Vec that is 64 byte aligned and create a zero copy PrimitiveArray
/// Can also take a null bit buffer into account.
pub fn aligned_vec_to_primitive_array<T: ArrowPrimitiveType>(
    values: AlignedVec<T::Native>,
    null_bit_buffer: Option<Buffer>,
    null_count: usize,
) -> PrimitiveArray<T> {
    let values = values.into_inner();
    let vec_len = values.len();

    let me = ManuallyDrop::new(values);
    let ptr = me.as_ptr() as *const u8;
    let len = me.len() * std::mem::size_of::<T::Native>();
    let capacity = me.capacity() * std::mem::size_of::<T::Native>();
    debug_assert_eq!((ptr as usize) % 64, 0);

    let buffer = unsafe { Buffer::from_raw_parts(ptr, len, capacity) };

    let builder = ArrayData::builder(T::get_data_type())
        .len(vec_len)
        .add_buffer(buffer);

    let builder = set_null_bits(builder, null_bit_buffer, null_count, vec_len);
    let data = builder.build();
    PrimitiveArray::<T>::from(data)
}

pub trait AlignedAlloc<T> {
    fn with_capacity_aligned(size: usize) -> Vec<T>;
}

impl<T> AlignedAlloc<T> for Vec<T> {
    /// Create a new Vec where the first byte's memory address has an alignment of
    /// 64 bytes, as described by the arrow spec.
    /// Read more:
    /// https://github.com/rust-ndarray/ndarray/issues/771
    fn with_capacity_aligned(size: usize) -> Vec<T> {
        // Can only have a zero copy to arrow memory if address of first byte % 64 == 0
        let t_size = std::mem::size_of::<T>();
        let capacity = size * t_size;
        let ptr = memory::allocate_aligned(capacity) as *mut T;
        // Note: Vec capacity is counted in elements, not bytes, so pass `size`
        // here; passing the byte count would let the Vec write out of bounds.
        unsafe { Vec::from_raw_parts(ptr, 0, size) }
    }
}

pub struct AlignedVec<T>(pub Vec<T>);

impl<T> FromIterator<T> for AlignedVec<T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let mut iter = iter.into_iter();
        let sh = iter.size_hint();
        let size = sh.1.unwrap_or(sh.0);

        let mut inner = Vec::with_capacity_aligned(size);

        while let Some(v) = iter.next() {
            inner.push(v)
        }

        // If the iterator's size hint was too small the Vec has reallocated,
        // which would break the 64 byte alignment guarantee.
        assert!(inner.len() <= size);
        AlignedVec(inner)
    }
}

impl<T> AlignedVec<T> {
    pub fn new(v: Vec<T>) -> Result<Self> {
        if v.as_ptr() as usize % 64 != 0 {
            Err(PolarsError::MemoryNotAligned)
        } else {
            Ok(AlignedVec(v))
        }
    }
    pub fn into_inner(self) -> Vec<T> {
        self.0
    }
}

pub trait NewChunkedArray<T, N> {
    fn new_from_slice(name: &str, v: &[N]) -> Self;
    fn new_from_opt_slice(name: &str, opt_v: &[Option<N>]) -> Self;

    /// Create a new ChunkedArray from an iterator.
    fn new_from_opt_iter(name: &str, it: impl Iterator<Item = Option<N>>) -> Self;

    /// Create a new ChunkedArray from an iterator.
    fn new_from_iter(name: &str, it: impl Iterator<Item = N>) -> Self;
}

impl<T> NewChunkedArray<T, T::Native> for ChunkedArray<T>
where
    T: ArrowPrimitiveType,
{
    fn new_from_slice(name: &str, v: &[T::Native]) -> Self {
        let mut builder = PrimitiveChunkedBuilder::<T>::new(name, v.len());
        v.iter().for_each(|&v| builder.append_value(v));
        builder.finish()
    }

    fn new_from_opt_slice(name: &str, opt_v: &[Option<T::Native>]) -> Self {
        let mut builder = PrimitiveChunkedBuilder::<T>::new(name, opt_v.len());
        opt_v.iter().for_each(|&opt| builder.append_option(opt));
        builder.finish()
    }

    fn new_from_opt_iter(
        name: &str,
        it: impl Iterator<Item = Option<T::Native>>,
    ) -> ChunkedArray<T> {
        let mut builder = PrimitiveChunkedBuilder::new(name, get_iter_capacity(&it));
        it.for_each(|opt| builder.append_option(opt));
        builder.finish()
    }

    /// Create a new ChunkedArray from an iterator.
    fn new_from_iter(name: &str, it: impl Iterator<Item = T::Native>) -> ChunkedArray<T> {
        let mut builder = PrimitiveChunkedBuilder::new(name, get_iter_capacity(&it));
        it.for_each(|opt| builder.append_value(opt));
        builder.finish()
    }
}

impl<S> NewChunkedArray<Utf8Type, S> for Utf8Chunked
where
    S: AsRef<str>,
{
    fn new_from_slice(name: &str, v: &[S]) -> Self {
        let mut builder = StringBuilder::new(v.len());
        v.into_iter().for_each(|val| {
            builder
                .append_value(val.as_ref())
                .expect("Could not append value");
        });

        let field = Arc::new(Field::new(name, ArrowDataType::Utf8, true));

        ChunkedArray {
            field,
            chunks: vec![Arc::new(builder.finish())],
            chunk_id: vec![v.len()],
            phantom: PhantomData,
        }
    }

    fn new_from_opt_slice(name: &str, opt_v: &[Option<S>]) -> Self {
        let mut builder = Utf8ChunkedBuilder::new(name, opt_v.len());
        opt_v.iter().for_each(|opt| match opt {
            Some(v) => builder.append_value(v.as_ref()),
            None => builder.append_null(),
        });
        builder.finish()
    }

    fn new_from_opt_iter(name: &str, it: impl Iterator<Item = Option<S>>) -> Self {
        let mut builder = Utf8ChunkedBuilder::new(name, get_iter_capacity(&it));
        it.for_each(|opt| builder.append_option(opt));
        builder.finish()
    }

    /// Create a new ChunkedArray from an iterator.
fn new_from_iter(name: &str, it: impl Iterator<Item = S>) -> Self { let mut builder = Utf8ChunkedBuilder::new(name, get_iter_capacity(&it)); it.for_each(|v| builder.append_value(v)); builder.finish() } } pub trait LargListBuilderTrait { fn append_opt_series(&mut self, opt_s: &Option<Series>); fn append_series(&mut self, s: &Series); fn finish(&mut self) -> LargeListChunked; } pub struct LargeListPrimitiveChunkedBuilder<T> where T: ArrowPrimitiveType, { pub builder: LargeListBuilder<PrimitiveBuilder<T>>, field: Field, } macro_rules! append_opt_series { ($self:ident, $opt_s: ident) => {{ match $opt_s { Some(s) => { let data = s.array_data(); $self .builder .values() .append_data(&data) .expect("should not fail"); $self.builder.append(true).expect("should not fail"); } None => { $self.builder.append(false).expect("should not fail"); } } }}; } macro_rules! append_series { ($self:ident, $s: ident) => {{ let data = $s.array_data(); $self .builder .values() .append_data(&data) .expect("should not fail"); $self.builder.append(true).expect("should not fail"); }}; } macro_rules! finish_largelist_builder { ($self:ident) => {{ let arr = Arc::new($self.builder.finish()); let len = arr.len(); LargeListChunked { field: Arc::new($self.field.clone()), chunks: vec![arr], chunk_id: vec![len], phantom: PhantomData, } }}; } impl<T> LargeListPrimitiveChunkedBuilder<T> where T: ArrowPrimitiveType, { pub fn new(name: &str, values_builder: PrimitiveBuilder<T>, capacity: usize) -> Self { let builder = LargeListBuilder::with_capacity(values_builder, capacity); let field = Field::new( name, ArrowDataType::LargeList(Box::new(T::get_data_type())), true, ); LargeListPrimitiveChunkedBuilder { builder, field } } pub fn append_slice(&mut self, opt_v: Option<&[T::Native]>) { match opt_v { Some(v) => { self.builder .values() .append_slice(v) .expect("could not append"); self.builder.append(true).expect("should not fail"); } None => { self.builder.append(false).expect("should not fail"); } } } pub fn append_opt_slice(&mut self, opt_v: Option<&[Option<T::Native>]>) { match opt_v { Some(v) => { v.iter().for_each(|opt| { self.builder .values() .append_option(*opt) .expect("could not append") }); self.builder.append(true).expect("should not fail"); } None => { self.builder.append(false).expect("should not fail"); } } } pub fn append_null(&mut self) { self.builder.append(false).expect("should not fail"); } } impl<T> LargListBuilderTrait for LargeListPrimitiveChunkedBuilder<T> where T: ArrowPrimitiveType, { fn append_opt_series(&mut self, opt_s: &Option<Series>) { append_opt_series!(self, opt_s) } fn append_series(&mut self, s: &Series) { append_series!(self, s); } fn finish(&mut self) -> LargeListChunked { finish_largelist_builder!(self) } } pub struct LargeListUtf8ChunkedBuilder { builder: LargeListBuilder<StringBuilder>, field: Field, } impl LargeListUtf8ChunkedBuilder { pub fn new(name: &str, values_builder: StringBuilder, capacity: usize) -> Self { let builder = LargeListBuilder::with_capacity(values_builder, capacity); let field = Field::new( name, ArrowDataType::LargeList(Box::new(ArrowDataType::Utf8)), true, ); LargeListUtf8ChunkedBuilder { builder, field } } } impl LargListBuilderTrait for LargeListUtf8ChunkedBuilder { fn append_opt_series(&mut self, opt_s: &Option<Series>) { append_opt_series!(self, opt_s) } fn append_series(&mut self, s: &Series) { append_series!(self, s); } fn finish(&mut self) -> LargeListChunked { finish_largelist_builder!(self) } } pub fn get_large_list_builder( dt: &ArrowDataType, capacity: usize, 
name: &str, ) -> Box<dyn LargListBuilderTrait> { macro_rules! get_primitive_builder { ($type:ty) => {{ let values_builder = PrimitiveBuilder::<$type>::new(capacity); let builder = LargeListPrimitiveChunkedBuilder::new(&name, values_builder, capacity); Box::new(builder) }}; } macro_rules! get_utf8_builder { () => {{ let values_builder = StringBuilder::new(capacity); let builder = LargeListUtf8ChunkedBuilder::new(&name, values_builder, capacity); Box::new(builder) }}; } match_arrow_data_type_apply_macro!(dt, get_primitive_builder, get_utf8_builder) } #[cfg(test)] mod test { use super::*; use arrow::array::Int32Array; #[test] fn test_existing_null_bitmap() { let mut builder = PrimitiveBuilder::<UInt32Type>::new(3); for val in &[Some(1), None, Some(2)] { builder.append_option(*val).unwrap(); } let arr = builder.finish(); let (null_count, buf) = get_bitmap(&arr); let new_arr = build_with_existing_null_bitmap_and_slice::<UInt32Type>(buf, null_count, &[7, 8, 9]); assert!(new_arr.is_valid(0)); assert!(new_arr.is_null(1)); assert!(new_arr.is_valid(2)); } #[test] fn from_vec() { // Can only have a zero copy to arrow memory if address of first byte % 64 == 0 let mut v = Vec::with_capacity_aligned(2); v.push(1); v.push(2); let ptr = v.as_ptr(); assert_eq!((ptr as usize) % 64, 0); let a = aligned_vec_to_primitive_array::<Int32Type>(AlignedVec::new(v).unwrap(), None, 0); assert_eq!(a.value_slice(0, 2), &[1, 2]) } #[test] fn test_list_builder() { let values_builder = Int32Array::builder(10); let mut builder = LargeListPrimitiveChunkedBuilder::new("a", values_builder, 10); // create a series containing two chunks let mut s1 = Int32Chunked::new_from_slice("a", &[1, 2, 3]).into_series(); let s2 = Int32Chunked::new_from_slice("b", &[4, 5, 6]).into_series(); s1.append(&s2).unwrap(); builder.append_series(&s1); builder.append_series(&s2); let ls = builder.finish(); if let AnyType::LargeList(s) = ls.get_any(0) { // many chunks are aggregated to one in the ListArray assert_eq!(s.len(), 6) } else { assert!(false) } if let AnyType::LargeList(s) = ls.get_any(1) { assert_eq!(s.len(), 3) } else
} }
{ assert!(false) }
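round_upto_power_of_2 in builder.rs above uses the standard bit trick for rounding up to a power-of-two multiple: add `factor - 1`, then clear the low bits with the mask `!(factor - 1)`. A quick self-check of that identity in Python (arbitrary test values; Python's `~` plays the role of Rust's `!` here):

def round_upto_power_of_2(num, factor):
    # factor must be a power of two; adding factor - 1 and clearing the
    # low bits rounds num up to the next multiple of factor
    assert factor > 0 and factor & (factor - 1) == 0
    return (num + (factor - 1)) & ~(factor - 1)

for n in (0, 1, 63, 64, 65, 1000):
    assert round_upto_power_of_2(n, 64) == -(-n // 64) * 64  # ceiling-division check

print(round_upto_power_of_2(65, 64))  # 128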
channel.py
# -*- coding: utf8 -*- __all__ = ('Channel',) import datetime from sqlalchemy import func from notifico import db from notifico.models.bot import BotEvent class Channel(db.Model): id = db.Column(db.Integer, primary_key=True) created = db.Column(db.TIMESTAMP(), default=datetime.datetime.utcnow) channel = db.Column(db.String(80), nullable=False) host = db.Column(db.String(255), nullable=False) port = db.Column(db.Integer, default=6667) ssl = db.Column(db.Boolean, default=False) public = db.Column(db.Boolean, default=False) project_id = db.Column(db.Integer, db.ForeignKey('project.id')) project = db.relationship('Project', backref=db.backref( 'channels', order_by=id, lazy='dynamic', cascade='all, delete-orphan' )) @classmethod def new(cls, channel, host, port=6667, ssl=False, public=False): c = cls() c.channel = channel c.host = host c.port = port c.ssl = ssl c.public = public return c @classmethod def
(cls):
        q = (
            db.session.query(
                Channel.host,
                func.count(Channel.channel).label('count')
            )
            .filter_by(public=True)
            .group_by(Channel.host)
            # '-count' is passed through as raw SQL, producing ORDER BY -count,
            # which sorts the numeric count in descending order.
            .order_by('-count')
        )

        for network, channel_count in q:
            yield network, channel_count

    def last_event(self):
        """
        Returns the latest BotEvent to occur for this channel.
        """
        return BotEvent.query.filter_by(
            host=self.host,
            port=self.port,
            ssl=self.ssl,
            channel=self.channel
        ).order_by(BotEvent.created.desc()).first()

    @classmethod
    def visible(cls, q, user=None):
        """
        Modifies the sqlalchemy query `q` to only show channels accessible
        to `user`. If `user` is ``None``, only shows public channels in
        public projects.
        """
        from notifico.models import Project

        if user and user.in_group('admin'):
            # We don't do any filtering for admins,
            # who should have full visibility.
            pass
        else:
            q = q.join(Channel.project).filter(
                Project.public == True,
                Channel.public == True
            )

        return q
channel_count_by_network
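channel_count_by_network above is a GROUP BY over public channels: one row per IRC host with its channel count, sorted descending. The same aggregation over plain tuples, using collections.Counter, for illustration (the sample rows are made up):

from collections import Counter

# Made-up (host, channel) rows standing in for public Channel records
rows = [
    ('chat.freenode.net', '#a'),
    ('chat.freenode.net', '#b'),
    ('irc.example.org', '#c'),
]

counts = Counter(host for host, _channel in rows)
for network, channel_count in counts.most_common():  # descending by count
    print(network, channel_count)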
gen_GpuCommandBuffer.rs
#![allow(unused_imports)] use super::*; use wasm_bindgen::prelude::*; #[cfg(web_sys_unstable_apis)] #[wasm_bindgen] extern "C" { # [ wasm_bindgen ( extends = :: js_sys :: Object , js_name = GPUCommandBuffer , typescript_type = "GPUCommandBuffer" ) ] #[derive(Debug, Clone, PartialEq, Eq)] #[doc = "The `GpuCommandBuffer` class."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/GPUCommandBuffer)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `GpuCommandBuffer`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub type GpuCommandBuffer; #[cfg(web_sys_unstable_apis)] # [ wasm_bindgen ( structural , method , getter , js_class = "GPUCommandBuffer" , js_name = executionTime ) ] #[doc = "Getter for the `executionTime` field of this object."] #[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/GPUCommandBuffer/executionTime)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `GpuCommandBuffer`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub fn execution_time(this: &GpuCommandBuffer) -> ::js_sys::Promise; #[cfg(web_sys_unstable_apis)] # [ wasm_bindgen ( structural , method , getter , js_class = "GPUCommandBuffer" , js_name = label ) ] #[doc = "Getter for the `label` field of this object."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/GPUCommandBuffer/label)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `GpuCommandBuffer`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub fn label(this: &GpuCommandBuffer) -> Option<String>; #[cfg(web_sys_unstable_apis)] # [ wasm_bindgen ( structural , method , setter , js_class = "GPUCommandBuffer" , js_name = label ) ] #[doc = "Setter for the `label` field of this object."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/GPUCommandBuffer/label)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `GpuCommandBuffer`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub fn set_label(this: &GpuCommandBuffer, value: Option<&str>); }
test_make_solr_document.py
from unittest import TestCase import importlib import ast import json import logging from ConfigParser import ConfigParser from io import StringIO from mock import patch #ignore pycountry debug logging quiet = logging.getLogger('pycountry.db') quiet.setLevel(logging.ERROR) class TestMake_solr_document(TestCase): record = { "src_file_rec": "travel/travel.csv:1", "dob": "17/04/1979", "name": "George Jetson", "passport_no": "99999999", "passport_country": "NZ", "departure_port": "SYD", "arrival_port": "AKL" } meta = { "src_file_cid": 10 } result = { 'Airport.country_ss': [u'NZ', u'AU'], 'Airport.geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'], 'Airport_ss': [u'Sydney Intl (SYD)', u'Auckland Intl (AKL)'], 'Departed.timestamp_ss': [u'2013-12-10T00:00:00Z'], 'Entity_ss': [u'GEORGE JETSON'], 'Event_ss': [u'UNKNOWN'], 'Flight_ss': [u'UNKNOWN'], 'IssuedDocument.country_ss': [u'NZ'], 'IssuedDocument_ss': [u'99999999'], 'Location.geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'], 'Location_ss': [u'Sydney Intl (SYD)', u'Auckland Intl (AKL)'], 'Passport.country_ss': [u'NZ'], 'Passport_ss': [u'99999999'], 'Person_ss': [u'GEORGE JETSON'], 'Port.geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'], 'Port_ss': [u'Sydney Intl (SYD)', u'Auckland Intl (AKL)'], 'Travelled.timestamp_ss': [u'2013-12-10T00:00:00Z'], 'arrival_port_ss': [u'AKL'], 'attr_types': [u'Airport.geoloc', u'Departed.timestamp', u'Airport.country', u'city', u'dob', u'country', u'IssuedDocument.country', u'Location.geoloc', u'arrival_port', u'departure_port', u'geoloc', u'Travelled.timestamp', u'timestamp', u'Passport.country',u'Port.geoloc'], 'city_ss': [u'Sydney', u'Auckland'], 'country_ss': [u'NZ', u'AU'], 'data': u'["Person","GEORGE JETSON",[["dob","1979-04-17T00:00:00Z"]],[["Holds",[],[["Passport","99999999",' '[["country","NZ"]],[["Travelled",[["timestamp","2013-12-10T00:00:00Z"]],[["Flight","UNKNOWN",' '[["departure_port","SYD"],["arrival_port","AKL"]],[["Departed",[["timestamp",' '"2013-12-10T00:00:00Z"]],[["Airport","Sydney Intl (SYD)",[["country","AU"],' '["geoloc","-33.946111,151.177222"],["city","Sydney"]],[]]]],["Arrived",[],[["Airport",' '"Auckland Intl (AKL)",[["country","NZ"],["geoloc","-37.008056,174.791667"],["city","Auckland"]],' '[]]]]]]]]]]]]]]', 'departure_port_ss': [u'SYD'], 'dob_dts': [u'1979-04-17T00:00:00Z'], 'geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'], 'id': u'6daf5bc24d75fe36d25700633359fbe7c166d66ef19fc8eba236fa8670e7fa40', 'link_types': [u'Travelled', u'Holds', u'Arrived', u'Departed'], 'object_types': [u'Flight', u'Entity', u'Person', u'Airport', u'Location', u'Passport', u'IssuedDocument', u'Port', u'Event'], 'raw': u'{"arrival_port":"AKL","departure_port":"SYD","dob":"17/04/1979","name":"George Jetson",' + '"passport_country":"NZ","passport_no":"99999999"}', 'src_file_cid': 10, 'src_file_rec': [u'test/file_1:1'], 'timestamp_ss': [u'2013-12-10T00:00:00Z'] } @patch('serene_metadata.config.SereneConfig') def setUp(self, mock_config): from serene_index.helpers.index_helpers import mk_error_counter, make_solr_document from serene_metadata import generate_example_metadata self.error_counter = mk_error_counter() module = importlib.import_module('serene_index.modules.module_flight') self.builder = getattr(module, 'record_builder', None) generated_metadata = generate_example_metadata() print json.dumps(generated_metadata, indent=1) self.meta.update(generated_metadata) self.result.update(generated_metadata) self.solr_doc = make_solr_document(r=self.record, 
builder=self.builder, base=self.meta, debug=False, error_counter=self.error_counter) def
(self): self.error_counter = None def test_make_solr_document(self): self.maxDiff = None self.assertDictEqual(self.result, self.solr_doc) # a = json.dumps(self.solr_doc, indent=1, sort_keys=True) # b = json.dumps(self.result, indent=1, sort_keys=True) def test_json_correct(self): rec = json.dumps(self.solr_doc) self.assertTrue(json.loads(rec), 'Solr document does not parse as json')
tearDown
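The commented-out json.dumps lines in test_make_solr_document hint at a useful pattern: when assertDictEqual's failure output for a large nested document is hard to read, compare canonical JSON dumps instead, which yields a line-by-line diff. A self-contained sketch of that pattern (toy dicts, not the fixture above):

import json
import unittest

class DocCompareExample(unittest.TestCase):
    def test_docs_equal(self):
        expected = {"id": "1", "tags": ["a", "b"]}
        actual = {"id": "1", "tags": ["a", "b"]}
        # Sorted, indented dumps turn a failing comparison into a
        # line-oriented diff instead of one huge repr
        a = json.dumps(actual, indent=1, sort_keys=True)
        b = json.dumps(expected, indent=1, sort_keys=True)
        self.assertMultiLineEqual(b, a)

if __name__ == '__main__':
    unittest.main()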
imu.py
# -*- coding: utf-8 -*-
import pygame
from OpenGL.GL import *
from OpenGL.GLU import *
import socket
import json
from pygame.locals import *

SCREEN_SIZE = (800, 600)
address = ('', 5000)


def resize(width, height):
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(width) / height, 0.001, 10.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    gluLookAt(1.0, 2.0, -5.0,
              0.0, 0.0, 0.0,
              0.0, 1.0, 0.0)


def init():
    glEnable(GL_DEPTH_TEST)
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glShadeModel(GL_SMOOTH)
    glEnable(GL_BLEND)
    glEnable(GL_POLYGON_SMOOTH)
    glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)


def setupSocket():
    # setup socket, blocking by default
    global sock
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(address)


def read_data():
    global ax, ay, az, acx, acy, acz, temp
    ax = ay = az = acx = acy = acz = temp = 0.0
    msg, addr = sock.recvfrom(1024)
    if msg:
        # decode the UDP payload before parsing it as JSON
        data = json.loads(msg.decode())
        #print(data)
        ax, ay, az = data["filter"]
        acx, acy, acz = data["accel"]
        temp = data["temp"]


def drawText(position, textString):
    font = pygame.font.SysFont("Courier", 18, True)
    textSurface = font.render(textString, True, (255,255,255,255), (0,0,0,255))
    textData = pygame.image.tostring(textSurface, "RGBA", True)
    glRasterPos3d(*position)
    glDrawPixels(textSurface.get_width(), textSurface.get_height(), GL_RGBA, GL_UNSIGNED_BYTE, textData)


def run():
    setupSocket()
    pygame.init()
    screen = pygame.display.set_mode(SCREEN_SIZE, HWSURFACE | OPENGL | DOUBLEBUF)
    resize(*SCREEN_SIZE)
clock = pygame.time.Clock() cube = Cube((0.0, 0.0, 0.0), (.5, .5, .7)) angle = 0 while True: then = pygame.time.get_ticks() for event in pygame.event.get(): if event.type == QUIT: return if event.type == KEYUP and event.key == K_ESCAPE: return read_data() glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) text = "pitch: " + str("{0:.1f}".format(ay)) + " roll: " + str("{0:.1f}".format(ax)) drawText((1, -4, 2), text) text = "accx: " + str("{0:.2f}".format(acx)) + " accy: " + str("{0:.2f}".format(acy)) + " accz: " + str( "{0:.2f}".format(acz)) drawText((1, -4.3, 2), text) text = "temp: " + str("{0:.1f}".format(temp)) drawText((1, -4.6, 2), text) glColor((1., 1., 1.)) glLineWidth(1) glBegin(GL_LINES) for x in range(-20, 22, 2): glVertex3f(x / 10., -1, -1) glVertex3f(x / 10., -1, 1) for x in range(-20, 22, 2): glVertex3f(x / 10., -1, 1) glVertex3f(x / 10., 1, 1) for z in range(-10, 12, 2): glVertex3f(-2, -1, z / 10.) glVertex3f(2, -1, z / 10.) for z in range(-10, 12, 2): glVertex3f(-2, -1, z / 10.) glVertex3f(-2, 1, z / 10.) for z in range(-10, 12, 2): glVertex3f(2, -1, z / 10.) glVertex3f(2, 1, z / 10.) for y in range(-10, 12, 2): glVertex3f(-2, y / 10., 1) glVertex3f(2, y / 10., 1) for y in range(-10, 12, 2): glVertex3f(-2, y / 10., 1) glVertex3f(-2, y / 10., -1) for y in range(-10, 12, 2): glVertex3f(2, y / 10., 1) glVertex3f(2, y / 10., -1) glEnd() glPushMatrix() glRotate(az, 0, 1, 0) glRotate(ay, 1, 0, 0) glRotate(ax, 0, 0, 1) cube.render() glPopMatrix() pygame.display.flip() class Cube(object): def __init__(self, position, color): self.position = position self.color = color # Cube information num_faces = 6 vertices = [(-1.0, -0.2, 0.5), (1.0, -0.2, 0.5), (1.0, 0.2, 0.5), (-1.0, 0.2, 0.5), (-1.0, -0.2, -0.5), (1.0, -0.2, -0.5), (1.0, 0.2, -0.5), (-1.0, 0.2, -0.5)] normals = [(0.0, 0.0, +1.0), # front (0.0, 0.0, -1.0), # back (+1.0, 0.0, 0.0), # right (-1.0, 0.0, 0.0), # left (0.0, +1.0, 0.0), # top (0.0, -1.0, 0.0)] # bottom vertex_indices = [(0, 1, 2, 3), # front (4, 5, 6, 7), # back (1, 5, 6, 2), # right (0, 4, 7, 3), # left (3, 2, 6, 7), # top (0, 1, 5, 4)] # bottom def render(self): then = pygame.time.get_ticks() vertices = self.vertices # Draw all 6 faces of the cube glBegin(GL_QUADS) for face_no in range(self.num_faces): if face_no == 1: glColor(1.0, 0.0, 0.0) else: glColor(self.color) glNormal3dv(self.normals[face_no]) v1, v2, v3, v4 = self.vertex_indices[face_no] glVertex(vertices[v1]) glVertex(vertices[v2]) glVertex(vertices[v3]) glVertex(vertices[v4]) glEnd() if __name__ == "__main__": run()
init()
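read_data above expects UDP datagrams on port 5000 containing JSON with three keys: "filter" (the three filtered angles unpacked into ax, ay, az), "accel" (acx, acy, acz) and "temp". A minimal sender matching that wire format, handy for testing the viewer without real IMU hardware (the sample numbers are arbitrary):

import json
import socket

# Arbitrary sample values; the keys match what read_data() unpacks
packet = json.dumps({
    "filter": [10.0, -5.0, 0.0],   # ax, ay, az
    "accel": [0.01, 0.02, 0.98],   # acx, acy, acz
    "temp": 24.5,
}).encode()

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(packet, ("127.0.0.1", 5000))  # port 5000 from `address` above
sock.close()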
jiraQARules.js
function
(q) { $.get("/jira/tickets", {q: q, full: $("#full")[0].checked}) .done(function (data) { //let result = $("#result"); //result.empty(); //result.jsonView(JSON.stringify(data), {collapsed: true}); let value = ""; data.forEach(function (issue) { value = value + "\n" + issue.key; if (issue.fields.customfield_10351) { issue.fields.customfield_10351.forEach(function (valueIndicator) { if (valueIndicator.value === "NextGen BBVA") { value = value + " es NextGen"; if (!issue.fields.customfield_11905) { value = value + " pero no ha indicado cuáles componentes NextGen utiliza." } else { value = value + "\n" + issue.key + " tiene todos sus indicadores OK" } } else { value = value + "\n" + issue.key + " tiene todos sus indicadores OK" } }); } }); $("#validation").html(value); }); }
jiraQARules
OutStruct.py
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Output/OutStruct.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Output/OutStruct
"""

from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from ._frozen import FrozenClass

from ._check import InitUnKnowClassError
from .MeshSolution import MeshSolution


class OutStruct(FrozenClass):
    """Gather the structural module outputs"""

    VERSION = 1

    # save and copy methods are available in all objects
    save = save
    copy = copy
    # get_logger method is available in all objects
    get_logger = get_logger

    def __init__(
        self,
        Time=None,
        Angle=None,
        Nt_tot=None,
        Na_tot=None,
        logger_name="Pyleecan.Structural",
        Yr=None,
        Vr=None,
        Ar=None,
        meshsolution=-1,
        FEA_dict=None,
        init_dict=None,
        init_str=None,
    ):
        """Constructor of the class. Can be used in three ways:
        - __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value
            for pyleecan type, -1 will call the default constructor
        - __init__ (init_dict = d) d must be a dictionary with property names as keys
        - __init__ (init_str = s) s must be a string
        s is the file path to load

        ndarray or list can be given for Vector and Matrix
        object or dict can be given for pyleecan Object"""

        if init_str is not None:  # Load from a file
            init_dict = load_init_dict(init_str)[1]
        if init_dict is not None:  # Initialisation by dict
            # Overwrite default value with init_dict content
            if "Time" in list(init_dict.keys()):
                Time = init_dict["Time"]
            if "Angle" in list(init_dict.keys()):
                Angle = init_dict["Angle"]
            if "Nt_tot" in list(init_dict.keys()):
                Nt_tot = init_dict["Nt_tot"]
            if "Na_tot" in list(init_dict.keys()):
                Na_tot = init_dict["Na_tot"]
            if "logger_name" in list(init_dict.keys()):
                logger_name = init_dict["logger_name"]
            if "Yr" in list(init_dict.keys()):
                Yr = init_dict["Yr"]
            if "Vr" in list(init_dict.keys()):
                Vr = init_dict["Vr"]
            if "Ar" in list(init_dict.keys()):
                Ar = init_dict["Ar"]
            if "meshsolution" in list(init_dict.keys()):
                meshsolution = init_dict["meshsolution"]
            if "FEA_dict" in list(init_dict.keys()):
                FEA_dict = init_dict["FEA_dict"]
        # Set the properties (value check and conversion are done in setter)
        self.parent = None
        self.Time = Time
        self.Angle = Angle
        self.Nt_tot = Nt_tot
        self.Na_tot = Na_tot
        self.logger_name = logger_name
        self.Yr = Yr
        self.Vr = Vr
        self.Ar = Ar
        self.meshsolution = meshsolution
        self.FEA_dict = FEA_dict

        # The class is frozen, for now it's impossible to add new properties
        self._freeze()

    def __str__(self):
        """Convert this object into a readable string (for print)"""

        OutStruct_str = ""
        if self.parent is None:
            OutStruct_str += "parent = None " + linesep
        else:
            OutStruct_str += "parent = " + str(type(self.parent)) + " object" + linesep
        OutStruct_str += "Time = " + str(self.Time) + linesep + linesep
        OutStruct_str += "Angle = " + str(self.Angle) + linesep + linesep
        OutStruct_str += "Nt_tot = " + str(self.Nt_tot) + linesep
        OutStruct_str += "Na_tot = " + str(self.Na_tot) + linesep
        OutStruct_str += 'logger_name = "' + str(self.logger_name) + '"' + linesep
        OutStruct_str += "Yr = " + str(self.Yr) + linesep + linesep
        OutStruct_str += "Vr = " + str(self.Vr) + linesep + linesep
        OutStruct_str += "Ar = " + str(self.Ar) + linesep + linesep
        if self.meshsolution is not None:
            tmp = (
                self.meshsolution.__str__()
                .replace(linesep, linesep + "\t")
                .rstrip("\t")
            )
            OutStruct_str += "meshsolution = " + tmp
        else:
            OutStruct_str += "meshsolution = None" + linesep + linesep
        OutStruct_str += "FEA_dict = " + str(self.FEA_dict) + linesep
        return OutStruct_str

    def __eq__(self, other):
        """Compare two objects (skip parent)"""

        if type(other) != type(self):
            return False
        if other.Time != self.Time:
            return False
        if other.Angle != self.Angle:
            return False
        if other.Nt_tot != self.Nt_tot:
            return False
        if other.Na_tot != self.Na_tot:
            return False
        if other.logger_name != self.logger_name:
            return False
        if other.Yr != self.Yr:
            return False
        if other.Vr != self.Vr:
            return False
        if other.Ar != self.Ar:
            return False
        if other.meshsolution != self.meshsolution:
            return False
        if other.FEA_dict != self.FEA_dict:
            return False
        return True

    def compare(self, other, name="self", ignore_list=None):
        """Compare two objects and return list of differences"""

        if ignore_list is None:
            ignore_list = list()
        if type(other) != type(self):
            return ["type(" + name + ")"]
        diff_list = list()
        if (other.Time is None and self.Time is not None) or (
            other.Time is not None and self.Time is None
        ):
            diff_list.append(name + ".Time None mismatch")
        elif self.Time is not None:
            diff_list.extend(self.Time.compare(other.Time, name=name + ".Time"))
        if (other.Angle is None and self.Angle is not None) or (
            other.Angle is not None and self.Angle is None
        ):
            diff_list.append(name + ".Angle None mismatch")
        elif self.Angle is not None:
            diff_list.extend(self.Angle.compare(other.Angle, name=name + ".Angle"))
        if other._Nt_tot != self._Nt_tot:
            diff_list.append(name + ".Nt_tot")
        if other._Na_tot != self._Na_tot:
            diff_list.append(name + ".Na_tot")
        if other._logger_name != self._logger_name:
            diff_list.append(name + ".logger_name")
        if (other.Yr is None and self.Yr is not None) or (
            other.Yr is not None and self.Yr is None
        ):
            diff_list.append(name + ".Yr None mismatch")
        elif self.Yr is not None:
            diff_list.extend(self.Yr.compare(other.Yr, name=name + ".Yr"))
        if (other.Vr is None and self.Vr is not None) or (
            other.Vr is not None and self.Vr is None
        ):
            diff_list.append(name + ".Vr None mismatch")
        elif self.Vr is not None:
            diff_list.extend(self.Vr.compare(other.Vr, name=name + ".Vr"))
        if (other.Ar is None and self.Ar is not None) or (
            other.Ar is not None and self.Ar is None
        ):
            diff_list.append(name + ".Ar None mismatch")
        elif self.Ar is not None:
            diff_list.extend(self.Ar.compare(other.Ar, name=name + ".Ar"))
        if (other.meshsolution is None and self.meshsolution is not None) or (
            other.meshsolution is not None and self.meshsolution is None
        ):
            diff_list.append(name + ".meshsolution None mismatch")
        elif self.meshsolution is not None:
            diff_list.extend(
                self.meshsolution.compare(
                    other.meshsolution, name=name + ".meshsolution"
                )
            )
        if other._FEA_dict != self._FEA_dict:
            diff_list.append(name + ".FEA_dict")
        # Filter out ignored differences
        diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
        return diff_list

    def __sizeof__(self):
        """Return the size in memory of the object (including all subobjects)"""

        S = 0  # Full size of the object
        S += getsizeof(self.Time)
        S += getsizeof(self.Angle)
        S += getsizeof(self.Nt_tot)
        S += getsizeof(self.Na_tot)
        S += getsizeof(self.logger_name)
        S += getsizeof(self.Yr)
        S += getsizeof(self.Vr)
        S += getsizeof(self.Ar)
        S += getsizeof(self.meshsolution)
        if self.FEA_dict is not None:
            for key, value in self.FEA_dict.items():
                S += getsizeof(value) + getsizeof(key)
        return S

    def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
        """
        Convert this object into a json serializable dict (can be used in __init__).
        type_handle_ndarray: int
            How to handle ndarray (0: tolist, 1: copy, 2: nothing)
        keep_function : bool
            True to keep the function object, else return str
        Optional keyword input parameter is for internal use only
        and may prevent json serializability.
""" OutStruct_dict = dict() if self.Time is None: OutStruct_dict["Time"] = None else: OutStruct_dict["Time"] = self.Time.as_dict( type_handle_ndarray=type_handle_ndarray, keep_function=keep_function, **kwargs ) if self.Angle is None: OutStruct_dict["Angle"] = None else: OutStruct_dict["Angle"] = self.Angle.as_dict( type_handle_ndarray=type_handle_ndarray, keep_function=keep_function, **kwargs ) OutStruct_dict["Nt_tot"] = self.Nt_tot OutStruct_dict["Na_tot"] = self.Na_tot OutStruct_dict["logger_name"] = self.logger_name if self.Yr is None: OutStruct_dict["Yr"] = None else: OutStruct_dict["Yr"] = self.Yr.as_dict( type_handle_ndarray=type_handle_ndarray, keep_function=keep_function, **kwargs ) if self.Vr is None: OutStruct_dict["Vr"] = None else: OutStruct_dict["Vr"] = self.Vr.as_dict( type_handle_ndarray=type_handle_ndarray, keep_function=keep_function, **kwargs ) if self.Ar is None: OutStruct_dict["Ar"] = None else: OutStruct_dict["Ar"] = self.Ar.as_dict( type_handle_ndarray=type_handle_ndarray, keep_function=keep_function, **kwargs ) if self.meshsolution is None: OutStruct_dict["meshsolution"] = None else: OutStruct_dict["meshsolution"] = self.meshsolution.as_dict( type_handle_ndarray=type_handle_ndarray, keep_function=keep_function, **kwargs ) OutStruct_dict["FEA_dict"] = ( self.FEA_dict.copy() if self.FEA_dict is not None else None ) # The class name is added to the dict for deserialisation purpose OutStruct_dict["__class__"] = "OutStruct" return OutStruct_dict def _set_None(self): """Set all the properties to None (except pyleecan object)""" self.Time = None self.Angle = None self.Nt_tot = None self.Na_tot = None self.logger_name = None self.Yr = None self.Vr = None self.Ar = None if self.meshsolution is not None: self.meshsolution._set_None() self.FEA_dict = None def _get_Time(self): """getter of Time""" return self._Time def _set_Time(self, value): """setter of Time""" if isinstance(value, str): # Load from file value = load_init_dict(value)[1] if isinstance(value, dict) and "__class__" in value: class_obj = import_class( "SciDataTool.Classes", value.get("__class__"), "Time" ) value = class_obj(init_dict=value) elif type(value) is int and value == -1: # Default constructor value = Data() check_var("Time", value, "Data") self._Time = value Time = property( fget=_get_Time, fset=_set_Time, doc=u"""Structural time Data object :Type: SciDataTool.Classes.DataND.Data """, ) def _get_Angle(self): """getter of Angle""" return self._Angle def _set_Angle(self, value): """setter of Angle""" if isinstance(value, str): # Load from file value = load_init_dict(value)[1] if isinstance(value, dict) and "__class__" in value: class_obj = import_class( "SciDataTool.Classes", value.get("__class__"), "Angle" ) value = class_obj(init_dict=value) elif type(value) is int and value == -1: # Default constructor value = Data() check_var("Angle", value, "Data") self._Angle = value Angle = property( fget=_get_Angle, fset=_set_Angle, doc=u"""Structural position Data object :Type: SciDataTool.Classes.DataND.Data """, ) def _get_Nt_tot(self): """getter of Nt_tot""" return self._Nt_tot def _set_Nt_tot(self, value): """setter of Nt_tot""" check_var("Nt_tot", value, "int") self._Nt_tot = value Nt_tot = property( fget=_get_Nt_tot, fset=_set_Nt_tot, doc=u"""Length of the time vector :Type: int """, ) def _get_Na_tot(self): """getter of Na_tot""" return self._Na_tot def _set_Na_tot(self, value): """setter of Na_tot""" check_var("Na_tot", value, "int") self._Na_tot = value Na_tot = property( fget=_get_Na_tot, 
fset=_set_Na_tot, doc=u"""Length of the angle vector :Type: int """, ) def _get_logger_name(self): """getter of logger_name""" return self._logger_name def _set_logger_name(self, value): """setter of logger_name""" check_var("logger_name", value, "str") self._logger_name = value logger_name = property( fget=_get_logger_name, fset=_set_logger_name, doc=u"""Name of the logger to use :Type: str """, ) def _get_Yr(self): """getter of Yr""" return self._Yr def _set_Yr(self, value): """setter of Yr""" if isinstance(value, str): # Load from file value = load_init_dict(value)[1] if isinstance(value, dict) and "__class__" in value: class_obj = import_class( "SciDataTool.Classes", value.get("__class__"), "Yr" ) value = class_obj(init_dict=value) elif type(value) is int and value == -1: # Default constructor value = DataND() check_var("Yr", value, "DataND") self._Yr = value Yr = property( fget=_get_Yr, fset=_set_Yr, doc=u"""Displacement output :Type: SciDataTool.Classes.DataND.DataND """, ) def _get_Vr(self): """getter of Vr""" return self._Vr def _set_Vr(self, value): """setter of Vr""" if isinstance(value, str): # Load from file value = load_init_dict(value)[1] if isinstance(value, dict) and "__class__" in value: class_obj = import_class( "SciDataTool.Classes", value.get("__class__"), "Vr" ) value = class_obj(init_dict=value) elif type(value) is int and value == -1: # Default constructor value = DataND() check_var("Vr", value, "DataND") self._Vr = value Vr = property( fget=_get_Vr, fset=_set_Vr, doc=u"""Velocity output :Type: SciDataTool.Classes.DataND.DataND """, ) def _get_Ar(self): """getter of Ar""" return self._Ar def _set_Ar(self, value): """setter of Ar""" if isinstance(value, str): # Load from file value = load_init_dict(value)[1] if isinstance(value, dict) and "__class__" in value: class_obj = import_class( "SciDataTool.Classes", value.get("__class__"), "Ar" ) value = class_obj(init_dict=value) elif type(value) is int and value == -1: # Default constructor value = DataND() check_var("Ar", value, "DataND") self._Ar = value Ar = property( fget=_get_Ar, fset=_set_Ar, doc=u"""Acceleration output :Type: SciDataTool.Classes.DataND.DataND """, ) def _get_meshsolution(self): """getter of meshsolution""" return self._meshsolution def _set_meshsolution(self, value): """setter of meshsolution""" if isinstance(value, str): # Load from file value = load_init_dict(value)[1] if isinstance(value, dict) and "__class__" in value: class_obj = import_class( "pyleecan.Classes", value.get("__class__"), "meshsolution" ) value = class_obj(init_dict=value) elif type(value) is int and value == -1: # Default constructor value = MeshSolution() check_var("meshsolution", value, "MeshSolution") self._meshsolution = value if self._meshsolution is not None: self._meshsolution.parent = self meshsolution = property( fget=_get_meshsolution, fset=_set_meshsolution, doc=u"""FEA software mesh and solution :Type: MeshSolution """, ) def _get_FEA_dict(self): """getter of FEA_dict""" return self._FEA_dict def _set_FEA_dict(self, value): """setter of FEA_dict""" if type(value) is int and value == -1: value = dict() check_var("FEA_dict", value, "dict") self._FEA_dict = value FEA_dict = property( fget=_get_FEA_dict, fset=_set_FEA_dict, doc=u"""dictionary containing the main FEA parameter :Type: dict """, )
assert type(init_dict) is dict
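OutStruct's constructor follows the pyleecan convention of three entry points: plain keyword arguments with defaults, an init_dict whose entries overwrite those defaults, and an init_str path that is first loaded into an init_dict. The core of that pattern, reduced to a self-contained toy class (ToyOutput is hypothetical, not a pyleecan class):

class ToyOutput:
    """Hypothetical reduction of the pyleecan constructor convention."""

    def __init__(self, Nt_tot=None, Na_tot=None, init_dict=None):
        if init_dict is not None:  # Initialisation by dict
            assert type(init_dict) is dict
            # Overwrite default values with init_dict content
            if "Nt_tot" in init_dict:
                Nt_tot = init_dict["Nt_tot"]
            if "Na_tot" in init_dict:
                Na_tot = init_dict["Na_tot"]
        self.Nt_tot = Nt_tot
        self.Na_tot = Na_tot

# Keyword and dict initialisation produce the same state
assert ToyOutput(Nt_tot=8).Nt_tot == ToyOutput(init_dict={"Nt_tot": 8}).Nt_tot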
incoming.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Modified to ignore SSL verification, since I can't currently # get it to accept proper SSL connections from the Omni CA import os import requests import urllib3 # Silence the SubjectAltNameWarning that our self-signed CA gives urllib3.disable_warnings(urllib3.exceptions.SubjectAltNameWarning) __all__ = ['Webhook'] class InvalidPayload(Exception): pass class HTTPError(Exception): pass class Webhook(object): """ Interacts with a Mattermost incoming webhook. """ def __init__(self, url, api_key, channel=None, icon_url=None, username=None): self.api_key = api_key self.channel = channel self.icon_url = icon_url self.username = username self.url = url self.dir = os.path.dirname(__file__) # a cert may be needed if you're on a secure office network # self.cert_file_path = os.path.join(self.dir, '../certificate_ca.pem') def __setitem__(self, channel, payload): if isinstance(payload, dict):
        else:
            message = payload
            payload = {}
        # Route to the channel given as the subscription key unless the
        # payload explicitly overrides it; previously the key was ignored.
        payload.setdefault('channel', channel)
        self.send(message, **payload)

    @property
    def incoming_hook_url(self):
        return '{}/hooks/{}'.format(self.url, self.api_key)

    def send(self, message, channel=None, icon_url=None, username=None):
        payload = {'text': message}

        if channel or self.channel:
            payload['channel'] = channel or self.channel
        if icon_url or self.icon_url:
            payload['icon_url'] = icon_url or self.icon_url
        if username or self.username:
            payload['username'] = username or self.username

        r = requests.post(self.incoming_hook_url, json=payload)
        # Or with the cert:
        # r = requests.post(self.incoming_hook_url, json=payload, verify=self.cert_file_path)

        if r.status_code != 200:
            raise HTTPError(r.text)
try: message = payload.pop('text') except KeyError: raise InvalidPayload('missing "text" key')
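Webhook.__setitem__ above enables an item-assignment shorthand: the key is the target channel, and the value is either a message string or a dict whose "text" entry becomes the message, with the remaining keys forwarded to send() as overrides. A hedged usage sketch reusing the Webhook class from incoming.py (the URL and API key are placeholders; the actual posts are left commented out since they hit the network):

hook = Webhook('https://mattermost.example.com', 'dummy-api-key',
               channel='town-square', username='ci-bot')

# String value: posts "build passed" to the 'builds' channel
# hook['builds'] = 'build passed'

# Dict value: 'text' is required; other keys override send() defaults
# hook['alerts'] = {'text': 'disk almost full', 'username': 'monitor-bot'}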
test_cde_io.py
import os
import shutil
from shutil import Error
import unittest

import numpy as np
import tensorflow as tf

from cdes_for_testing import all_cdes
from cfl.dataset import Dataset

'''
The following code runs all tests in CondExpIOTests on all implemented
CondExpXxxx classes.
'''

def make_cde_io_tests(cond_exp_class):

    # generic test class for any CondExpBase descendant
    # (passed in as cond_exp_class)
    class CondExpIOTests(unittest.TestCase):
    return CondExpIOTests


# Register one concrete, discoverable TestCase per CDE implementation under a
# distinct module-level name; reusing a single class name would leave only the
# last implementation's suite visible to the test runner.
for cond_exp_class in all_cdes:
    class ConcreteIOTests(make_cde_io_tests(cond_exp_class)):
        pass
    globals()[cond_exp_class.__name__ + 'IOTests'] = ConcreteIOTests
        def setUp(self):
            # overridden unittest.TestCase method that will be
            # called on initialization
            self.data_info = { 'X_dims' : (10,3),
                               'Y_dims' : (10,2),
                               'Y_type' : 'continuous'}
            self.params = { 'show_plot' : False,
                            'n_epochs' : 2}
            self.ceb = cond_exp_class(self.data_info, self.params)

        ## INIT ###############################################################

        def test_init_wrong_input_types(self):
            data_info = 'str is bad'
            params = 'these are not params'
            self.assertRaises(AssertionError, cond_exp_class, data_info,
                              params)

        def test_init_wrong_data_info_keys(self):
            data_info = {}
            params = {}
            self.assertRaises(AssertionError, cond_exp_class, data_info,
                              params)

        def test_init_wrong_data_info_value_types(self):
            data_info = {'X_dims' : None, 'Y_dims' : None, 'Y_type' : None}
            params = {}
            self.assertRaises(AssertionError, cond_exp_class, data_info,
                              params)

        def test_init_wrong_data_info_values(self):
            data_info = { 'X_dims' : (0,0),
                          'Y_dims' : (0,0),
                          'Y_type' : 'continuous'}
            params = {}
            self.assertRaises(AssertionError, cond_exp_class, data_info,
                              params)

            data_info = { 'X_dims' : (10,3),
                          'Y_dims' : (12,2),
                          'Y_type' : 'continuous'}
            params = {}
            self.assertRaises(AssertionError, cond_exp_class, data_info,
                              params)

        def test_init_correct_inputs(self):
            data_info = {'X_dims' : (10,3),
                         'Y_dims' : (10,2),
                         'Y_type' : 'continuous'}
            params = {}
            ceb = cond_exp_class(data_info, params)

        ## SAVE_BLOCK #########################################################

        def test_save_block_wrong_input_type(self):
            path = 123
            self.assertRaises(AssertionError, self.ceb.save_block, path)

        def test_save_block_correct_input_type(self):
            path = 'not/a/real/path'
            self.ceb.save_block(path)
            shutil.rmtree('not')

        ## LOAD_BLOCK #########################################################

        def test_load_block_wrong_input_type(self):
            path = 123
            self.assertRaises(AssertionError, self.ceb.load_block, path)

        def test_load_block_correct_input_type(self):
            # save first so there is something to load
            path = 'not/a/real/path'
            self.ceb.save_block(path)
            self.ceb.load_block(path)
            shutil.rmtree('not')

            # check and reset state
            assert self.ceb.trained, 'CDE should be trained after loading'
            self.ceb.trained = False

        ### TRAIN #############################################################

        def test_train_wrong_input_type(self):
            dataset = 'this is not a Dataset'
            prev_results = 'this is not a dict'
            self.assertRaises(AssertionError, self.ceb.train, dataset,
                              prev_results)

        def test_train_correct_input_type(self):
            dataset = Dataset(X=np.ones(self.data_info['X_dims']),
                              Y=np.zeros(self.data_info['Y_dims']))

            # what we expect from train outputs
            tkeys = ['train_loss','val_loss','loss_plot','model_weights','pyx']
            tshapes = {'train_loss' : (self.params['n_epochs'],),
                       'val_loss' : (self.params['n_epochs'],),
                       'pyx' : (self.data_info['Y_dims'])
                      }

            for prev_results in [None, {}]:
                # reset
                self.ceb.trained = False
                train_results = self.ceb.train(dataset, prev_results)

                # check state
                assert self.ceb.trained, 'CDE should be trained after training'

                # check outputs
                assert set(train_results.keys())==set(tkeys), \
                    f'train should return dict with keys: {tkeys}'
                for k in tshapes.keys():
                    assert tshapes[k]==np.array(train_results[k]).shape, \
                        f'expected {k} to have shape {tshapes[k]} but got \
                        {train_results[k].shape}'

        def test_train_twice(self):
            dataset = Dataset(X=np.ones(self.data_info['X_dims']),
                              Y=np.zeros(self.data_info['Y_dims']))
            prev_results = None

            # reset
            self.ceb.trained = False

            # what we expect from train outputs first time
            tkeys = ['train_loss','val_loss','loss_plot','model_weights','pyx']
            train_results = self.ceb.train(dataset, prev_results)

            # check state and outputs
            assert self.ceb.trained, 'CDE should be trained after training'
            assert set(train_results.keys())==set(tkeys), \
                f'train should return dict with keys: {tkeys}'

            # what we expect from train outputs second time
            tkeys = ['pyx']
            train_results = self.ceb.train(dataset, prev_results)

            # check state and outputs
            assert self.ceb.trained, 'CDE should be trained after training'
            assert set(train_results.keys())==set(tkeys), \
                f'train should return dict with keys: {tkeys}'

        ### PREDICT ###########################################################

        def test_predict_wrong_input_type(self):
            # artificially set CDE trained = True
            self.ceb.trained = True
            dataset = 'this is not a Dataset'
            prev_results = 'this is not a dict'
            self.assertRaises(AssertionError, self.ceb.predict, dataset,
                              prev_results)

        def test_predict_correct_input_type(self):
            dataset = Dataset(X=np.ones(self.data_info['X_dims']),
                              Y=np.zeros(self.data_info['Y_dims']))

            for prev_results in [None, {}]:
                self.ceb.train(dataset, prev_results)
                pred_results = self.ceb.predict(dataset, prev_results)

                # check output
                assert set(pred_results.keys())==set(['pyx']), f'pred_results \
                    keys should contain pyx, but contains {pred_results.keys()}'
                assert pred_results['pyx'].shape==self.data_info['Y_dims'], \
                    f"expected {self.data_info['Y_dims']} but got \
                    {pred_results['pyx'].shape}"

        ### EVALUATE ##########################################################

        def test_evaluate_wrong_input_type(self):
            # artificially set CDE trained = True
            self.ceb.trained = True
            dataset = 'this is not a Dataset'
            prev_results = 'this is not a dict'
            self.assertRaises(AssertionError, self.ceb.evaluate, dataset)

        def test_evaluate_correct_input_type(self):
            dataset = Dataset(X=np.ones(self.data_info['X_dims']),
                              Y=np.zeros(self.data_info['Y_dims']))
            prev_results = None
            self.ceb.train(dataset, prev_results)
            score = self.ceb.evaluate(dataset)
            assert score.shape==()
            assert score.dtype==np.float32

        ### BUILD_MODEL #######################################################

        def test_build_model(self):
            assert isinstance(self.ceb._build_model(), tf.keras.Sequential)
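# --- Usage sketch ---
# The generated classes are ordinary TestCases, so the standard runner picks
# them up; running this module directly is enough (verbosity is optional):
#
#   if __name__ == '__main__':
#       unittest.main(verbosity=2)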
winit_wasm.rs
use bevy::{ input::{ keyboard::KeyboardInput, mouse::{MouseButtonInput, MouseMotion, MouseWheel}, }, prelude::*, }; fn main() { App::new() .insert_resource(WindowDescriptor { width: 300., height: 300., ..Default::default() }) .add_plugins(DefaultPlugins) // One time greet .add_startup_system(hello_wasm_system) // Track ticks (sanity check, whether game loop is running) .add_system(counter) // Track input events .add_system(track_input_events) .run(); } fn hello_wasm_system() { info!("hello wasm"); } fn counter(mut state: Local<CounterState>, time: Res<Time>) { if state.count % 60 == 0 { info!( "tick {} @ {:?} [Δ{}]", state.count, time.time_since_startup(), time.delta_seconds() ); } state.count += 1; } #[derive(Default)] struct CounterState { count: u32, } fn track_input_events( mut ev_keys: EventReader<KeyboardInput>, mut ev_cursor: EventReader<CursorMoved>, mut ev_motion: EventReader<MouseMotion>, mut ev_mousebtn: EventReader<MouseButtonInput>, mut ev_scroll: EventReader<MouseWheel>, ) { // Keyboard input for ev in ev_keys.iter() { if ev.state.is_pressed() { info!("Just pressed key: {:?}", ev.key_code); } else { info!("Just released key: {:?}", ev.key_code); } } // Absolute cursor position (in window coordinates)
info!("Cursor at: {}", ev.position); } // Relative mouse motion for ev in ev_motion.iter() { info!("Mouse moved {} pixels", ev.delta); } // Mouse buttons for ev in ev_mousebtn.iter() { if ev.state.is_pressed() { info!("Just pressed mouse button: {:?}", ev.button); } else { info!("Just released mouse button: {:?}", ev.button); } } // scrolling (mouse wheel, touchpad, etc.) for ev in ev_scroll.iter() { info!( "Scrolled vertically by {} and horizontally by {}.", ev.y, ev.x ); } }
for ev in ev_cursor.iter() {
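// --- Sketch: one more reader in the same style ---
// Assumes this Bevy version exposes `TouchInput` under `bevy::input::touch`;
// field names below come from that API and should be checked against the
// pinned Bevy release:
//
// fn track_touches(mut ev_touch: EventReader<TouchInput>) {
//     for ev in ev_touch.iter() {
//         info!("Touch {:?} at {}", ev.phase, ev.position);
//     }
// }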
defined_struct.rs
//! [DefinedStruct] and [DefinedStructTable] perform downcast operations
//! via into() and into_iter() trait functions for [UndefinedStruct].

use serde::Serialize;
use core::iter::FromIterator;
#[cfg(feature = "no_std")]
use alloc::vec::{Vec, IntoIter};
#[cfg(not(feature = "no_std"))]
use std::vec::IntoIter;
use crate::core::UndefinedStruct;

use super::*;

/// # SMBIOS Standard Defined Structure
///
/// Represents one of the SMBIOS defined structures or, in the case
/// of an OEM defined structure, as a generically defined Unknown variant
#[derive(Serialize, Debug)]
pub enum DefinedStruct<'a> {
    /// BIOS Information (Type 0)
    Information(SMBiosInformation<'a>),
    /// System Information (Type 1)
    SystemInformation(SMBiosSystemInformation<'a>),
    /// Baseboard (or Module) Information (Type 2)
    BaseBoardInformation(SMBiosBaseboardInformation<'a>),
    /// System Enclosure or Chassis (Type 3)
    SystemChassisInformation(SMBiosSystemChassisInformation<'a>),
    /// Processor Information (Type 4)
    ProcessorInformation(SMBiosProcessorInformation<'a>),
    /// Memory Controller Information (Type 5, Obsolete)
    MemoryControllerInformation(SMBiosMemoryControllerInformation<'a>),
    /// Memory Module Information (Type 6, Obsolete)
    MemoryModuleInformation(SMBiosMemoryModuleInformation<'a>),
    /// Cache Information (Type 7)
    CacheInformation(SMBiosCacheInformation<'a>),
    /// Port Connector Information (Type 8)
    PortConnectorInformation(SMBiosPortConnectorInformation<'a>),
    /// System Slot Information (Type 9)
    SystemSlot(SMBiosSystemSlot<'a>),
    /// On Board Devices Information (Type 10, Obsolete)
    OnBoardDeviceInformation(SMBiosOnBoardDeviceInformation<'a>),
    /// OEM Strings (Type 11)
    OemStrings(SMBiosOemStrings<'a>),
    /// System Configuration Options (Type 12)
    SystemConfigurationOptions(SMBiosSystemConfigurationOptions<'a>),
    /// BIOS Language Information (Type 13)
    LanguageInformation(SMBiosBiosLanguageInformation<'a>),
    /// Group Associations (Type 14)
    GroupAssociations(SMBiosGroupAssociations<'a>),
    /// System Event Log (Type 15)
    EventLog(SMBiosSystemEventLog<'a>),
    /// Physical Memory Array (Type 16)
    PhysicalMemoryArray(SMBiosPhysicalMemoryArray<'a>),
    /// Memory Device (Type 17)
    MemoryDevice(SMBiosMemoryDevice<'a>),
    /// 32-Bit Memory Error Information (Type 18)
    MemoryErrorInformation32Bit(SMBiosMemoryErrorInformation32<'a>),
    /// Memory Array Mapped Address (Type 19)
    MemoryArrayMappedAddress(SMBiosMemoryArrayMappedAddress<'a>),
    /// Memory Device Mapped Address (Type 20)
    MemoryDeviceMappedAddress(SMBiosMemoryDeviceMappedAddress<'a>),
    /// Built-in Pointing Device (Type 21)
    BuiltInPointingDevice(SMBiosBuiltInPointingDevice<'a>),
    /// Portable Battery (Type 22)
    PortableBattery(SMBiosPortableBattery<'a>),
    /// System Reset (Type 23)
    SystemReset(SMBiosSystemReset<'a>),
    /// Hardware Security (Type 24)
    HardwareSecurity(SMBiosHardwareSecurity<'a>),
    /// System Power Controls (Type 25)
    SystemPowerControls(SMBiosSystemPowerControls<'a>),
    /// Voltage Probe (Type 26)
    VoltageProbe(SMBiosVoltageProbe<'a>),
    /// Cooling Device (Type 27)
    CoolingDevice(SMBiosCoolingDevice<'a>),
    /// Temperature Probe (Type 28)
    TemperatureProbe(SMBiosTemperatureProbe<'a>),
    /// Electrical Current Probe (Type 29)
    ElectricalCurrentProbe(SMBiosElectricalCurrentProbe<'a>),
    /// Out-of-Band Remote Access (Type 30)
    OutOfBandRemoteAccess(SMBiosOutOfBandRemoteAccess<'a>),
    /// Boot Integrity Services (BIS) (Type 31)
    BisEntryPoint(SMBiosBisEntryPoint<'a>),
    /// System Boot Information (Type 32)
    SystemBootInformation(SMBiosSystemBootInformation<'a>),
    /// 64-Bit Memory Error Information (Type 33)
    MemoryErrorInformation64Bit(SMBiosMemoryErrorInformation64<'a>),
    /// Management Device (Type 34)
    ManagementDevice(SMBiosManagementDevice<'a>),
    /// Management Device Component (Type 35)
    ManagementDeviceComponent(SMBiosManagementDeviceComponent<'a>),
    /// Management Device Threshold Data (Type 36)
    ManagementDeviceThresholdData(SMBiosManagementDeviceThresholdData<'a>),
    /// Memory Channel (Type 37)
    MemoryChannel(SMBiosMemoryChannel<'a>),
    /// IPMI Device Information (Type 38)
    IpmiDeviceInformation(SMBiosIpmiDeviceInformation<'a>),
    /// Power Supply (Type 39)
    SystemPowerSupply(SMBiosSystemPowerSupply<'a>),
    /// Additional Information (Type 40)
    AdditionalInformation(SMBiosAdditionalInformation<'a>),
    /// Onboard Devices Extended Information (Type 41)
    OnboardDevicesExtendedInformation(SMBiosOnboardDevicesExtendedInformation<'a>),
    /// Management Controller Host Interface (Type 42)
    ManagementControllerHostInterface(SMBiosManagementControllerHostInterface<'a>),
    /// TPM Device (Type 43)
    TpmDevice(SMBiosTpmDevice<'a>),
    /// Processor Additional Information (Type 44)
    ProcessorAdditionalInformation(SMBiosProcessorAdditionalInformation<'a>),
    /// Inactive (Type 126)
    Inactive(SMBiosInactive<'a>),
    /// End-of-Table (Type 127)
    EndOfTable(SMBiosEndOfTable<'a>),
    /// OEM-Defined or Unknown Structure
    ///
    /// - A structure with a type value not yet defined, such as by a DMTF specification
    /// that supersedes the types known by this library
    /// - An OEM type with a value > 127.
    Undefined(SMBiosUnknown<'a>),
}

impl<'a> From<&'a UndefinedStruct> for DefinedStruct<'a> {
    fn from(undefined_struct: &'a UndefinedStruct) -> Self {
        match undefined_struct.header.struct_type() {
            SMBiosInformation::STRUCT_TYPE => {
                DefinedStruct::Information(SMBiosInformation::new(undefined_struct))
            }
            SMBiosSystemInformation::STRUCT_TYPE => {
                DefinedStruct::SystemInformation(SMBiosSystemInformation::new(undefined_struct))
            }
            SMBiosBaseboardInformation::STRUCT_TYPE => DefinedStruct::BaseBoardInformation(
                SMBiosBaseboardInformation::new(undefined_struct),
            ),
            SMBiosSystemChassisInformation::STRUCT_TYPE => DefinedStruct::SystemChassisInformation(
                SMBiosSystemChassisInformation::new(undefined_struct),
            ),
            SMBiosProcessorInformation::STRUCT_TYPE => DefinedStruct::ProcessorInformation(
                SMBiosProcessorInformation::new(undefined_struct),
            ),
            SMBiosMemoryControllerInformation::STRUCT_TYPE => {
                DefinedStruct::MemoryControllerInformation(SMBiosMemoryControllerInformation::new(
                    undefined_struct,
                ))
            }
            SMBiosMemoryModuleInformation::STRUCT_TYPE => DefinedStruct::MemoryModuleInformation(
                SMBiosMemoryModuleInformation::new(undefined_struct),
            ),
            SMBiosCacheInformation::STRUCT_TYPE => {
                DefinedStruct::CacheInformation(SMBiosCacheInformation::new(undefined_struct))
            }
            SMBiosPortConnectorInformation::STRUCT_TYPE => DefinedStruct::PortConnectorInformation(
                SMBiosPortConnectorInformation::new(undefined_struct),
            ),
            SMBiosSystemSlot::STRUCT_TYPE => {
                DefinedStruct::SystemSlot(SMBiosSystemSlot::new(undefined_struct))
            }
            SMBiosOnBoardDeviceInformation::STRUCT_TYPE => DefinedStruct::OnBoardDeviceInformation(
                SMBiosOnBoardDeviceInformation::new(undefined_struct),
            ),
            SMBiosOemStrings::STRUCT_TYPE => {
                DefinedStruct::OemStrings(SMBiosOemStrings::new(undefined_struct))
            }
            SMBiosSystemConfigurationOptions::STRUCT_TYPE => {
                DefinedStruct::SystemConfigurationOptions(SMBiosSystemConfigurationOptions::new(
                    undefined_struct,
                ))
            }
            SMBiosBiosLanguageInformation::STRUCT_TYPE => DefinedStruct::LanguageInformation(
                SMBiosBiosLanguageInformation::new(undefined_struct),
            ),
SMBiosGroupAssociations::STRUCT_TYPE => { DefinedStruct::GroupAssociations(SMBiosGroupAssociations::new(undefined_struct)) } SMBiosSystemEventLog::STRUCT_TYPE => { DefinedStruct::EventLog(SMBiosSystemEventLog::new(undefined_struct)) } SMBiosPhysicalMemoryArray::STRUCT_TYPE => { DefinedStruct::PhysicalMemoryArray(SMBiosPhysicalMemoryArray::new(undefined_struct)) } SMBiosMemoryDevice::STRUCT_TYPE => { DefinedStruct::MemoryDevice(SMBiosMemoryDevice::new(undefined_struct)) } SMBiosMemoryErrorInformation32::STRUCT_TYPE => { DefinedStruct::MemoryErrorInformation32Bit(SMBiosMemoryErrorInformation32::new( undefined_struct, )) } SMBiosMemoryArrayMappedAddress::STRUCT_TYPE => DefinedStruct::MemoryArrayMappedAddress( SMBiosMemoryArrayMappedAddress::new(undefined_struct), ), SMBiosMemoryDeviceMappedAddress::STRUCT_TYPE => { DefinedStruct::MemoryDeviceMappedAddress(SMBiosMemoryDeviceMappedAddress::new( undefined_struct, )) } SMBiosBuiltInPointingDevice::STRUCT_TYPE => DefinedStruct::BuiltInPointingDevice( SMBiosBuiltInPointingDevice::new(undefined_struct), ), SMBiosPortableBattery::STRUCT_TYPE => { DefinedStruct::PortableBattery(SMBiosPortableBattery::new(undefined_struct)) } SMBiosSystemReset::STRUCT_TYPE => { DefinedStruct::SystemReset(SMBiosSystemReset::new(undefined_struct)) } SMBiosHardwareSecurity::STRUCT_TYPE => { DefinedStruct::HardwareSecurity(SMBiosHardwareSecurity::new(undefined_struct)) } SMBiosSystemPowerControls::STRUCT_TYPE => { DefinedStruct::SystemPowerControls(SMBiosSystemPowerControls::new(undefined_struct)) } SMBiosVoltageProbe::STRUCT_TYPE => { DefinedStruct::VoltageProbe(SMBiosVoltageProbe::new(undefined_struct)) } SMBiosCoolingDevice::STRUCT_TYPE => { DefinedStruct::CoolingDevice(SMBiosCoolingDevice::new(undefined_struct)) } SMBiosTemperatureProbe::STRUCT_TYPE => { DefinedStruct::TemperatureProbe(SMBiosTemperatureProbe::new(undefined_struct)) } SMBiosElectricalCurrentProbe::STRUCT_TYPE => DefinedStruct::ElectricalCurrentProbe( SMBiosElectricalCurrentProbe::new(undefined_struct), ), SMBiosOutOfBandRemoteAccess::STRUCT_TYPE => DefinedStruct::OutOfBandRemoteAccess( SMBiosOutOfBandRemoteAccess::new(undefined_struct), ), SMBiosBisEntryPoint::STRUCT_TYPE => { DefinedStruct::BisEntryPoint(SMBiosBisEntryPoint::new(undefined_struct)) } SMBiosSystemBootInformation::STRUCT_TYPE => DefinedStruct::SystemBootInformation( SMBiosSystemBootInformation::new(undefined_struct), ), SMBiosMemoryErrorInformation64::STRUCT_TYPE => { DefinedStruct::MemoryErrorInformation64Bit(SMBiosMemoryErrorInformation64::new( undefined_struct, )) } SMBiosManagementDevice::STRUCT_TYPE => { DefinedStruct::ManagementDevice(SMBiosManagementDevice::new(undefined_struct)) } SMBiosManagementDeviceComponent::STRUCT_TYPE => { DefinedStruct::ManagementDeviceComponent(SMBiosManagementDeviceComponent::new( undefined_struct, )) } SMBiosManagementDeviceThresholdData::STRUCT_TYPE => { DefinedStruct::ManagementDeviceThresholdData( SMBiosManagementDeviceThresholdData::new(undefined_struct), ) } SMBiosMemoryChannel::STRUCT_TYPE => { DefinedStruct::MemoryChannel(SMBiosMemoryChannel::new(undefined_struct)) } SMBiosIpmiDeviceInformation::STRUCT_TYPE => DefinedStruct::IpmiDeviceInformation( SMBiosIpmiDeviceInformation::new(undefined_struct), ), SMBiosSystemPowerSupply::STRUCT_TYPE => { DefinedStruct::SystemPowerSupply(SMBiosSystemPowerSupply::new(undefined_struct)) } SMBiosAdditionalInformation::STRUCT_TYPE => DefinedStruct::AdditionalInformation( SMBiosAdditionalInformation::new(undefined_struct), ), 
SMBiosOnboardDevicesExtendedInformation::STRUCT_TYPE => { DefinedStruct::OnboardDevicesExtendedInformation( SMBiosOnboardDevicesExtendedInformation::new(undefined_struct), ) } SMBiosManagementControllerHostInterface::STRUCT_TYPE => { DefinedStruct::ManagementControllerHostInterface( SMBiosManagementControllerHostInterface::new(undefined_struct), ) } SMBiosTpmDevice::STRUCT_TYPE => { DefinedStruct::TpmDevice(SMBiosTpmDevice::new(undefined_struct)) } SMBiosProcessorAdditionalInformation::STRUCT_TYPE => { DefinedStruct::ProcessorAdditionalInformation( SMBiosProcessorAdditionalInformation::new(undefined_struct), ) } SMBiosInactive::STRUCT_TYPE => { DefinedStruct::Inactive(SMBiosInactive::new(undefined_struct)) } SMBiosEndOfTable::STRUCT_TYPE => { DefinedStruct::EndOfTable(SMBiosEndOfTable::new(undefined_struct)) } _ => DefinedStruct::Undefined(SMBiosUnknown::new(undefined_struct)), } } } /// # Defined Struct Table /// /// Contains a list of [DefinedStruct] items. #[derive(Serialize, Debug)] pub struct DefinedStructTable<'a>(Vec<DefinedStruct<'a>>); impl<'a> DefinedStructTable<'a> { fn new() -> DefinedStructTable<'a> { DefinedStructTable(Vec::new()) } fn add(&mut self, elem: DefinedStruct<'a>) { self.0.push(elem); } } impl<'a> IntoIterator for DefinedStructTable<'a> { type Item = DefinedStruct<'a>; type IntoIter = IntoIter<Self::Item>; fn
(self) -> Self::IntoIter { self.0.into_iter() } } impl<'a> FromIterator<&'a UndefinedStruct> for DefinedStructTable<'a> { fn from_iter<I: IntoIterator<Item = &'a UndefinedStruct>>(iter: I) -> Self { let mut defined_struct_table = DefinedStructTable::new(); for undefined_struct in iter { defined_struct_table.add(undefined_struct.into()); } defined_struct_table } }
into_iter
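// --- Usage sketch ---
// Collecting and matching on the downcast variants; `undefined_structs` is a
// hypothetical iterator of `&UndefinedStruct` (e.g. from a parsed table):
//
// let table: DefinedStructTable = undefined_structs.collect();
// for defined in table {
//     match defined {
//         DefinedStruct::SystemInformation(info) => { /* inspect Type 1 fields */ }
//         DefinedStruct::Undefined(unknown) => { /* OEM or unknown type */ }
//         _ => {}
//     }
// }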
motd.rs
use crate::TestCaseModel; use atelier_core::builder::traits::ErrorSource; use atelier_core::builder::{ traits, MemberBuilder, ModelBuilder, OperationBuilder, ResourceBuilder, ServiceBuilder, ShapeTraits, SimpleShapeBuilder, StructureBuilder, }; use atelier_core::model::Model; use atelier_core::Version; use std::convert::TryInto; const MESSAGE_OF_THE_DAY_AS_LINES: &[&str] = &[ "operation::example.motd#GetMessage", "operation::example.motd#GetMessage::error=>example.motd#BadDateValue", "operation::example.motd#GetMessage::input=>example.motd#GetMessageInput", "operation::example.motd#GetMessage::output=>example.motd#GetMessageOutput", "operation::example.motd#GetMessage::trait::smithy.api#readonly<={}", "resource::example.motd#Message", "resource::example.motd#Message::identifier::date=>example.motd#Date", "resource::example.motd#Message::read=>example.motd#GetMessage", "service::example.motd#MessageOfTheDay", "service::example.motd#MessageOfTheDay::resource=>example.motd#Message", "service::example.motd#MessageOfTheDay::trait::smithy.api#documentation<=\"Provides a Message of the day.\"", "service::example.motd#MessageOfTheDay::version<=\"2020-06-21\"", "string::example.motd#Date", "string::example.motd#Date::trait::smithy.api#pattern<=\"^\\d\\d\\d\\d\\-\\d\\d-\\d\\d$\"", "structure::example.motd#BadDateValue", "structure::example.motd#BadDateValue::errorMessage::trait::smithy.api#required<={}", "structure::example.motd#BadDateValue::errorMessage=>smithy.api#String", "structure::example.motd#BadDateValue::trait::smithy.api#error<=\"client\"", "structure::example.motd#GetMessageInput", "structure::example.motd#GetMessageInput::date=>example.motd#Date", "structure::example.motd#GetMessageOutput", "structure::example.motd#GetMessageOutput::message::trait::smithy.api#required<={}", "structure::example.motd#GetMessageOutput::message=>smithy.api#String", ]; /// /// Return a test case for the _message of the day_ model. /// pub fn make_message_of_the_day_model() -> TestCaseModel { let model: Model = ModelBuilder::new(Version::V10, "example.motd") .service( ServiceBuilder::new("MessageOfTheDay", "2020-06-21") .documentation("Provides a Message of the day.") .resource("Message") .into(), ) .resource( ResourceBuilder::new("Message") .identifier("date", "Date") .read("GetMessage") .into(), ) .simple_shape( SimpleShapeBuilder::string("Date") .apply_trait(traits::pattern(r"^\d\d\d\d\-\d\d-\d\d$"))
.operation( OperationBuilder::new("GetMessage") .readonly() .input("GetMessageInput") .output("GetMessageOutput") .error("BadDateValue") .into(), ) .structure( StructureBuilder::new("GetMessageInput") .member("date", "Date") .into(), ) .structure( StructureBuilder::new("GetMessageOutput") .add_member(MemberBuilder::string("message").required().into()) .into(), ) .structure( StructureBuilder::new("BadDateValue") .error_source(ErrorSource::Client) .add_member(MemberBuilder::string("errorMessage").required().into()) .into(), ) .try_into() .unwrap(); TestCaseModel { model, expected_lines: MESSAGE_OF_THE_DAY_AS_LINES.to_vec(), } }
.into(), )
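// --- Usage sketch ---
// The builder above pairs the model with its expected line-oriented form; a
// consuming test might serialize `case.model` and compare it line-by-line
// against `case.expected_lines` (assertion style illustrative):
//
// let case = make_message_of_the_day_model();
// assert_eq!(case.expected_lines, MESSAGE_OF_THE_DAY_AS_LINES.to_vec());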
wrapRename.spec.ts
import RenameTransform from './../src/index'; import { buildSchema, GraphQLObjectType, printSchema } from 'graphql'; import InMemoryLRUCache from '@graphql-mesh/cache-inmemory-lru'; import { MeshPubSub } from '@graphql-mesh/types'; import { PubSub } from '@graphql-mesh/utils'; import { wrapSchema } from '@graphql-tools/wrap'; describe('rename', () => { const schema = buildSchema(/* GraphQL */ ` type Query { my_user: MyUser! my_book: MyBook!
profile(profile_id: ID!): Profile } type MyUser { id: ID! } type Profile { id: ID! } type MyBook { id: ID! } `); let cache: InMemoryLRUCache; let pubsub: MeshPubSub; const baseDir: string = undefined; beforeEach(() => { cache = new InMemoryLRUCache(); pubsub = new PubSub(); }); it('should change the name of a type', () => { const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { renames: [ { from: { type: 'MyUser', }, to: { type: 'User', }, }, ], }, cache, pubsub, baseDir, }), ], }); expect(newSchema.getType('MyUser')).toBeUndefined(); expect(newSchema.getType('User')).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should change the name of a field', () => { const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { mode: 'wrap', renames: [ { from: { type: 'Query', field: 'my_user', }, to: { type: 'Query', field: 'user', }, }, ], }, cache, pubsub, baseDir, }), ], }); const queryType = newSchema.getType('Query') as GraphQLObjectType; const fieldMap = queryType.getFields(); expect(fieldMap.my_user).toBeUndefined(); expect(fieldMap.user).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should change the name of multiple type names', () => { const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { mode: 'wrap', renames: [ { from: { type: 'My(.*)', }, to: { type: '$1', }, useRegExpForTypes: true, }, ], }, cache, pubsub, baseDir, }), ], }); expect(newSchema.getType('MyUser')).toBeUndefined(); expect(newSchema.getType('User')).toBeDefined(); expect(newSchema.getType('MyBook')).toBeUndefined(); expect(newSchema.getType('Book')).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should change the name of multiple fields', () => { const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { renames: [ { from: { type: 'Query', field: 'my_(.*)', }, to: { type: 'Query', field: '$1', }, useRegExpForFields: true, }, ], }, cache, pubsub, baseDir, }), ], }); const queryType = newSchema.getType('Query') as GraphQLObjectType; const fieldMap = queryType.getFields(); expect(fieldMap.my_user).toBeUndefined(); expect(fieldMap.user).toBeDefined(); expect(fieldMap.my_book).toBeUndefined(); expect(fieldMap.book).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should replace the first occurrence of a substring in a field', () => { const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { mode: 'wrap', renames: [ { from: { type: 'Query', field: 'o(.*)', }, to: { type: 'Query', field: '$1', }, useRegExpForFields: true, }, ], }, cache, pubsub, baseDir, }), ], }); const queryType = newSchema.getType('Query') as GraphQLObjectType; const fieldMap = queryType.getFields(); expect(fieldMap.my_book).toBeUndefined(); expect(fieldMap.my_bok).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should replace all occurrences of a substring in a type', () => { const schema = buildSchema(/* GraphQL */ ` type Query { api_user_v1_api: ApiUserV1Api! } type ApiUserV1Api { id: ID! 
} `); const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { renames: [ { from: { type: 'Api(.*?)', }, to: { type: '$1', }, useRegExpForTypes: true, regExpFlags: 'g', }, ], }, cache, pubsub, baseDir, }), ], }); expect(newSchema.getType('ApiUserV1Api')).toBeUndefined(); expect(newSchema.getType('UserV1')).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should replace all occurrences of multiple substrings in a type', () => { const schema = buildSchema(/* GraphQL */ ` type Query { api_user_v1_api: ApiUserV1Api! } type ApiUserV1Api { id: ID! } `); const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { renames: [ { from: { type: 'Api|V1(.*?)', }, to: { type: '$1', }, useRegExpForTypes: true, regExpFlags: 'g', }, ], }, cache, pubsub, baseDir, }), ], }); expect(newSchema.getType('ApiUserV1Api')).toBeUndefined(); expect(newSchema.getType('User')).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should replace all occurrences of a substring in a field', () => { const schema = buildSchema(/* GraphQL */ ` type Query { api_user_v1_api: ApiUserV1Api! } type ApiUserV1Api { id: ID! } `); const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { renames: [ { from: { type: 'Query', field: 'api_|_api(.*?)', }, to: { type: 'Query', field: '$1', }, useRegExpForFields: true, regExpFlags: 'g', }, ], }, cache, pubsub, baseDir, }), ], }); const queryType = newSchema.getType('Query') as GraphQLObjectType; const fieldMap = queryType.getFields(); expect(fieldMap.api_user_v1_api).toBeUndefined(); expect(fieldMap.user_v1).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should replace all occurrences of multiple substrings in a field', () => { const schema = buildSchema(/* GraphQL */ ` type Query { api_user_v1_api: ApiUserV1Api! } type ApiUserV1Api { id: ID! 
} `); const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { renames: [ { from: { type: 'Query', field: 'api_|_api|v1_|_v1(.*?)', }, to: { type: 'Query', field: '$1', }, useRegExpForFields: true, regExpFlags: 'g', }, ], }, cache, pubsub, baseDir, }), ], }); const queryType = newSchema.getType('Query') as GraphQLObjectType; const fieldMap = queryType.getFields(); expect(fieldMap.api_user_v1_api).toBeUndefined(); expect(fieldMap.user).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should only affect specified type', () => { const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { mode: 'wrap', renames: [ { from: { type: 'Query', field: 'o(.*)', }, to: { type: 'Query', field: '$1', }, useRegExpForFields: true, }, ], }, cache, pubsub, baseDir, }), ], }); const queryType = newSchema.getType('Query') as GraphQLObjectType; const fieldMap = queryType.getFields(); expect(fieldMap.my_book).toBeUndefined(); expect(fieldMap.my_bok).toBeDefined(); const myUserType = newSchema.getType('MyUser') as GraphQLObjectType; const myUserFields = myUserType.getFields(); expect(myUserFields.id).toBeDefined(); const myBookType = newSchema.getType('MyBook') as GraphQLObjectType; const myBookFields = myBookType.getFields(); expect(myBookFields.id).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should only affect specified field argument', () => { const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { mode: 'wrap', renames: [ { from: { type: 'Query', field: 'profile', argument: 'profile_id', }, to: { type: 'Query', field: 'profile', argument: 'profileId', }, }, ], }, cache, pubsub, baseDir, }), ], }); const queryType = newSchema.getType('Query') as GraphQLObjectType; const fieldMap = queryType.getFields(); expect(fieldMap.profile.args.find(a => a.name === 'profile_id')).toBeUndefined(); expect(fieldMap.profile.args.find(a => a.name === 'profileId')).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); it('should only affect field argument only if type and field are specified', () => { const newSchema = wrapSchema({ schema, transforms: [ new RenameTransform({ apiName: '', importFn: m => import(m), config: { mode: 'wrap', renames: [ { from: { argument: 'profile_id', }, to: { argument: 'profileId', }, }, ], }, cache, pubsub, baseDir, }), ], }); const queryType = newSchema.getType('Query') as GraphQLObjectType; const fieldMap = queryType.getFields(); expect(fieldMap.profile.args.find(a => a.name === 'profile_id')).toBeDefined(); expect(fieldMap.profile.args.find(a => a.name === 'profileId')).toBeUndefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); // TODO it.skip('should move a root field from a root type to another', () => { const schema = buildSchema(/* GraphQL */ ` type Query { foo: String } type Mutation { bar: String } `); const transform = new RenameTransform({ apiName: '', importFn: m => import(m), config: { mode: 'bare', renames: [ { from: { type: 'Mutation', field: 'bar', }, to: { type: 'Query', field: 'bar', }, }, ], }, cache, pubsub, baseDir, }); const newSchema = transform.transformSchema(schema, {} as any); const queryType = newSchema.getType('Query') as GraphQLObjectType; const queryFieldMap = queryType.getFields(); expect(queryFieldMap.bar).toBeDefined(); expect(printSchema(newSchema)).toMatchSnapshot(); }); });
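// --- Config sketch ---
// The rename entries exercised above, shown standalone; `mode`, regex use and
// `regExpFlags` are the knobs the tests vary (values illustrative):
//
// const renameConfig = {
//   mode: 'wrap',
//   renames: [
//     { from: { type: 'My(.*)' }, to: { type: '$1' }, useRegExpForTypes: true },
//     {
//       from: { type: 'Query', field: 'my_(.*)' },
//       to: { type: 'Query', field: '$1' },
//       useRegExpForFields: true,
//       regExpFlags: 'g',
//     },
//   ],
// };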
data.rs
use crate::common::Camera; use cust::{ error::CudaResult, memory::{DeviceBuffer, DeviceCopy, UnifiedBuffer}, util::SliceExt, vek::{num_traits::Zero, Vec2, Vec3}, }; use gpu_rand::DefaultRand; use path_tracer_gpu::{material::MaterialKind, scene::Scene, Object, Viewport}; use super::SEED; /// The various buffers held by the CUDA renderer. /// /// You could put these in the CUDA renderer but we separate them out for code readability. pub struct CudaRendererBuffers { /// The buffer of accumulated colors, every sample/render call adds its color to this buffer. pub accumulated_buffer: DeviceBuffer<Vec3<f32>>, /// The scaled buffer of colors, this is just the accumulated colors divided by sample count. pub scaled_buffer: DeviceBuffer<Vec3<f32>>, /// The final image buffer after denoising and postprocessing. pub out_buffer: DeviceBuffer<Vec3<u8>>, /// The scaled buffer but denoised. In the future we will use the same buffer for this. pub denoised_buffer: DeviceBuffer<Vec3<f32>>, /// The viewport used by the render kernel to emit rays. pub viewport: Viewport, /// Allocated buffer of objects in the scene. pub objects: UnifiedBuffer<Object>, /// Allocated buffer of the materials in the scene. pub materials: UnifiedBuffer<MaterialKind>, /// Per-thread randomness states. pub rand_states: UnifiedBuffer<DefaultRand>, } impl CudaRendererBuffers { pub fn
(dimensions: Vec2<usize>, camera: &Camera, scene: &Scene) -> CudaResult<Self> { let accumulated_buffer = Self::image_buffer(dimensions)?; let out_buffer = Self::image_buffer(dimensions)?; let denoised_buffer = Self::image_buffer(dimensions)?; let scaled_buffer = Self::image_buffer(dimensions)?; let objects = scene.objects.as_unified_buf()?; let materials = scene.materials.as_unified_buf()?; let mut viewport = Viewport::default(); camera.as_viewport(&mut viewport); viewport.bounds = dimensions; let rand_states = DefaultRand::initialize_states(SEED, dimensions.product()) .as_slice() .as_unified_buf()?; Ok(Self { accumulated_buffer, scaled_buffer, out_buffer, denoised_buffer, viewport, objects, materials, rand_states, }) } /// Resets and reallocates the entire scene. This may be slow because it needs to reallocate /// all of the GPU scene buffers. pub fn reset_scene(&mut self, scene: &Scene) -> CudaResult<()> { self.objects = scene.objects.as_unified_buf()?; self.materials = scene.materials.as_unified_buf()?; Ok(()) } /// Reset the renderer's view, in the buffer's case this means clearing accumulated buffers from previous samples. /// As well as changing the viewport. pub fn update_camera(&mut self, new_camera: &Camera) -> CudaResult<()> { self.accumulated_buffer = unsafe { DeviceBuffer::zeroed(self.accumulated_buffer.len())? }; new_camera.as_viewport(&mut self.viewport); Ok(()) } /// Resize the image-specific buffers for a new image size. pub fn resize(&mut self, new: Vec2<usize>) -> CudaResult<()> { self.viewport.bounds = new; self.accumulated_buffer = Self::image_buffer(new)?; self.out_buffer = Self::image_buffer(new)?; self.denoised_buffer = Self::image_buffer(new)?; self.scaled_buffer = Self::image_buffer(new)?; self.rand_states = DefaultRand::initialize_states(SEED, new.product()) .as_slice() .as_unified_buf()?; Ok(()) } /// Swaps out a material at a specific index. pub fn update_material(&mut self, idx: usize, new: MaterialKind) { self.materials[idx] = new; } /// Swaps out an object at a specific index. pub fn update_object(&mut self, idx: usize, new: Object) { self.objects[idx] = new; } // could also use the convenience method on optix::denoiser::Image for this fn image_buffer<T: DeviceCopy + Zero>( dimensions: Vec2<usize>, ) -> CudaResult<DeviceBuffer<Vec3<T>>> { unsafe { DeviceBuffer::zeroed(dimensions.product()) } } }
new
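// --- Usage sketch ---
// Wiring the buffers into a host-side loop; `camera` and `scene` come from
// the caller (error handling elided):
//
// let mut buffers = CudaRendererBuffers::new(Vec2::new(1920, 1080), &camera, &scene)?;
// // A window resize reallocates every image-sized buffer together:
// buffers.resize(Vec2::new(1280, 720))?;
// // A camera move only clears the accumulation buffer and updates the viewport:
// buffers.update_camera(&camera)?;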
RadioButtonColumn.ts
import Column from './Column'; import Html from '../helpers/Html'; import Model from '../base/Model'; /** * RadioButtonColumn displays a column of radio buttons in a grid view. * * To add a RadioButtonColumn to the [[GridView]], add it to the [[GridView.columns|columns]] configuration as follows: * * ```js * { * columns: [ * // ... * { * class: 'RadioButtonColumn', * radioOptions: function (model) { * return { * value: model['value'],
 * }
 * ]
 * }
 * ```
 */
export default class RadioButtonColumn extends Column {
  /**
   * The name of the radio button input fields.
   */
  public name = 'radioButtonSelection';

  /**
   * The HTML attributes for the radio buttons. This can either be an object of
   * attributes or an anonymous function ([[Closure]]) returning such an object.
   *
   * The signature of the function should be as follows: `function (model, key, index, column)`
   * where `model`, `key`, and `index` refer to the model, key and index of the row currently being rendered
   * and `column` is a reference to the [[RadioButtonColumn]] object.
   *
   * A function may be used to assign different attributes to different rows based on the data in that row.
   * Specifically if you want to set a different value for the radio button you can use this option
   * in the following way (in this example using the `name` attribute of the model):
   *
   * ```js
   * {
   *   radioOptions: function (model, key, index, column) {
   *     return {value: model.attribute}
   *   }
   * }
   * ```
   * @see [[Html.renderTagAttributes]] for details on how attributes are being rendered.
   */
  public radioOptions: { [key: string]: any } = {};

  public constructor(config: { [key: string]: any }) {
    super(config);
    Object.assign(this, config);
  }

  /**
   * {@inheritdoc}
   * @throws InvalidConfigException if [[name]] is not set.
   */
  public async init() {
    await super.init.call(this);

    if (this.name.length === 0) throw new Error('The "name" property must be set.');
  }

  /** @inheritdoc */
  protected async renderDataCellContent(model: Model, key: string, index: number) {
    if (this.content !== undefined) return super.renderDataCellContent(model, key, index);

    let options;
    if (typeof this.radioOptions === 'function') options = this.radioOptions(model, key, index, this);
    else {
      options = this.radioOptions;
      if (options.value === undefined)
        options.value = typeof key !== 'string' ? JSON.stringify(key) : key;
    }

    const checked = options.checked !== undefined ? options.checked : false;
    return Html.radio(this.name, checked, options);
  }
}
* checked: model['value'] * } * }
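// --- Config sketch ---
// A grid column entry using the per-row closure form documented above
// (`model.id` is an illustrative attribute name):
//
// {
//   class: 'RadioButtonColumn',
//   name: 'radioButtonSelection',
//   radioOptions: (model, key, index, column) => ({ value: model.id }),
// }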
drop_zst.rs
// check-fail #![feature(const_precise_live_drops)] struct S; impl Drop for S { fn drop(&mut self) { println!("Hello!");
} } const fn foo() { let s = S; //~ destructor } fn main() {}
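// --- Why this is rejected: a sketch ---
// Const evaluation cannot run `Drop` code, so a value with drop glue must not
// die inside a `const fn`. Moving the value out instead compiles under
// `const_precise_live_drops` (sketch, same `S` as above):
//
// const fn foo_ok() -> S {
//     S // returned, so no destructor runs during const evaluation
// }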
custom_wait_rules_test.go
// Copyright 2020 VMware, Inc. // SPDX-License-Identifier: Apache-2.0 package e2e import ( "fmt" "strings" "testing" "github.com/stretchr/testify/require" ) func TestCustomWaitRules(t *testing.T)
{
	env := BuildEnv(t)
	logger := Logger{}
	kapp := Kapp{t, env.Namespace, env.KappBinaryPath, logger}
	kubectl := Kubectl{t, env.Namespace, logger}

	config := `
apiVersion: kapp.k14s.io/v1alpha1
kind: Config
waitRules:
- ytt:
    funcContractV1:
      resource.star: |
        def is_done(resource):
          state = resource.status.currentState
          if state == "Failed":
            return {"done": True, "successful": False, "message": "Current state as Failed"}
          elif state == "Running":
            return {"done": True, "successful": True, "message": "Current state as Running"}
          else:
            return {"done": True, "successful": False, "message": "Not in Failed or Running state"}
          end
        end
  resourceMatchers:
  - apiVersionKindMatcher: {apiVersion: stable.example.com/v1, kind: CronTab}
`

	crdYaml := `
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: crontabs.stable.example.com
spec:
  group: stable.example.com
  versions:
  - name: v1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            type: object
            properties:
              cronSpec:
                type: string
              image:
                type: string
              replicas:
                type: integer
          status:
            type: object
            properties:
              currentState:
                type: string
  scope: Namespaced
  names:
    plural: crontabs
    singular: crontab
    kind: CronTab
---
`

	crYaml := `
apiVersion: "stable.example.com/v1"
kind: CronTab
metadata:
  name: my-new-cron-object-1
spec:
  cronSpec: "* * * * */5"
  image: my-awesome-cron-image
status:
  currentState: %s
---
`

	name := "test-custom-wait-rule-contract-v1"
	cleanUp := func() {
		kapp.Run([]string{"delete", "-a", name})
	}
	cleanUp()
	defer cleanUp()

	logger.Section("deploy resource with current state as running", func() {
		res, err := kapp.RunWithOpts([]string{"deploy", "-f", "-", "-a", name}, RunOpts{
			StdinReader: strings.NewReader(crdYaml + fmt.Sprintf(crYaml, "Running") + config)})
		// The deploy should succeed once the wait rule reports Running.
		require.NoErrorf(t, err, "Expected CronTab to be deployed")

		require.Contains(t, res, "Current state as Running")

		NewPresentClusterResource("CronTab", "my-new-cron-object-1", env.Namespace, kubectl)
	})

	cleanUp()

	logger.Section("deploy resource with current state as failed", func() {
		res, err := kapp.RunWithOpts([]string{"deploy", "-f", "-", "-a", name}, RunOpts{
			StdinReader: strings.NewReader(crdYaml + fmt.Sprintf(crYaml, "Failed") + config),
			AllowError:  true,
		})

		require.Contains(t, res, "Current state as Failed")
		require.Contains(t, err.Error(), "kapp: Error: waiting on reconcile crontab/my-new-cron-object-1")
	})
}
context_test.go
/* Copyright SecureKey Technologies Inc. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package blockvisitor import ( "testing" "github.com/stretchr/testify/require" ) func TestContext(t *testing.T)
{ ccEvent := &CCEvent{ BlockNum: 1000, TxID: txID1, TxNum: 12, } lsccWrite := &LSCCWrite{ BlockNum: 1000, TxID: txID1, TxNum: 12, } configUpdate := &ConfigUpdate{ BlockNum: 1000, } read := &Read{ BlockNum: 1000, TxID: txID1, TxNum: 12, } write := &Write{ BlockNum: 1000, TxID: txID1, TxNum: 12, } ctx := newContext( UnmarshalErr, channelID, 1000, withTxID(txID1), withTxNum(12), withCCEvent(ccEvent), withConfigUpdate(configUpdate), withLSCCWrite(lsccWrite), withRead(read), withWrite(write), ) require.NotNil(t, ctx) require.Equal(t, UnmarshalErr, ctx.Category) require.Equal(t, channelID, ctx.ChannelID) require.Equal(t, uint64(1000), ctx.BlockNum) require.Equal(t, txID1, ctx.TxID) require.Equal(t, ccEvent, ctx.CCEvent) require.Equal(t, configUpdate, ctx.ConfigUpdate) require.Equal(t, lsccWrite, ctx.LSCCWrite) require.Equal(t, read, ctx.Read) require.Equal(t, write, ctx.Write) s := ctx.String() require.Contains(t, s, "ChannelID: testchannel") require.Contains(t, s, "Category: UNMARSHAL_ERROR") require.Contains(t, s, "Block: 1000") require.Contains(t, s, "TxNum: 12") require.Contains(t, s, "TxID: tx1") require.Contains(t, s, "Read:") require.Contains(t, s, "Write:") require.Contains(t, s, "CCEvent:") require.Contains(t, s, "TxID:tx1") require.Contains(t, s, "TxNum:12") require.Contains(t, s, "ConfigUpdate:") }
Session.js
import React from "react" import { Container } from "semantic-ui-react" const Session = () => ( <Container style={{ marginTop: "7em" }}> <h1>Session</h1> </Container> )
export default Session
skillsLanguage.js
import React from "react";
<div className="max-w-4xl mx-auto" > <h2 className="text-3xl font-bold text-center">Conocimientos en Lenguajes de Programación</h2> <div className="flex mt-8"> <div className="shadow p-8 bg-white mr-4"> <h4 className="font-bold">PHP</h4> <div className="text-center"> <span className="inline-block bg-purple-200 text-purple-700 p-2 m-2 radius">9/10</span> </div> </div> <div className="shadow p-8 bg-white mr-4"> <h4 className="font-bold">C#</h4> <div className="text-center"> <span className="inline-block bg-purple-200 text-purple-700 p-2 m-2 radius">8/10</span> </div> </div> <div className="shadow p-8 bg-white mr-4"> <h4 className="font-bold">Javascript</h4> <div className="text-center"> <span className="inline-block bg-purple-200 text-purple-700 p-2 m-2 radius">9/10</span> </div> </div> <div className="shadow p-8 bg-white mr-4"> <h4 className="font-bold">Python</h4> <div className="text-center"> <span className="inline-block bg-purple-200 text-purple-700 p-2 m-2 radius">8/10</span> </div> </div> <div className="shadow p-8 bg-white mr-4"> <h4 className="font-bold">Java</h4> <div className="text-center"> <span className="inline-block bg-purple-200 text-purple-700 p-2 m-2 radius">7/10</span> </div> </div> <div className="shadow p-8 bg-white mr-4"> <h4 className="font-bold">C++</h4> <div className="text-center"> <span className="inline-block bg-purple-200 text-purple-700 p-2 m-2 radius">7/10</span> </div> </div> </div> </div> </div> </section> );
export default() =>( <section> <div className="mt-24">
index_20210920203947.js
// Import the index.art template file
import router from "../routes"
import indexTpl from "../views/index.art"
// Import the sign-in page template file signin.art
import signinTpl from "../views/signin.art"
import usersTpl from "../views/users.art"
import usersListTpl from "../views/users-list.art"
import usersListPageTpl from "../views/users-pages.art"

// Render the templates once and keep the HTML in variables
const htmlIndex = indexTpl({})
const htmlSignin = signinTpl({})

// Number of items per page
const pageSize = 10
// Temporary storage for the fetched data
let dataList = []

// Submit button click handler
const _handleSubmit = (router) => {
    return (e) => {
        console.log(e);
        e.preventDefault()
        router.go("/index")
    }
}

// Create a user
const _signup = () => {
    const $btnClose = $("#users-close")
    // serialize collects the form data for submission
    const data = $("#users-form").serialize()
    // Submit the form via ajax
    $.ajax({
        url: "/api/users",
            // Refresh the view after a successful add
            console.log(res)
            // Reload the data after adding
            _loadData()
            // Then render the first page of data
            // _list(1)
        }
    })
    $btnClose.click()
}

// Pagination logic
const _pagination = (data) => {
    // Total number of records
    const total = data.length
    // Total number of pages
    const pageCount = Math.ceil(total / pageSize)
    // Build an array from the page count
    const pageArray = new Array(pageCount)
    const htmlPage = usersListPageTpl({
        pageArray,
    })
    $("#users-page").html(htmlPage)
    // Highlight the default page number
    $("#users-page-list li:nth-child(2)").addClass("active")
    // Highlight the page number that was clicked
    $("#users-page-list li:not(:first-child, :last-child)").on("click", function () {
        $(this).addClass("active").siblings().removeClass("active")
        // Render the selected page
        _list($(this).index())
    })
}

// Load the data
const _loadData = () => {
    return $.ajax({
        url: "/api/users",
        // async: false sends a synchronous request
        // async: false,
        success(result) {
            dataList = result.data
            // Paginate
            _pagination(result.data)
            _list(1)
        }
        // success(result) {
        //     // Render the list
        //     $("#users-list").html(usersListTpl({
        //         data: result.data.slice((pageNum - 1) * pageSize, pageNum * pageSize)
        //     }))
        // }
    })
}

/**
 *
 * @param {*} pageNum the page currently being shown
 */
const _list = (pageNum) => {
    // Render the list
    $("#users-list").html(usersListTpl({
        data: dataList.slice((pageNum - 1) * pageSize, pageNum * pageSize)
    }))
}

// Home page
const index = (router) => {
    return async (req, res, next) => {
        // Render the page
        res.render(htmlIndex)
        // Make the page fill the screen
        // window.resize
        $(window, ".wrapper").resize()
        // Fill in the user list
        $("#content").html(usersTpl())
        // Bind the delete buttons
        $("#users-list").on("click", ".remove", () => {
            console.log(0);
        })
        // Initial data load and list render
        await _loadData()
        // _list(1)
        // Submit the form when Save is clicked
        $("#users-save").on("click", _signup)
    }
}

// Sign-in page
const signin = (router) => {
    return (req, res, next) => {
        res.render(htmlSignin)
        // Bind the submit handler on the sign-in page
        $("#signin").on("submit", _handleSubmit(router))
    }
}

const signup = () => {

}

export {
    index,
    signin,
}
type: "post", data, success(res) {
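// --- Pagination sketch ---
// The slice-based paging used by _pagination/_list above, distilled
// (pageNum is 1-based):
//
//   const pageCount = Math.ceil(dataList.length / pageSize)
//   const pageData = dataList.slice((pageNum - 1) * pageSize, pageNum * pageSize)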
discoverSavedQueries.tsx
import {Client} from 'app/api'; import {SavedQuery, NewQuery} from 'app/stores/discoverSavedQueriesStore'; import DiscoverSavedQueryActions from 'app/actions/discoverSavedQueryActions'; import {t} from 'app/locale'; import {addErrorMessage} from 'app/actionCreators/indicator'; export function fetchSavedQueries(api: Client, orgId: string): Promise<SavedQuery[]> { DiscoverSavedQueryActions.startFetchSavedQueries(); const promise = api.requestPromise(`/organizations/${orgId}/discover/saved/`, { method: 'GET', }); promise
.catch(() => { DiscoverSavedQueryActions.fetchSavedQueriesError(); addErrorMessage(t('Unable to load saved queries')); }); return promise; } export function createSavedQuery( api: Client, orgId: string, query: NewQuery ): Promise<SavedQuery> { const promise = api.requestPromise(`/organizations/${orgId}/discover/saved/`, { method: 'POST', data: query, }); promise .then(resp => { DiscoverSavedQueryActions.createSavedQuerySuccess(resp); }) .catch(() => { addErrorMessage(t('Unable to create your saved query')); }); return promise; } export function updateSavedQuery( api: Client, orgId: string, query: NewQuery ): Promise<SavedQuery> { const promise = api.requestPromise( `/organizations/${orgId}/discover/saved/${query.id}/`, { method: 'PUT', data: query, } ); promise .then(resp => { DiscoverSavedQueryActions.updateSavedQuerySuccess(resp); }) .catch(() => { addErrorMessage(t('Unable to update your saved query')); }); return promise; } export function deleteSavedQuery( api: Client, orgId: string, queryId: string ): Promise<null> { const promise = api.requestPromise( `/organizations/${orgId}/discover/saved/${queryId}/`, {method: 'DELETE'} ); promise .then(() => { DiscoverSavedQueryActions.deleteSavedQuerySuccess(queryId); }) .catch(() => { addErrorMessage(t('Unable to delete the saved query')); }); return promise; }
.then(resp => { DiscoverSavedQueryActions.fetchSavedQueriesSuccess(resp); })
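// --- Usage sketch ---
// Each action creator returns the underlying promise, so callers can chain on
// it while the store is updated via actions (orgId illustrative):
//
// fetchSavedQueries(api, 'my-org').then(queries => {
//   // render the saved query list
// });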
stream_consumer.rs
//! Stream-based consumer implementation.
use crate::rdsys;
use crate::rdsys::types::*;
use futures::sync::mpsc;
use futures::{Future, Poll, Sink, Stream};

use crate::config::{ClientConfig, FromClientConfig, FromClientConfigAndContext};
use crate::consumer::base_consumer::BaseConsumer;
use crate::consumer::{Consumer, ConsumerContext, DefaultConsumerContext};
use crate::error::{KafkaError, KafkaResult};
use crate::message::BorrowedMessage;
use crate::util::duration_to_millis;

use std::ptr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::{self, JoinHandle};
use std::time::Duration;

/// Default channel size for the stream consumer. The number of context switches
/// seems to decrease exponentially as the channel size is increased, and it stabilizes when
/// the channel size reaches 10 or so.
const CONSUMER_CHANNEL_SIZE: usize = 10;

/// A small wrapper for a message pointer. This wrapper is only used to
/// pass a message between the polling thread and the thread consuming the stream,
/// and to transform it from pointer to `BorrowedMessage` with a lifetime that derives from the
/// lifetime of the stream consumer. In general it is not safe to pass a struct with an internal
/// reference across threads. However the `StreamConsumer` guarantees that the polling thread
/// is terminated before the consumer is actually dropped, ensuring that the messages
/// are safe to be used for their entire lifetime.
struct PolledMessagePtr {
    message_ptr: *mut RDKafkaMessage,
}

impl PolledMessagePtr {
    /// Creates a new PolledPtr from a message pointer. It takes ownership of the message.
    fn new(message_ptr: *mut RDKafkaMessage) -> PolledMessagePtr {
        trace!("New polled ptr {:?}", message_ptr);
        PolledMessagePtr { message_ptr }
    }

    /// Transforms the `PolledMessagePtr` into a message whose lifetime will be bound to the
    /// lifetime of the provided consumer. If the librdkafka message represents an error, the error
    /// will be returned instead.
    fn into_message_of<C: ConsumerContext>(
        mut self,
        consumer: &StreamConsumer<C>,
    ) -> KafkaResult<BorrowedMessage> {
        let msg = unsafe { BorrowedMessage::from_consumer(self.message_ptr, consumer) };
        self.message_ptr = ptr::null_mut();
        msg
    }
}

impl Drop for PolledMessagePtr {
    /// If the `PolledMessagePtr` hasn't been transformed into a message and the pointer is
    /// still available, it will free the underlying resources.
    fn drop(&mut self) {
        if !self.message_ptr.is_null() {
            trace!("Destroy PolledPtr {:?}", self.message_ptr);
            unsafe { rdsys::rd_kafka_message_destroy(self.message_ptr) };
        }
    }
}

/// Allow message pointer to be moved across threads.
unsafe impl Send for PolledMessagePtr {}

/// A Kafka consumer implementing Stream.
///
/// It can be used to receive messages as they are consumed from Kafka. Note: there might be
/// buffering between the actual Kafka consumer and the receiving end of this stream, so it is not
/// advised to use automatic commit, as some messages might have been consumed by the internal Kafka
/// consumer but not processed. Manual offset storing should be used, see the `store_offset`
/// function on `Consumer`.
pub struct MessageStream<'a, C: ConsumerContext + 'static> {
    consumer: &'a StreamConsumer<C>,
    receiver: mpsc::Receiver<Option<PolledMessagePtr>>,
}

impl<'a, C: ConsumerContext + 'static> MessageStream<'a, C> {
    fn new(
        consumer: &'a StreamConsumer<C>,
        receiver: mpsc::Receiver<Option<PolledMessagePtr>>,
    ) -> MessageStream<'a, C> {
} impl<'a, C: ConsumerContext + 'a> Stream for MessageStream<'a, C> { type Item = KafkaResult<BorrowedMessage<'a>>; type Error = (); fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { self.receiver.poll().map(|ready| { ready.map(|option| { option.map(|polled_ptr_opt| { polled_ptr_opt.map_or(Err(KafkaError::NoMessageReceived), |polled_ptr| { polled_ptr.into_message_of(self.consumer) }) }) }) }) } } /// Internal consumer loop. This is the main body of the thread that will drive the stream consumer. /// If `send_none` is true, the loop will send a None into the sender every time the poll times out. fn poll_loop<C: ConsumerContext>( consumer: &BaseConsumer<C>, sender: mpsc::Sender<Option<PolledMessagePtr>>, should_stop: &AtomicBool, poll_interval: Duration, send_none: bool, ) { trace!("Polling thread loop started"); let mut curr_sender = sender; let poll_interval_ms = duration_to_millis(poll_interval) as i32; while !should_stop.load(Ordering::Relaxed) { trace!("Polling base consumer"); let future_sender = match consumer.poll_raw(poll_interval_ms) { None => { if send_none { curr_sender.send(None) } else { continue; // TODO: check stream closed } } Some(m_ptr) => curr_sender.send(Some(PolledMessagePtr::new(m_ptr))), }; match future_sender.wait() { Ok(new_sender) => curr_sender = new_sender, Err(e) => { debug!("Sender not available: {:?}", e); break; } }; } trace!("Polling thread loop terminated"); } /// A Kafka Consumer providing a `futures::Stream` interface. /// /// This consumer doesn't need to be polled since it has a separate polling thread. Due to the /// asynchronous nature of the stream, some messages might be consumed by the consumer without being /// processed on the other end of the stream. If auto commit is used, it might cause message loss /// after consumer restart. Manual offset storing should be used, see the `store_offset` function on /// `Consumer`. #[must_use = "Consumer polling thread will stop immediately if unused"] pub struct StreamConsumer<C: ConsumerContext + 'static = DefaultConsumerContext> { consumer: Arc<BaseConsumer<C>>, should_stop: Arc<AtomicBool>, handle: Mutex<Option<JoinHandle<()>>>, } impl<C: ConsumerContext> Consumer<C> for StreamConsumer<C> { fn get_base_consumer(&self) -> &BaseConsumer<C> { Arc::as_ref(&self.consumer) } } impl FromClientConfig for StreamConsumer { fn from_config(config: &ClientConfig) -> KafkaResult<StreamConsumer> { StreamConsumer::from_config_and_context(config, DefaultConsumerContext) } } /// Creates a new `StreamConsumer` starting from a `ClientConfig`. impl<C: ConsumerContext> FromClientConfigAndContext<C> for StreamConsumer<C> { fn from_config_and_context( config: &ClientConfig, context: C, ) -> KafkaResult<StreamConsumer<C>> { let stream_consumer = StreamConsumer { consumer: Arc::new(BaseConsumer::from_config_and_context(config, context)?), should_stop: Arc::new(AtomicBool::new(false)), handle: Mutex::new(None), }; Ok(stream_consumer) } } impl<C: ConsumerContext> StreamConsumer<C> { /// Starts the StreamConsumer with default configuration (100ms polling interval and no /// `NoMessageReceived` notifications). pub fn start(&self) -> MessageStream<C> { self.start_with(Duration::from_millis(100), false) } /// Starts the StreamConsumer with the specified poll interval. Additionally, if /// `no_message_error` is set to true, it will return an error of type /// `KafkaError::NoMessageReceived` every time the poll interval is reached and no message has /// been received. 
pub fn start_with(&self, poll_interval: Duration, no_message_error: bool) -> MessageStream<C> { // TODO: verify called once let (sender, receiver) = mpsc::channel(CONSUMER_CHANNEL_SIZE); let consumer = self.consumer.clone(); let should_stop = self.should_stop.clone(); let handle = thread::Builder::new() .name("poll".to_string()) .spawn(move || { poll_loop( consumer.as_ref(), sender, should_stop.as_ref(), poll_interval, no_message_error, ); }) .expect("Failed to start polling thread"); *self.handle.lock().unwrap() = Some(handle); MessageStream::new(self, receiver) } /// Stops the StreamConsumer, blocking the caller until the internal consumer has been stopped. pub fn stop(&self) { let mut handle = self.handle.lock().unwrap(); if let Some(handle) = handle.take() { trace!("Stopping polling"); self.should_stop.store(true, Ordering::Relaxed); match handle.join() { Ok(()) => trace!("Polling stopped"), Err(e) => warn!("Failure while terminating thread: {:?}", e), }; } } } impl<C: ConsumerContext> Drop for StreamConsumer<C> { fn drop(&mut self) { trace!("Destroy StreamConsumer"); // The polling thread must be fully stopped before we can proceed with the actual drop, // otherwise it might consume from a destroyed consumer. self.stop(); } }
MessageStream { consumer, receiver } }
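To make the consumer lifecycle above concrete, here is a minimal usage sketch, assuming the rdkafka 0.x / futures 0.1 API shown in this file; the broker address, group id, and topic name are illustrative placeholders.

use futures::stream::Stream;
use rdkafka::config::ClientConfig;
use rdkafka::consumer::{Consumer, StreamConsumer};
use rdkafka::message::Message;

fn consume_example() {
    // Build a StreamConsumer via FromClientConfig (see `from_config` above).
    let consumer: StreamConsumer = ClientConfig::new()
        .set("group.id", "example-group") // placeholder
        .set("bootstrap.servers", "localhost:9092") // placeholder
        .create()
        .expect("consumer creation failed");
    consumer
        .subscribe(&["example-topic"]) // placeholder topic
        .expect("subscription failed");

    // `start()` spawns the polling thread; `wait()` turns the futures 0.1
    // stream into a blocking iterator of Result<KafkaResult<BorrowedMessage>, ()>.
    for message in consumer.start().wait() {
        match message {
            Ok(Ok(m)) => println!("received message at offset {}", m.offset()),
            Ok(Err(e)) => eprintln!("kafka error: {:?}", e),
            Err(()) => break, // stream closed
        }
    }
}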
dummy.py
""" Dummy layout. Used when somebody creates an `Application` without specifying a `Layout`. """ from quo.text import HTML from quo.keys import KeyBinder
from .dimension import D from .layout import Layout __all__ = [ "create_dummy_layout", ] E = KeyPressEvent def create_dummy_layout() -> Layout: """ Create a dummy layout for use in an 'Application' that doesn't have a layout specified. When ENTER is pressed, the application quits. """ kb = KeyBinder() @kb.add("enter") def enter(event: E) -> None: event.app.exit() control = FormattedTextControl( HTML("No layout specified. Press <reverse>ENTER</reverse> to quit."), key_bindings=kb, ) window = Window(content=control, height=D(min=1)) return Layout(container=window, focused_element=window)
from quo.keys.key_binding.key_processor import KeyPressEvent from .containers import Window from .controls import FormattedTextControl
Chevron.js
import React from "react" import PropTypes from "prop-types" const ChevronLeft = props => { const { color, size, className, ...otherProps } = props return ( <svg xmlns="http://www.w3.org/2000/svg" width={size} height={size} viewBox="0 0 24 24" fill="none" stroke={color} strokeWidth="2" strokeLinecap="round" strokeLinejoin="round" className={className} {...otherProps} > <polyline points="6 9 12 15 18 9" /> </svg> ) }
color: PropTypes.string, size: PropTypes.oneOfType([PropTypes.string, PropTypes.number]), } ChevronLeft.defaultProps = { color: "currentColor", size: "24", } export default ChevronLeft
ChevronLeft.propTypes = {
json_ser.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. pub fn serialize_structure_crate_input_finalize_device_claim_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::FinalizeDeviceClaimInput, ) -> Result<(), aws_smithy_http::operation::SerializationError>
pub fn serialize_structure_crate_input_invoke_device_method_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::InvokeDeviceMethodInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_5) = &input.device_method { let mut object_6 = object.key("deviceMethod").start_object(); crate::json_ser::serialize_structure_crate_model_device_method(&mut object_6, var_5)?; object_6.finish(); } if let Some(var_7) = &input.device_method_parameters { object.key("deviceMethodParameters").string(var_7); } Ok(()) } pub fn serialize_structure_crate_input_tag_resource_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::TagResourceInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_8) = &input.tags { let mut object_9 = object.key("tags").start_object(); for (key_10, value_11) in var_8 { { object_9.key(key_10).string(value_11); } } object_9.finish(); } Ok(()) } pub fn serialize_structure_crate_input_update_device_state_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::UpdateDeviceStateInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if input.enabled { object.key("enabled").boolean(input.enabled); } Ok(()) } pub fn serialize_structure_crate_model_device_method( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::DeviceMethod, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_12) = &input.device_type { object.key("deviceType").string(var_12); } if let Some(var_13) = &input.method_name { object.key("methodName").string(var_13); } Ok(()) }
{ if let Some(var_1) = &input.tags { let mut object_2 = object.key("tags").start_object(); for (key_3, value_4) in var_1 { { object_2.key(key_3).string(value_4); } } object_2.finish(); } Ok(()) }
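A sketch of how a generated structure serializer such as `serialize_structure_crate_input_tag_resource_input` is typically driven; the wrapper function below is hypothetical and relies only on the `JsonObjectWriter` calls already visible in this file (construction over a `String` buffer, the serializer itself, and `finish`).

pub fn serialize_tag_resource_to_json(
    input: &crate::input::TagResourceInput,
) -> Result<String, aws_smithy_http::operation::SerializationError> {
    let mut out = String::new();
    let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
    // Delegate to the generated serializer shown above; it writes the
    // "tags" key and its nested object into `out`.
    crate::json_ser::serialize_structure_crate_input_tag_resource_input(&mut object, input)?;
    object.finish(); // writes the closing '}'
    // For a single tag {"env": "prod"} the buffer would hold:
    // {"tags":{"env":"prod"}}
    Ok(out)
}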
contacts.js
$(document).ready(function(){ if ($('#map').length) { var lat = 57.651774; var log = 39.829889; var urlBaby = window.location.hostname; var markerUrl = 'static/img/assets/contacts/marker.svg';
var init = function init() { myMap = new ymaps.Map("map", { center: [lat, log], zoom: 18, controls: [] }); myMap.behaviors.disable('scrollZoom'); var myPlacemark = new ymaps.Placemark([lat, log], {}, { iconLayout: 'default#image', iconImageHref: markerUrl, iconImageSize: [60, 60], iconImageOffset: [-3, -42] }); myMap.geoObjects.add(myPlacemark); }; ymaps.ready(init); } })
var myMap; var myPlacemark;
vscode.proposed.d.ts
/*---------------------------------------------------------------------------------------------
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/

// This is the place for API experiments and proposals.

declare module 'vscode' {
	export interface ProblemPattern {

		/**
		 * The regular expression to find a problem in the console output of an
		 * executed task.
		 */
		regexp: RegExp;

		/**
		 * The match group index of the filename.
		 *
		 * Defaults to 1 if omitted.
		 */
		file?: number;

		/**
		 * The match group index of the problem's location. Valid location
		 * patterns are: (line), (line,column) and (startLine,startColumn,endLine,endColumn).
		 * If omitted the line and character properties are used.
		 */
		location?: number;

		/**
		 * The match group index of the problem's line in the source file.
		 *
		 * Defaults to 2 if omitted.
		 */
		line?: number;

		/**
		 * The match group index of the problem's character in the source file.
		 *
		 * Defaults to 3 if omitted.
		 */
		character?: number;

		/**
		 * The match group index of the problem's end line in the source file.
		 *
		 * Defaults to undefined. No end line is captured.
		 */
		endLine?: number;

		/**
		 * The match group index of the problem's end character in the source file.
		 *
		 * Defaults to undefined. No end column is captured.
		 */
		endCharacter?: number;

		/**
		 * The match group index of the problem's severity.
		 *
		 * Defaults to undefined. In this case the problem matcher's severity
		 * is used.
		 */
		severity?: number;

		/**
		 * The match group index of the problem's code.
		 *
		 * Defaults to undefined. No code is captured.
		 */
		code?: number;

		/**
		 * The match group index of the message. If omitted it defaults
		 * to 4 if location is specified. Otherwise it defaults to 5.
		 */
		message?: number;

		/**
		 * Specifies if the last pattern in a multi line problem matcher should
		 * loop as long as it matches consecutive lines. Only valid on the
		 * last problem pattern in a multi line problem matcher.
		 */
		loop?: boolean;
	}

	/**
	 * A multi line problem pattern.
	 */
	export type MultiLineProblemPattern = ProblemPattern[];

	/**
	 * How the file location is interpreted.
	 */
	export enum FileLocationKind {
		/**
		 * VS Code should decide based on whether the file path found in the
		 * output is absolute or relative. A relative file path will be treated
		 * relative to the workspace root.
		 */
		Auto = 1,

		/**
		 * Always treat the file path as relative.
		 */
		Relative = 2,

		/**
		 * Always treat the file path as absolute.
		 */
		Absolute = 3
	}

	/**
	 * Controls to which kind of documents problems are applied.
	 */
	export enum ApplyToKind {
		/**
		 * Problems are applied to all documents.
		 */
		AllDocuments = 1,

		/**
		 * Problems are applied to open documents only.
		 */
		OpenDocuments = 2,

		/**
		 * Problems are applied to closed documents only.
		 */
		ClosedDocuments = 3
	}

	/**
	 * A background monitor pattern.
	 */
	export interface BackgroundPattern {
		/**
		 * The actual regular expression.
		 */
		regexp: RegExp;

		/**
		 * The match group index of the filename. If provided the expression
		 * is matched for that file only.
		 */
		file?: number;
	}

	/**
	 * A description to control the activity of a problem matcher
	 * watching a background task.
	 */
	export interface BackgroundMonitor {
		/**
		 * If set to true the monitor is in active mode when the task
		 * starts. This is equivalent to issuing a line that matches the
		 * beginPattern.
		 */
		activeOnStart?: boolean;

		/**
		 * If matched in the output, the start of a background activity is signaled.
		 */
		beginsPattern: RegExp | BackgroundPattern;

		/**
		 * If matched in the output, the end of a background activity is signaled.
		 */
		endsPattern: RegExp | BackgroundPattern;
	}

	/**
	 * Defines a problem matcher
	 */
	export interface ProblemMatcher {
		/**
		 * The owner of a problem. Defaults to a generated id
		 * if omitted.
		 */
		owner?: string;

		/**
		 * The type of documents problems detected by this matcher
		 * apply to.
		 *
		 * Defaults to `ApplyToKind.AllDocuments` if omitted.
		 */
		applyTo?: ApplyToKind;

		/**
		 * How a file location recognized by a matcher should be interpreted. If omitted,
		 * the file location defaults to `FileLocationKind.Auto`.
		 */
		fileLocation?: FileLocationKind | string;

		/**
		 * The actual pattern used by the problem matcher.
		 */
		pattern: ProblemPattern | MultiLineProblemPattern;

		/**
		 * The default severity of a detected problem in the output. Used
		 * if the `ProblemPattern` doesn't define a severity match group.
		 */
		severity?: DiagnosticSeverity;

		/**
		 * A background monitor for tasks that are running in the background.
		 */
		backgound?: BackgroundMonitor;
	}

	/**
	 * Controls the behaviour of the terminal's visibility.
	 */
	export enum RevealKind {
		/**
		 * Always brings the terminal to front if the task is executed.
		 */
		Always = 1,

		/**
		 * Only brings the terminal to front if a problem is detected executing the task
		 * (e.g. the task couldn't be started).
		 */
		Silent = 2,

		/**
		 * The terminal never comes to front when the task is executed.
		 */
		Never = 3
	}

	/**
	 * Controls terminal specific behaviour.
	 */
	export interface TerminalBehaviour {
		/**
		 * Controls whether the terminal executing a task is brought to front or not.
		 * Defaults to `RevealKind.Always`.
		 */
		reveal?: RevealKind;

		/**
		 * Controls whether the command is echoed in the terminal or not.
		 */
		echo?: boolean;
	}

	export interface ProcessOptions {
		/**
		 * The current working directory of the executed program or shell.
		 * If omitted VSCode's current workspace root is used.
		 */
		cwd?: string;

		/**
		 * The additional environment of the executed program or shell. If omitted
		 * the parent process' environment is used. If provided it is merged with
		 * the parent process' environment.
		 */
		env?: { [key: string]: string };
	}

	export namespace TaskGroup {
		/**
		 * The clean task group
		 */
		export const Clean: 'clean';
		/**
		 * The build task group
		 */
		export const Build: 'build';
		/**
		 * The rebuild all task group
		 */
		export const RebuildAll: 'rebuildAll';
		/**
		 * The test task group
		 */
		export const Test: 'test';
	}

	/**
	 * The supported task groups.
	 */
	export type TaskGroup = 'clean' | 'build' | 'rebuildAll' | 'test';

	/**
	 * A task that starts an external process.
	 */
	export class ProcessTask {

		/**
		 * Creates a process task.
		 *
		 * @param name the task's name. Is presented in the user interface.
		 * @param process the process to start.
		 * @param problemMatchers the problem matchers to use.
		 */
		constructor(name: string, process: string, ...problemMatchers: ProblemMatcher[]);

		/**
		 * Creates a process task.
		 *
		 * @param name the task's name. Is presented in the user interface.
		 * @param process the process to start.
		 * @param args arguments to be passed to the process.
		 * @param problemMatchers the problem matchers to use.
		 */
		constructor(name: string, process: string, args: string[], ...problemMatchers: ProblemMatcher[]);

		/**
		 * Creates a process task.
		 *
		 * @param name the task's name. Is presented in the user interface.
		 * @param process the process to start.
		 * @param args arguments to be passed to the process.
		 * @param options additional options for the started process.
		 * @param problemMatchers the problem matchers to use.
		 */
		constructor(name: string, process: string, args: string[], options: ProcessOptions, ...problemMatchers: ProblemMatcher[]);

		/**
		 * The task's name
		 */
		readonly name: string;

		/**
		 * The task's identifier. If omitted the name is
		 * used as an identifier.
		 */
		identifier: string;

		/**
		 * Whether the task is a background task or not.
		 */
		isBackground: boolean;

		/**
		 * The process to be executed.
		 */
		readonly process: string;

		/**
		 * The arguments passed to the process. Defaults to an empty array.
		 */
		args: string[];

		/**
		 * The task group this task belongs to. Defaults to undefined meaning
		 * that the task doesn't belong to any special group.
		 */
		group?: TaskGroup;

		/**
		 * The process options used when the process is executed.
		 * Defaults to an empty object literal.
		 */
		options: ProcessOptions;

		/**
		 * The terminal options. Defaults to an empty object literal.
		 */
		terminal: TerminalBehaviour;

		/**
		 * The problem matchers attached to the task. Defaults to an empty
		 * array.
		 */
		problemMatchers: ProblemMatcher[];
	}

	export type ShellOptions = {
		/**
		 * The shell executable.
		 */
		executable: string;

		/**
		 * The arguments to be passed to the shell executable used to run the task.
		 */
		shellArgs?: string[];

		/**
		 * The current working directory of the executed shell.
		 * If omitted VSCode's current workspace root is used.
		 */
		cwd?: string;

		/**
		 * The additional environment of the executed shell. If omitted
		 * the parent process' environment is used. If provided it is merged with
		 * the parent process' environment.
		 */
		env?: { [key: string]: string };
	} | {
		/**
		 * The current working directory of the executed shell.
		 * If omitted VSCode's current workspace root is used.
		 */
		cwd: string;

		/**
		 * The additional environment of the executed shell. If omitted
		 * the parent process' environment is used. If provided it is merged with
		 * the parent process' environment.
		 */
		env?: { [key: string]: string };
	} | {
		/**
		 * The current working directory of the executed shell.
		 * If omitted VSCode's current workspace root is used.
		 */
		cwd?: string;

		/**
		 * The additional environment of the executed shell. If omitted
		 * the parent process' environment is used. If provided it is merged with
		 * the parent process' environment.
		 */
		env: { [key: string]: string };
	};

	/**
	 * A task that executes a shell command.
	 */
	export class ShellTask {

		/**
		 * Creates a shell task.
		 *
		 * @param name the task's name. Is presented in the user interface.
		 * @param commandLine the command line to execute.
		 * @param problemMatchers the problem matchers to use.
		 */
		constructor(name: string, commandLine: string, ...problemMatchers: ProblemMatcher[]);

		/**
		 * Creates a shell task.
		 *
		 * @param name the task's name. Is presented in the user interface.
		 * @param commandLine the command line to execute.
		 * @param options additional options used when creating the shell.
		 * @param problemMatchers the problem matchers to use.
		 */
		constructor(name: string, commandLine: string, options: ShellOptions, ...problemMatchers: ProblemMatcher[]);

		/**
		 * The task's name
		 */
		readonly name: string;

		/**
		 * The task's identifier. If omitted the name is
		 * used as an identifier.
		 */
		identifier: string;

		/**
		 * Whether the task is a background task or not.
		 */
		isBackground: boolean;

		/**
		 * The command line to execute.
		 */
		readonly commandLine: string;

		/**
		 * The task group this task belongs to. Defaults to undefined meaning
		 * that the task doesn't belong to any special group.
		 */
		group?: TaskGroup;

		/**
		 * The shell options used when the shell is executed. Defaults to an
		 * empty object literal.
		 */
		options: ShellOptions;

		/**
		 * The terminal options. Defaults to an empty object literal.
		 */
		terminal: TerminalBehaviour;

		/**
		 * The problem matchers attached to the task. Defaults to an empty
		 * array.
		 */
		problemMatchers: ProblemMatcher[];
	}

	export type Task = ProcessTask | ShellTask;

	/**
	 * A task provider allows adding tasks to the task service.
	 * A task provider is registered via #workspace.registerTaskProvider.
	 */
	export interface TaskProvider {
		/**
		 * Provides additional tasks.
		 * @param token A cancellation token.
		 * @return an array of tasks
		 */
		provideTasks(token: CancellationToken): ProviderResult<Task[]>;
	}

	export namespace workspace {
		/**
		 * Register a task provider.
		 *
		 * @param provider A task provider.
		 * @return A [disposable](#Disposable) that unregisters this provider when being disposed.
		 */
		export function registerTaskProvider(provider: TaskProvider): Disposable;
	}

	export namespace window {
		export function sampleFunction(): Thenable<any>;
	}

	export namespace window {
		/**
		 * Create a new [TreeView](#TreeView) instance.
		 *
		 * @param viewId A unique id that identifies the view.
		 * @param provider A [TreeDataProvider](#TreeDataProvider).
		 * @return An instance of [TreeView](#TreeView).
		 */
		export function createTreeView<T>(viewId: string, provider: TreeDataProvider<T>): TreeView<T>;
	}

	/**
	 * A view onto tree data supplied by a [TreeDataProvider](#TreeDataProvider). It allows
	 * individual nodes to be refreshed and the view to be disposed.
	 */
	export interface TreeView<T> {
		/**
		 * Refresh the given nodes
		 */
		refresh(...nodes: T[]): void;

		/**
		 * Dispose this view
		 */
		dispose(): void;
	}

	/**
	 * A data provider for a tree view contribution.
	 *
	 * The contributed tree view will ask the corresponding provider to provide the root
	 * node and resolve children for each node. In addition, the provider could **optionally**
	 * provide the following information for each node:
	 * - label: A human-readable label used for rendering the node.
	 * - hasChildren: Whether the node has children and is expandable.
	 * - clickCommand: A command to execute when the node is clicked.
	 */
	export interface TreeDataProvider<T> {
		/**
		 * Provide the root node. This function will be called when the tree explorer is activated
		 * for the first time. The root node is hidden and its direct children will be displayed on the first level of
		 * the tree explorer.
		 *
		 * @return The root node.
		 */
		provideRootNode(): T | Thenable<T>;

		/**
		 * Resolve the children of `node`.
		 *
		 * @param node The node from which the provider resolves children.
		 * @return Children of `node`.
		 */
		resolveChildren?(node: T): T[] | Thenable<T[]>;

		/**
		 * Provide a human-readable string that will be used for rendering the node. Defaults to
		 * `node.toString()` if not provided.
		 *
		 * @param node The node from which the provider computes label.
		 * @return A human-readable label.
		 */
		getLabel?(node: T): string;

		/**
		 * Determine if `node` has children and is expandable. Defaults to `true` if not provided.
		 *
		 * @param node The node to determine if it has children and is expandable.
		 * @return A boolean that determines if `node` has children and is expandable.
		 */
		getHasChildren?(node: T): boolean;

		/**
		 * Get the command to execute when `node` is clicked.
		 *
		 * Commands can be registered through [registerCommand](#commands.registerCommand). `node` will be provided
		 * as the first argument to the command's callback function.
		 *
		 * @param node The node that the command is associated with.
		 * @return The command to execute when `node` is clicked.
		 */
		getClickCommand?(node: T): Command;
	}

	/**
	 * The contiguous set of modified lines in a diff.
	 */
	export interface LineChange {
		readonly originalStartLineNumber: number;
		readonly originalEndLineNumber: number;
		readonly modifiedStartLineNumber: number;
		readonly modifiedEndLineNumber: number;
	}

	export namespace commands {
		/**
		 * Registers a diff information command that can be invoked via a keyboard shortcut,
		 * a menu item, an action, or directly.
* * Diff information commands are different from ordinary [commands](#commands.registerCommand) as * they only execute when there is an active diff editor when the command is called, and the diff * information has been computed. Also, the command handler of an editor command has access to * the diff information. * * @param command A unique identifier for the command. * @param callback A command handler function with access to the [diff information](#LineChange). * @param thisArg The `this` context used when invoking the handler function. * @return Disposable which unregisters this command on disposal. */ export function registerDiffInformationCommand(command: string, callback: (diff: LineChange[], ...args: any[]) => any, thisArg?: any): Disposable; } export interface Terminal { /** * The name of the terminal. */ readonly name: string; /** * The process ID of the shell process. */ readonly processId: Thenable<number>; /** * Send text to the terminal. The text is written to the stdin of the underlying pty process * (shell) of the terminal. * * @param text The text to send. * @param addNewLine Whether to add a new line to the text being sent, this is normally * required to run a command in the terminal. The character(s) added are \n or \r\n * depending on the platform. This defaults to `true`. */ sendText(text: string, addNewLine?: boolean): void; /** * Show the terminal panel and reveal this terminal in the UI. * * @param preserveFocus When `true` the terminal will not take focus. */ show(preserveFocus?: boolean): void; /** * Hide the terminal panel if this terminal is currently showing. */ hide(): void; /** * Dispose and free associated resources. */ dispose(): void; /** * Experimental API that allows listening to the raw data stream coming from the terminal's * pty process (including ANSI escape sequences). * * @param callback The callback that is triggered when data is sent to the terminal. */ onData(callback: (data: string) => any): void; } }
/** * Defines a problem pattern */
vec_deque.rs
use std::collections::VecDeque; use test::{Bencher, black_box}; #[bench] fn bench_new(b: &mut Bencher) { b.iter(|| { let ring: VecDeque<i32> = VecDeque::new(); black_box(ring); }) } #[bench] fn bench_grow_1025(b: &mut Bencher) { b.iter(|| { let mut deq = VecDeque::new(); for i in 0..1025 { deq.push_front(i); } black_box(deq); }) } #[bench] fn bench_iter_1000(b: &mut Bencher) { let ring: VecDeque<_> = (0..1000).collect(); b.iter(|| { let mut sum = 0; for &i in &ring { sum += i; } black_box(sum); }) } #[bench] fn
(b: &mut Bencher) { let mut ring: VecDeque<_> = (0..1000).collect(); b.iter(|| { let mut sum = 0; for i in &mut ring { sum += *i; } black_box(sum); }) }
bench_mut_iter_1000
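The suite above covers construction, growth, and iteration; a companion benchmark in the same style could measure push/pop churn at both ends of the deque. This is a sketch with an arbitrary name and iteration count, not part of the original suite.

#[bench]
fn bench_push_back_pop_front_1000(b: &mut Bencher) {
    b.iter(|| {
        // Pre-size the deque so growth doesn't dominate the measurement.
        let mut deq = VecDeque::with_capacity(1024);
        for i in 0..1000 {
            deq.push_back(i);
        }
        // Drain from the front, keeping each value live via black_box.
        while let Some(x) = deq.pop_front() {
            black_box(x);
        }
        black_box(deq);
    })
}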
proc.go
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime import ( "internal/cpu" "runtime/internal/atomic" "runtime/internal/sys" "unsafe" ) // Functions called by C code. //go:linkname main //go:linkname goparkunlock //go:linkname newextram //go:linkname acquirep //go:linkname releasep //go:linkname incidlelocked //go:linkname ginit //go:linkname schedinit //go:linkname ready //go:linkname stopm //go:linkname handoffp //go:linkname wakep //go:linkname stoplockedm //go:linkname schedule //go:linkname execute //go:linkname goexit1 //go:linkname reentersyscall //go:linkname reentersyscallblock //go:linkname exitsyscall //go:linkname gfget //go:linkname kickoff //go:linkname mstart1 //go:linkname mexit //go:linkname globrunqput //go:linkname pidleget // Exported for test (see runtime/testdata/testprogcgo/dropm_stub.go). //go:linkname getm // Function called by misc/cgo/test. //go:linkname lockedOSThread // C functions for thread and context management. func newosproc(*m) //go:noescape func malg(bool, bool, *unsafe.Pointer, *uintptr) *g //go:noescape func resetNewG(*g, *unsafe.Pointer, *uintptr) func gogo(*g) func setGContext() func makeGContext(*g, unsafe.Pointer, uintptr) func getTraceback(me, gp *g) func gtraceback(*g) func _cgo_notify_runtime_init_done() func alreadyInCallers() bool func stackfree(*g) // Functions created by the compiler. //extern __go_init_main func main_init() //extern main.main func main_main() var buildVersion = sys.TheVersion // set using cmd/go/internal/modload.ModInfoProg var modinfo string // Goroutine scheduler // The scheduler's job is to distribute ready-to-run goroutines over worker threads. // // The main concepts are: // G - goroutine. // M - worker thread, or machine. // P - processor, a resource that is required to execute Go code. // M must have an associated P to execute Go code, however it can be // blocked or in a syscall w/o an associated P. // // Design doc at https://golang.org/s/go11sched. // Worker thread parking/unparking. // We need to balance between keeping enough running worker threads to utilize // available hardware parallelism and parking excessive running worker threads // to conserve CPU resources and power. This is not simple for two reasons: // (1) scheduler state is intentionally distributed (in particular, per-P work // queues), so it is not possible to compute global predicates on fast paths; // (2) for optimal thread management we would need to know the future (don't park // a worker thread when a new goroutine will be readied in near future). // // Three rejected approaches that would work badly: // 1. Centralize all scheduler state (would inhibit scalability). // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there // is a spare P, unpark a thread and handoff it the thread and the goroutine. // This would lead to thread state thrashing, as the thread that readied the // goroutine can be out of work the very next moment, we will need to park it. // Also, it would destroy locality of computation as we want to preserve // dependent goroutines on the same thread; and introduce additional latency. // 3. Unpark an additional thread whenever we ready a goroutine and there is an // idle P, but don't do handoff. This would lead to excessive thread parking/ // unparking as the additional threads will instantly park without discovering // any work to do. 
// // The current approach: // We unpark an additional thread when we ready a goroutine if (1) there is an // idle P and there are no "spinning" worker threads. A worker thread is considered // spinning if it is out of local work and did not find work in global run queue/ // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning. // Threads unparked this way are also considered spinning; we don't do goroutine // handoff so such threads are out of work initially. Spinning threads do some // spinning looking for work in per-P run queues before parking. If a spinning // thread finds work it takes itself out of the spinning state and proceeds to // execution. If it does not find work it takes itself out of the spinning state // and then parks. // If there is at least one spinning thread (sched.nmspinning>1), we don't unpark // new threads when readying goroutines. To compensate for that, if the last spinning // thread finds work and stops spinning, it must unpark a new spinning thread. // This approach smooths out unjustified spikes of thread unparking, // but at the same time guarantees eventual maximal CPU parallelism utilization. // // The main implementation complication is that we need to be very careful during // spinning->non-spinning thread transition. This transition can race with submission // of a new goroutine, and either one part or another needs to unpark another worker // thread. If they both fail to do that, we can end up with semi-persistent CPU // underutilization. The general pattern for goroutine readying is: submit a goroutine // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning. // The general pattern for spinning->non-spinning transition is: decrement nmspinning, // #StoreLoad-style memory barrier, check all per-P work queues for new work. // Note that all this complexity does not apply to global run queue as we are not // sloppy about thread unparking when submitting to global queue. Also see comments // for nmspinning manipulation. var ( m0 m g0 g raceprocctx0 uintptr ) // main_init_done is a signal used by cgocallbackg that initialization // has been completed. It is made before _cgo_notify_runtime_init_done, // so all cgo calls can rely on it existing. When main_init is complete, // it is closed, meaning cgocallbackg can reliably receive from it. var main_init_done chan bool // mainStarted indicates that the main M has started. var mainStarted bool // runtimeInitTime is the nanotime() at which the runtime started. var runtimeInitTime int64 // Value to use for signal mask for newly created M's. var initSigmask sigset // The main goroutine. func main(unsafe.Pointer) { g := getg() // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. // Using decimal instead of binary GB and MB because // they look nicer in the stack overflow failure message. if sys.PtrSize == 8 { maxstacksize = 1000000000 } else { maxstacksize = 250000000 } // Allow newproc to start new Ms. mainStarted = true if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon systemstack(func() { newm(sysmon, nil) }) } // Lock the main goroutine onto this, the main OS thread, // during initialization. Most programs won't care, but a few // do require certain calls to be made by the main thread. // Those can arrange for main.main to run in the main thread // by calling runtime.LockOSThread during initialization // to preserve the lock. 
	lockOSThread()

	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	if nanotime() == 0 {
		throw("nanotime returning zero")
	}

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	// Record when the world started.
	runtimeInitTime = nanotime()

	main_init_done = make(chan bool)
	if iscgo {
		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		_cgo_notify_runtime_init_done()
	}

	fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	createGcRootsIndex()
	close(main_init_done)

	// For gccgo we have to wait until after main is initialized
	// to enable GC, because initializing main registers the GC roots.
	gcenable()

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if atomic.Load(&runningPanicDefers) != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if atomic.Load(&runningPanicDefers) == 0 {
				break
			}
			Gosched()
		}
	}
	if atomic.Load(&panicking) != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
	}

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
	if raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
func init() {
	expectSystemGoroutine()
	go forcegchelper()
}

func forcegchelper() {
	setSystemGoroutine()

	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		atomic.Store(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1) // this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}

//go:nosplit

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}

// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//go:nosplit
func goschedguarded() {
	mcall(goschedguarded_m)
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
// Reason explains why the goroutine has been parked.
// It is displayed in stack traces and heap dumps.
// Reasons should be unique and descriptive.
// Do not re-use reasons, add new ones.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) { if reason != waitReasonSleep { checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy } mp := acquirem() gp := mp.curg status := readgstatus(gp) if status != _Grunning && status != _Gscanrunning { throw("gopark: bad g status") } mp.waitlock = lock mp.waitunlockf = unlockf gp.waitreason = reason mp.waittraceev = traceEv mp.waittraceskip = traceskip releasem(mp) // can't do anything that might move the G between Ms here. mcall(park_m) } // Puts the current goroutine into a waiting state and unlocks the lock. // The goroutine can be made runnable again by calling goready(gp). func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) { gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip) } func goready(gp *g, traceskip int) { systemstack(func() { ready(gp, traceskip, true) }) } //go:nosplit func acquireSudog() *sudog { // Delicate dance: the semaphore implementation calls // acquireSudog, acquireSudog calls new(sudog), // new calls malloc, malloc can call the garbage collector, // and the garbage collector calls the semaphore implementation // in stopTheWorld. // Break the cycle by doing acquirem/releasem around new(sudog). // The acquirem/releasem increments m.locks during new(sudog), // which keeps the garbage collector from being invoked. mp := acquirem() pp := mp.p.ptr() if len(pp.sudogcache) == 0 { lock(&sched.sudoglock) // First, try to grab a batch from central cache. for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil { s := sched.sudogcache sched.sudogcache = s.next s.next = nil pp.sudogcache = append(pp.sudogcache, s) } unlock(&sched.sudoglock) // If the central cache is empty, allocate a new one. if len(pp.sudogcache) == 0 { pp.sudogcache = append(pp.sudogcache, new(sudog)) } } n := len(pp.sudogcache) s := pp.sudogcache[n-1] pp.sudogcache[n-1] = nil pp.sudogcache = pp.sudogcache[:n-1] if s.elem != nil { throw("acquireSudog: found s.elem != nil in cache") } releasem(mp) return s } //go:nosplit func releaseSudog(s *sudog) { if s.elem != nil { throw("runtime: sudog with non-nil elem") } if s.isSelect { throw("runtime: sudog with non-false isSelect") } if s.next != nil { throw("runtime: sudog with non-nil next") } if s.prev != nil { throw("runtime: sudog with non-nil prev") } if s.waitlink != nil { throw("runtime: sudog with non-nil waitlink") } if s.c != nil { throw("runtime: sudog with non-nil c") } gp := getg() if gp.param != nil { throw("runtime: releaseSudog with non-nil gp.param") } mp := acquirem() // avoid rescheduling to another P pp := mp.p.ptr() if len(pp.sudogcache) == cap(pp.sudogcache) { // Transfer half of local cache to the central cache. var first, last *sudog for len(pp.sudogcache) > cap(pp.sudogcache)/2 { n := len(pp.sudogcache) p := pp.sudogcache[n-1] pp.sudogcache[n-1] = nil pp.sudogcache = pp.sudogcache[:n-1] if first == nil { first = p } else { last.next = p } last = p } lock(&sched.sudoglock) last.next = sched.sudogcache sched.sudogcache = first unlock(&sched.sudoglock) } pp.sudogcache = append(pp.sudogcache, s) releasem(mp) } // funcPC returns the entry PC of the function f. // It assumes that f is a func value. Otherwise the behavior is undefined. // CAREFUL: In programs with plugins, funcPC can return different values // for the same function (because there are actually multiple copies of // the same function in the address space). 
To be safe, don't use the // results of this function in any == expression. It is only safe to // use the result as an address at which to start executing code. // // For gccgo note that this differs from the gc implementation; the gc // implementation adds sys.PtrSize to the address of the interface // value, but GCC's alias analysis decides that that can not be a // reference to the second field of the interface, and in some cases // it drops the initialization of the second field as a dead store. //go:nosplit func funcPC(f interface{}) uintptr { i := (*iface)(unsafe.Pointer(&f)) r := *(*uintptr)(i.data) if cpu.FunctionDescriptors { // With PPC64 ELF ABI v1 function descriptors the // function address is a pointer to a struct whose // first field is the actual PC. r = *(*uintptr)(unsafe.Pointer(r)) } return r } func lockedOSThread() bool { gp := getg() return gp.lockedm != 0 && gp.m.lockedg != 0 } var ( allgs []*g allglock mutex ) func allgadd(gp *g) { if readgstatus(gp) == _Gidle { throw("allgadd: bad status Gidle") } lock(&allglock) allgs = append(allgs, gp) allglen = uintptr(len(allgs)) unlock(&allglock) } const ( // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number. _GoidCacheBatch = 16 ) // cpuinit extracts the environment variable GODEBUG from the environment on // Unix-like operating systems and calls internal/cpu.Initialize. func cpuinit() { const prefix = "GODEBUG=" var env string switch GOOS { case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux": cpu.DebugOptions = true // Similar to goenv_unix but extracts the environment value for // GODEBUG directly. // TODO(moehrmann): remove when general goenvs() can be called before cpuinit() n := int32(0) for argv_index(argv, argc+1+n) != nil { n++ } for i := int32(0); i < n; i++ { p := argv_index(argv, argc+1+i) s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)})) if hasPrefix(s, prefix) { env = gostring(p)[len(prefix):] break } } } cpu.Initialize(env) } func ginit() { _m_ := &m0 _g_ := &g0 _m_.g0 = _g_ _m_.curg = _g_ _g_.m = _m_ setg(_g_) } // The bootstrap sequence is: // // call osinit // call schedinit // make & queue new G // call runtime·mstart // // The new G calls runtime·main. func schedinit() { _g_ := getg() sched.maxmcount = 10000 usestackmaps = probestackmaps() mallocinit() fastrandinit() // must run before mcommoninit mcommoninit(_g_.m) cpuinit() // must run before alginit alginit() // maps must not be used before this call msigsave(_g_.m) initSigmask = _g_.m.sigmask goargs() goenvs() parsedebugvars() gcinit() sched.lastpoll = uint64(nanotime()) procs := ncpu // In 32-bit mode, we can burn a lot of memory on thread stacks. // Try to avoid this by limiting the number of threads we run // by default. if sys.PtrSize == 4 && procs > 32 { procs = 32 } if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 { procs = n } if procresize(procs) != nil { throw("unknown runnable goroutine during bootstrap") } // For cgocheck > 1, we turn on the write barrier at all times // and check all pointer writes. We can't do this until after // procresize because the write barrier needs a P. if debug.cgocheck > 1 { writeBarrier.cgo = true writeBarrier.enabled = true for _, p := range allp { p.wbBuf.reset() } } if buildVersion == "" { // Condition should never trigger. 
This code just serves // to ensure runtime·buildVersion is kept in the resulting binary. buildVersion = "unknown" } if len(modinfo) == 1 { // Condition should never trigger. This code just serves // to ensure runtime·modinfo is kept in the resulting binary. modinfo = "" } } func dumpgstatus(gp *g) { _g_ := getg() print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n") } func checkmcount() { // sched lock is held if mcount() > sched.maxmcount { print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n") throw("thread exhaustion") } } func mcommoninit(mp *m) { _g_ := getg() // g0 stack won't make sense for user (and is not necessary unwindable). if _g_ != _g_.m.g0 { callers(1, mp.createstack[:]) } lock(&sched.lock) if sched.mnext+1 < sched.mnext { throw("runtime: thread ID overflow") } mp.id = sched.mnext sched.mnext++ checkmcount() mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed)) mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed)) if mp.fastrand[0]|mp.fastrand[1] == 0 { mp.fastrand[1] = 1 } mpreinit(mp) // Add to allm so garbage collector doesn't free g->m // when it is just in a register or thread-local storage. mp.alllink = allm // NumCgoCall() iterates over allm w/o schedlock, // so we need to publish it safely. atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp)) unlock(&sched.lock) } var fastrandseed uintptr func fastrandinit() { s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:] getRandomData(s) } // Mark gp ready to run. func ready(gp *g, traceskip int, next bool) { if trace.enabled { traceGoUnpark(gp, traceskip) } status := readgstatus(gp) // Mark runnable. _g_ := getg() mp := acquirem() // disable preemption because it can be holding p in a local var if status&^_Gscan != _Gwaiting { dumpgstatus(gp) throw("bad g->status in ready") } // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq casgstatus(gp, _Gwaiting, _Grunnable) runqput(_g_.m.p.ptr(), gp, next) if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { wakep() } releasem(mp) } // freezeStopWait is a large value that freezetheworld sets // sched.stopwait to in order to request that all Gs permanently stop. const freezeStopWait = 0x7fffffff // freezing is set to non-zero if the runtime is trying to freeze the // world. var freezing uint32 // Similar to stopTheWorld but best-effort and can be called several times. // There is no reverse operation, used during crashing. // This function must not lock any mutexes. func freezetheworld() { atomic.Store(&freezing, 1) // stopwait and preemption requests can be lost // due to races with concurrently executing threads, // so try several times for i := 0; i < 5; i++ { // this should tell the scheduler to not start any new goroutines sched.stopwait = freezeStopWait atomic.Store(&sched.gcwaiting, 1) // this should stop running goroutines if !preemptall() { break // no running goroutines } usleep(1000) } // to be sure usleep(1000) preemptall() usleep(1000) } // All reads and writes of g's status go through readgstatus, casgstatus // castogscanstatus, casfrom_Gscanstatus. //go:nosplit func readgstatus(gp *g) uint32 { return atomic.Load(&gp.atomicstatus) } // The Gscanstatuses are acting like locks and this releases them. 
// If it proves to be a performance hit we should be able to make these // simple atomic stores but for now we are going to throw if // we see an inconsistent state. func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { success := false // Check that transition is valid. switch oldval { default: print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") dumpgstatus(gp) throw("casfrom_Gscanstatus:top gp->status is not in scan state") case _Gscanrunnable, _Gscanwaiting, _Gscanrunning, _Gscansyscall, _Gscanpreempted: if newval == oldval&^_Gscan { success = atomic.Cas(&gp.atomicstatus, oldval, newval) } } if !success { print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") dumpgstatus(gp) throw("casfrom_Gscanstatus: gp->status is not in scan state") } } // This will return false if the gp is not in the expected status and the cas fails. // This acts like a lock acquire while the casfromgstatus acts like a lock release. func castogscanstatus(gp *g, oldval, newval uint32) bool { switch oldval { case _Grunnable, _Grunning, _Gwaiting, _Gsyscall: if newval == oldval|_Gscan { return atomic.Cas(&gp.atomicstatus, oldval, newval) } } print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") throw("castogscanstatus") panic("not reached") } // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus // and casfrom_Gscanstatus instead. // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that // put it in the Gscan state is finished. //go:nosplit func casgstatus(gp *g, oldval, newval uint32) { if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { systemstack(func() { print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") throw("casgstatus: bad incoming values") }) } // See https://golang.org/cl/21503 for justification of the yield delay. const yieldDelay = 5 * 1000 var nextYield int64 // loop if gp->atomicstatus is in a scan state giving // GC time to finish and change the state to oldval. for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ { if oldval == _Gwaiting && gp.atomicstatus == _Grunnable { throw("casgstatus: waiting for Gwaiting but is Grunnable") } if i == 0 { nextYield = nanotime() + yieldDelay } if nanotime() < nextYield { for x := 0; x < 10 && gp.atomicstatus != oldval; x++ { procyield(1) } } else { osyield() nextYield = nanotime() + yieldDelay/2 } } } // casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted. // // TODO(austin): This is the only status operation that both changes // the status and locks the _Gscan bit. Rethink this. func casGToPreemptScan(gp *g, old, new uint32) { if old != _Grunning || new != _Gscan|_Gpreempted { throw("bad g transition") } for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) { } } // casGFromPreempted attempts to transition gp from _Gpreempted to // _Gwaiting. If successful, the caller is responsible for // re-scheduling gp. func casGFromPreempted(gp *g, old, new uint32) bool { if old != _Gpreempted || new != _Gwaiting { throw("bad g transition") } return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting) } // stopTheWorld stops all P's from executing goroutines, interrupting // all goroutines at GC safe points and records reason as the reason // for the stop. On return, only the current goroutine's P is running. 
// stopTheWorld must not be called from a system stack and the caller // must not hold worldsema. The caller must call startTheWorld when // other P's should resume execution. // // stopTheWorld is safe for multiple goroutines to call at the // same time. Each will execute its own stop, and the stops will // be serialized. // // This is also used by routines that do stack dumps. If the system is // in panic or being exited, this may not reliably stop all // goroutines. func stopTheWorld(reason string) { semacquire(&worldsema) getg().m.preemptoff = reason systemstack(stopTheWorldWithSema) } // startTheWorld undoes the effects of stopTheWorld. func startTheWorld() { systemstack(func() { startTheWorldWithSema(false) }) // worldsema must be held over startTheWorldWithSema to ensure // gomaxprocs cannot change while worldsema is held. semrelease(&worldsema) getg().m.preemptoff = "" } // Holding worldsema grants an M the right to try to stop the world // and prevents gomaxprocs from changing concurrently. var worldsema uint32 = 1 // stopTheWorldWithSema is the core implementation of stopTheWorld. // The caller is responsible for acquiring worldsema and disabling // preemption first and then should stopTheWorldWithSema on the system // stack: // // semacquire(&worldsema, 0) // m.preemptoff = "reason" // systemstack(stopTheWorldWithSema) // // When finished, the caller must either call startTheWorld or undo // these three operations separately: // // m.preemptoff = "" // systemstack(startTheWorldWithSema) // semrelease(&worldsema) // // It is allowed to acquire worldsema once and then execute multiple // startTheWorldWithSema/stopTheWorldWithSema pairs. // Other P's are able to execute between successive calls to // startTheWorldWithSema and stopTheWorldWithSema. // Holding worldsema causes any other goroutines invoking // stopTheWorld to block. func stopTheWorldWithSema() { _g_ := getg() // If we hold a lock, then we won't be able to stop another M // that is blocked trying to acquire the lock. if _g_.m.locks > 0 { throw("stopTheWorld: holding locks") } lock(&sched.lock) sched.stopwait = gomaxprocs atomic.Store(&sched.gcwaiting, 1) preemptall() // stop current P _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. sched.stopwait-- // try to retake all P's in Psyscall status for _, p := range allp { s := p.status if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) { if trace.enabled { traceGoSysBlock(p) traceProcStop(p) } p.syscalltick++ sched.stopwait-- } } // stop idle P's for { p := pidleget() if p == nil { break } p.status = _Pgcstop sched.stopwait-- } wait := sched.stopwait > 0 unlock(&sched.lock) // wait for remaining P's to stop voluntarily if wait { for { // wait for 100us, then try to re-preempt in case of any races if notetsleep(&sched.stopnote, 100*1000) { noteclear(&sched.stopnote) break } preemptall() } } // sanity checks bad := "" if sched.stopwait != 0 { bad = "stopTheWorld: not stopped (stopwait != 0)" } else { for _, p := range allp { if p.status != _Pgcstop { bad = "stopTheWorld: not stopped (status != _Pgcstop)" } } } if atomic.Load(&freezing) != 0 { // Some other thread is panicking. This can cause the // sanity checks above to fail if the panic happens in // the signal handler on a stopped thread. Either way, // we should halt this thread. 
lock(&deadlock) lock(&deadlock) } if bad != "" { throw(bad) } } func startTheWorldWithSema(emitTraceEvent bool) int64 { mp := acquirem() // disable preemption because it can be holding p in a local var if netpollinited() { list := netpoll(0) // non-blocking injectglist(&list) } lock(&sched.lock) procs := gomaxprocs if newprocs != 0 { procs = newprocs newprocs = 0 } p1 := procresize(procs) sched.gcwaiting = 0 if sched.sysmonwait != 0 { sched.sysmonwait = 0 notewakeup(&sched.sysmonnote) } unlock(&sched.lock) for p1 != nil { p := p1 p1 = p1.link.ptr() if p.m != 0 { mp := p.m.ptr() p.m = 0 if mp.nextp != 0 { throw("startTheWorld: inconsistent mp->nextp") } mp.nextp.set(p) notewakeup(&mp.park) } else { // Start M to run P. Do not start another M below. newm(nil, p) } } // Capture start-the-world time before doing clean-up tasks. startTime := nanotime() if emitTraceEvent { traceGCSTWDone() } // Wakeup an additional proc in case we have excessive runnable goroutines // in local queues or in the global queue. If we don't, the proc will park itself. // If we have lots of excessive work, resetspinning will unpark additional procs as necessary. if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { wakep() } releasem(mp) return startTime } // First function run by a new goroutine. // This is passed to makecontext. func kickoff() { gp := getg() if gp.traceback != 0 { gtraceback(gp) } fv := gp.entry param := gp.param // When running on the g0 stack we can wind up here without a p, // for example from mcall(exitsyscall0) in exitsyscall, in // which case we can not run a write barrier. // It is also possible for us to get here from the systemstack // call in wbBufFlush, at which point the write barrier buffer // is full and we can not run a write barrier. // Setting gp.entry = nil or gp.param = nil will try to run a // write barrier, so if we are on the g0 stack due to mcall // (systemstack calls mcall) then clear the field using uintptr. // This is OK when gp.param is gp.m.curg, as curg will be kept // alive elsewhere, and gp.entry always points into g, or // to a statically allocated value, or (in the case of mcall) // to the stack. if gp == gp.m.g0 && gp.param == unsafe.Pointer(gp.m.curg) { *(*uintptr)(unsafe.Pointer(&gp.entry)) = 0 *(*uintptr)(unsafe.Pointer(&gp.param)) = 0 } else if gp.m.p == 0 { throw("no p in kickoff") } else { gp.entry = nil gp.param = nil } // Record the entry SP to help stack scan. gp.entrysp = getsp() fv(param) goexit1() } func mstart1() { _g_ := getg() if _g_ != _g_.m.g0 { throw("bad runtime·mstart") } asminit() // Install signal handlers; after minit so that minit can // prepare the thread to be able to handle the signals. // For gccgo minit was called by C code. if _g_.m == &m0 { mstartm0() } if fn := _g_.m.mstartfn; fn != nil { fn() } if _g_.m != &m0 { acquirep(_g_.m.nextp.ptr()) _g_.m.nextp = 0 } schedule() } // mstartm0 implements part of mstart1 that only runs on the m0. // // Write barriers are allowed here because we know the GC can't be // running yet, so they'll be no-ops. // //go:yeswritebarrierrec func mstartm0() { // Create an extra M for callbacks on threads not created by Go. // An extra M is also needed on Windows for callbacks created by // syscall.NewCallback. See issue #6751 for details. if (iscgo || GOOS == "windows") && !cgoHasExtraM { cgoHasExtraM = true newextram() } initsig(false) } // mexit tears down and exits the current thread. 
// // Don't call this directly to exit the thread, since it must run at // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to // unwind the stack to the point that exits the thread. // // It is entered with m.p != nil, so write barriers are allowed. It // will release the P before exiting. // //go:yeswritebarrierrec func mexit(osStack bool) { g := getg() m := g.m if m == &m0 { // This is the main thread. Just wedge it. // // On Linux, exiting the main thread puts the process // into a non-waitable zombie state. On Plan 9, // exiting the main thread unblocks wait even though // other threads are still running. On Solaris we can // neither exitThread nor return from mstart. Other // bad things probably happen on other platforms. // // We could try to clean up this M more before wedging // it, but that complicates signal handling. handoffp(releasep()) lock(&sched.lock) sched.nmfreed++ checkdead() unlock(&sched.lock) notesleep(&m.park) throw("locked m0 woke up") } sigblock() unminit() // Free the gsignal stack. if m.gsignal != nil { stackfree(m.gsignal) // On some platforms, when calling into VDSO (e.g. nanotime) // we store our g on the gsignal stack, if there is one. // Now the stack is freed, unlink it from the m, so we // won't write to it when calling VDSO code. m.gsignal = nil } // Remove m from allm. lock(&sched.lock) for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink { if *pprev == m { *pprev = m.alllink goto found } } throw("m not found in allm") found: if !osStack { // Delay reaping m until it's done with the stack. // // If this is using an OS stack, the OS will free it // so there's no need for reaping. atomic.Store(&m.freeWait, 1) // Put m on the free list, though it will not be reaped until // freeWait is 0. Note that the free list must not be linked // through alllink because some functions walk allm without // locking, so may be using alllink. m.freelink = sched.freem sched.freem = m } unlock(&sched.lock) // Release the P. handoffp(releasep()) // After this point we must not have write barriers. // Invoke the deadlock detector. This must happen after // handoffp because it may have started a new M to take our // P's work. lock(&sched.lock) sched.nmfreed++ checkdead() unlock(&sched.lock) if osStack { // Return from mstart and let the system thread // library free the g0 stack and terminate the thread. return } // mstart is the thread's entry point, so there's nothing to // return to. Exit the thread directly. exitThread will clear // m.freeWait when it's done with the stack and the m can be // reaped. exitThread(&m.freeWait) } // forEachP calls fn(p) for every P p when p reaches a GC safe point. // If a P is currently executing code, this will bring the P to a GC // safe point and execute fn on that P. If the P is not executing code // (it is idle or in a syscall), this will call fn(p) directly while // preventing the P from exiting its state. This does not ensure that // fn will run on every CPU executing Go code, but it acts as a global // memory barrier. GC uses this as a "ragged barrier." // // The caller must hold worldsema. // //go:systemstack func forEachP(fn func(*p)) { mp := acquirem() _p_ := getg().m.p.ptr() lock(&sched.lock) if sched.safePointWait != 0 { throw("forEachP: sched.safePointWait != 0") } sched.safePointWait = gomaxprocs - 1 sched.safePointFn = fn // Ask all Ps to run the safe point function. 
for _, p := range allp { if p != _p_ { atomic.Store(&p.runSafePointFn, 1) } } preemptall() // Any P entering _Pidle or _Psyscall from now on will observe // p.runSafePointFn == 1 and will call runSafePointFn when // changing its status to _Pidle/_Psyscall. // Run safe point function for all idle Ps. sched.pidle will // not change because we hold sched.lock. for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() { if atomic.Cas(&p.runSafePointFn, 1, 0) { fn(p) sched.safePointWait-- } } wait := sched.safePointWait > 0 unlock(&sched.lock) // Run fn for the current P. fn(_p_) // Force Ps currently in _Psyscall into _Pidle and hand them // off to induce safe point function execution. for _, p := range allp { s := p.status if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) { if trace.enabled { traceGoSysBlock(p) traceProcStop(p) } p.syscalltick++ handoffp(p) } } // Wait for remaining Ps to run fn. if wait { for { // Wait for 100us, then try to re-preempt in // case of any races. // // Requires system stack. if notetsleep(&sched.safePointNote, 100*1000) { noteclear(&sched.safePointNote) break } preemptall() } } if sched.safePointWait != 0 { throw("forEachP: not done") } for _, p := range allp { if p.runSafePointFn != 0 { throw("forEachP: P did not run fn") } } lock(&sched.lock) sched.safePointFn = nil unlock(&sched.lock) releasem(mp) } // runSafePointFn runs the safe point function, if any, for this P. // This should be called like // // if getg().m.p.runSafePointFn != 0 { // runSafePointFn() // } // // runSafePointFn must be checked on any transition in to _Pidle or // _Psyscall to avoid a race where forEachP sees that the P is running // just before the P goes into _Pidle/_Psyscall and neither forEachP // nor the P run the safe-point function. func runSafePointFn() { p := getg().m.p.ptr() // Resolve the race between forEachP running the safe-point // function on this P's behalf and this P running the // safe-point function directly. if !atomic.Cas(&p.runSafePointFn, 1, 0) { return } sched.safePointFn(p) lock(&sched.lock) sched.safePointWait-- if sched.safePointWait == 0 { notewakeup(&sched.safePointNote) } unlock(&sched.lock) } // Allocate a new m unassociated with any thread. // Can use p for allocation context if needed. // fn is recorded as the new m's m.mstartfn. // // This function is allowed to have write barriers even if the caller // isn't because it borrows _p_. // //go:yeswritebarrierrec func allocm(_p_ *p, fn func(), allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) { _g_ := getg() acquirem() // disable GC because it can be called from sysmon if _g_.m.p == 0 { acquirep(_p_) // temporarily borrow p for mallocs in this function } // Release the free M list. We need to do this somewhere and // this may free up a stack we can use. if sched.freem != nil { lock(&sched.lock) var newList *m for freem := sched.freem; freem != nil; { if freem.freeWait != 0 { next := freem.freelink freem.freelink = newList newList = freem freem = next continue } stackfree(freem.g0) freem = freem.freelink } sched.freem = newList unlock(&sched.lock) } mp = new(m) mp.mstartfn = fn mcommoninit(mp) mp.g0 = malg(allocatestack, false, &g0Stack, &g0StackSize) mp.g0.m = mp if _p_ == _g_.m.p.ptr() { releasep() } releasem(_g_.m) return mp, g0Stack, g0StackSize } // needm is called when a cgo callback happens on a // thread without an m (a thread not created by Go). // In this case, needm is expected to find an m to use // and return with m, g initialized correctly. 
// Since m and g are not set now (likely nil, but see below) // needm is limited in what routines it can call. In particular // it can only call nosplit functions (textflag 7) and cannot // do any scheduling that requires an m. // // In order to avoid needing heavy lifting here, we adopt // the following strategy: there is a stack of available m's // that can be stolen. Using compare-and-swap // to pop from the stack has ABA races, so we simulate // a lock by doing an exchange (via Casuintptr) to steal the stack // head and replace the top pointer with MLOCKED (1). // This serves as a simple spin lock that we can use even // without an m. The thread that locks the stack in this way // unlocks the stack by storing a valid stack head pointer. // // In order to make sure that there is always an m structure // available to be stolen, we maintain the invariant that there // is always one more than needed. At the beginning of the // program (if cgo is in use) the list is seeded with a single m. // If needm finds that it has taken the last m off the list, its job // is - once it has installed its own m so that it can do things like // allocate memory - to create a spare m and put it on the list. // // Each of these extra m's also has a g0 and a curg that are // pressed into service as the scheduling stack and current // goroutine for the duration of the cgo callback. // // When the callback is done with the m, it calls dropm to // put the m back on the list. //go:nosplit func needm(x byte) { if (iscgo || GOOS == "windows") && !cgoHasExtraM { // Can happen if C/C++ code calls Go from a global ctor. // Can also happen on Windows if a global ctor uses a // callback created by syscall.NewCallback. See issue #6751 // for details. // // Can not throw, because scheduler is not initialized yet. write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback))) exit(1) } // Lock extra list, take head, unlock popped list. // nilokay=false is safe here because of the invariant above, // that the extra list always contains or will soon contain // at least one m. mp := lockextra(false) // Set needextram when we've just emptied the list, // so that the eventual call into cgocallbackg will // allocate a new m for the extra list. We delay the // allocation until then so that it can be done // after exitsyscall makes sure it is okay to be // running at all (that is, there's no garbage collection // running right now). mp.needextram = mp.schedlink == 0 extraMCount-- unlockextra(mp.schedlink.ptr()) // Save and block signals before installing g. // Once g is installed, any incoming signals will try to execute, // but we won't have the sigaltstack settings and other data // set up appropriately until the end of minit, which will // unblock the signals. This is the same dance as when // starting a new m to run Go code via newosproc. msigsave(mp) sigblock() // Install g (= m->curg). setg(mp.curg) // Initialize this thread to use the m. asminit() minit() setGContext() // mp.curg is now a real goroutine. casgstatus(mp.curg, _Gdead, _Gsyscall) atomic.Xadd(&sched.ngsys, -1) } var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n") // newextram allocates m's and puts them on the extra list. // It is called with a working local m, so that it can do things // like call schedlock and allocate. func newextram() { c := atomic.Xchg(&extraMWaiters, 0) if c > 0 { for i := uint32(0); i < c; i++ { oneNewExtraM() } } else { // Make sure there is at least one extra M. 
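		// lockextra below acts as the spin lock described in the
		// needm comment above: it exchanges the list head for a
		// sentinel, and the matching unlockextra publishes a head
		// again. A sketch of the protocol (illustrative, not extra
		// code to run):
		//
		//	head := lockextra(true) // list is now locked
		//	...                     // inspect or rewrite the list
		//	unlockextra(head)       // publish a head, unlocking
		//
		// Here the list is only inspected, so the same head is
		// stored straight back.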
mp := lockextra(true) unlockextra(mp) if mp == nil { oneNewExtraM() } } } // oneNewExtraM allocates an m and puts it on the extra list. func oneNewExtraM() { // Create extra goroutine locked to extra m. // The goroutine is the context in which the cgo callback will run. // The sched.pc will never be returned to, but setting it to // goexit makes clear to the traceback routines where // the goroutine stack ends. mp, g0SP, g0SPSize := allocm(nil, nil, true) gp := malg(true, false, nil, nil) // malg returns status as _Gidle. Change to _Gdead before // adding to allg where GC can see it. We use _Gdead to hide // this from tracebacks and stack scans since it isn't a // "real" goroutine until needm grabs it. casgstatus(gp, _Gidle, _Gdead) gp.m = mp mp.curg = gp mp.lockedInt++ mp.lockedg.set(gp) gp.lockedm.set(mp) gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)) // put on allg for garbage collector allgadd(gp) // The context for gp will be set up in needm. // Here we need to set the context for g0. makeGContext(mp.g0, g0SP, g0SPSize) // gp is now on the allg list, but we don't want it to be // counted by gcount. It would be more "proper" to increment // sched.ngfree, but that requires locking. Incrementing ngsys // has the same effect. atomic.Xadd(&sched.ngsys, +1) // Add m to the extra list. mnext := lockextra(true) mp.schedlink.set(mnext) extraMCount++ unlockextra(mp) } // dropm is called when a cgo callback has called needm but is now // done with the callback and returning back into the non-Go thread. // It puts the current m back onto the extra list. // // The main expense here is the call to signalstack to release the // m's signal stack, and then the call to needm on the next callback // from this thread. It is tempting to try to save the m for next time, // which would eliminate both these costs, but there might not be // a next time: the current thread (which Go does not control) might exit. // If we saved the m for that thread, there would be an m leak each time // such a thread exited. Instead, we acquire and release an m on each // call. These should typically not be scheduling operations, just a few // atomics, so the cost should be small. // // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread // variable using pthread_key_create. Unlike the pthread keys we already use // on OS X, this dummy key would never be read by Go code. It would exist // only so that we could register at thread-exit-time destructor. // That destructor would put the m back onto the extra list. // This is purely a performance optimization. The current version, // in which dropm happens on each cgo call, is still correct too. // We may have to keep the current version on systems with cgo // but without pthreads, like Windows. // // CgocallBackDone calls this after releasing p, so no write barriers. //go:nowritebarrierrec func dropm() { // Clear m and g, and return m to the extra list. // After the call to setg we can only call nosplit functions // with no pointer manipulation. mp := getg().m // Return mp.curg to dead state. casgstatus(mp.curg, _Gsyscall, _Gdead) mp.curg.preemptStop = false atomic.Xadd(&sched.ngsys, +1) // Block signals before unminit. // Unminit unregisters the signal handling stack (but needs g on some systems). // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers. // It's important not to try to handle a signal between those two steps. 
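	// The ordering below is deliberate: the signal mask is saved
	// and signals are blocked before unminit tears down the signal
	// stack, and g is cleared before mp is published back to the
	// extra list, so a late signal cannot run a Go handler on a
	// half-torn-down m.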
sigmask := mp.sigmask sigblock() unminit() // gccgo sets the stack to Gdead here, because the splitstack // context is not initialized. atomic.Store(&mp.curg.atomicstatus, _Gdead) mp.curg.gcstack = 0 mp.curg.gcnextsp = 0 mnext := lockextra(true) extraMCount++ mp.schedlink.set(mnext) setg(nil) // Commit the release of mp. unlockextra(mp) msigrestore(sigmask) } // A helper function for EnsureDropM. func getm() uintptr { return uintptr(unsafe.Pointer(getg().m)) } var extram uintptr var extraMCount uint32 // Protected by lockextra var extraMWaiters uint32 // lockextra locks the extra list and returns the list head. // The caller must unlock the list by storing a new list head // to extram. If nilokay is true, then lockextra will // return a nil list head if that's what it finds. If nilokay is false, // lockextra will keep waiting until the list head is no longer nil. //go:nosplit //go:nowritebarrierrec func lockextra(nilokay bool) *m { const locked = 1 incr := false for { old := atomic.Loaduintptr(&extram) if old == locked { yield := osyield yield() continue } if old == 0 && !nilokay { if !incr { // Add 1 to the number of threads // waiting for an M. // This is cleared by newextram. atomic.Xadd(&extraMWaiters, 1) incr = true } usleep(1) continue } if atomic.Casuintptr(&extram, old, locked) { return (*m)(unsafe.Pointer(old)) } yield := osyield yield() continue } } //go:nosplit //go:nowritebarrierrec func unlockextra(mp *m) { atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) } // execLock serializes exec and clone to avoid bugs or unspecified behaviour // around exec'ing while creating/destroying threads. See issue #19546. var execLock rwmutex // newmHandoff contains a list of m structures that need new OS threads. // This is used by newm in situations where newm itself can't safely // start an OS thread. var newmHandoff struct { lock mutex // newm points to a list of M structures that need new OS // threads. The list is linked through m.schedlink. newm muintptr // waiting indicates that wake needs to be notified when an m // is put on the list. waiting bool wake note // haveTemplateThread indicates that the templateThread has // been started. This is not protected by lock. Use cas to set // to 1. haveTemplateThread uint32 } // Create a new m. It will start off with a call to fn, or else the scheduler. // fn needs to be static and not a heap allocated closure. // May run with m.p==nil, so write barriers are not allowed. //go:nowritebarrierrec func newm(fn func(), _p_ *p) { mp, _, _ := allocm(_p_, fn, false) mp.nextp.set(_p_) mp.sigmask = initSigmask if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" { // We're on a locked M or a thread that may have been // started by C. The kernel state of this thread may // be strange (the user may have locked it for that // purpose). We don't want to clone that into another // thread. Instead, ask a known-good thread to create // the thread for us. // // This is disabled on Plan 9. See golang.org/issue/22227. // // TODO: This may be unnecessary on Windows, which // doesn't model thread creation off fork. lock(&newmHandoff.lock) if newmHandoff.haveTemplateThread == 0 { throw("on a locked thread with no template thread") } mp.schedlink = newmHandoff.newm newmHandoff.newm.set(mp) if newmHandoff.waiting { newmHandoff.waiting = false notewakeup(&newmHandoff.wake) } unlock(&newmHandoff.lock) return } newm1(mp) } func newm1(mp *m) { execLock.rlock() // Prevent process clone. 
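	// newosproc runs under execLock's read lock: thread creation
	// must not interleave with an exec, which takes the write side
	// (see the execLock comment above and issue #19546).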
newosproc(mp) execLock.runlock() } // startTemplateThread starts the template thread if it is not already // running. // // The calling thread must itself be in a known-good state. func startTemplateThread() { if GOARCH == "wasm" { // no threads on wasm yet return } if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) { return } newm(templateThread, nil) } // templateThread is a thread in a known-good state that exists solely // to start new threads in known-good states when the calling thread // may not be in a good state. // // Many programs never need this, so templateThread is started lazily // when we first enter a state that might lead to running on a thread // in an unknown state. // // templateThread runs on an M without a P, so it must not have write // barriers. // //go:nowritebarrierrec func templateThread() { lock(&sched.lock) sched.nmsys++ checkdead() unlock(&sched.lock) for { lock(&newmHandoff.lock) for newmHandoff.newm != 0 { newm := newmHandoff.newm.ptr() newmHandoff.newm = 0 unlock(&newmHandoff.lock) for newm != nil { next := newm.schedlink.ptr() newm.schedlink = 0 newm1(newm) newm = next } lock(&newmHandoff.lock) } newmHandoff.waiting = true noteclear(&newmHandoff.wake) unlock(&newmHandoff.lock) notesleep(&newmHandoff.wake) } } // Stops execution of the current m until new work is available. // Returns with acquired P. func stopm() { _g_ := getg() if _g_.m.locks != 0 { throw("stopm holding locks") } if _g_.m.p != 0 { throw("stopm holding p") } if _g_.m.spinning { throw("stopm spinning") } lock(&sched.lock) mput(_g_.m) unlock(&sched.lock) notesleep(&_g_.m.park) noteclear(&_g_.m.park) acquirep(_g_.m.nextp.ptr()) _g_.m.nextp = 0 } func mspinning() { // startm's caller incremented nmspinning. Set the new M's spinning. getg().m.spinning = true } // Schedules some M to run the p (creates an M if necessary). // If p==nil, tries to get an idle P, if no idle P's does nothing. // May run with m.p==nil, so write barriers are not allowed. // If spinning is set, the caller has incremented nmspinning and startm will // either decrement nmspinning or set m.spinning in the newly started M. //go:nowritebarrierrec func startm(_p_ *p, spinning bool) { lock(&sched.lock) if _p_ == nil { _p_ = pidleget() if _p_ == nil { unlock(&sched.lock) if spinning { // The caller incremented nmspinning, but there are no idle Ps, // so it's okay to just undo the increment and give up. if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { throw("startm: negative nmspinning") } } return } } mp := mget() unlock(&sched.lock) if mp == nil { var fn func() if spinning { // The caller incremented nmspinning, so set m.spinning in the new M. fn = mspinning } newm(fn, _p_) return } if mp.spinning { throw("startm: m is spinning") } if mp.nextp != 0 { throw("startm: m has p") } if spinning && !runqempty(_p_) { throw("startm: p has runnable gs") } // The caller incremented nmspinning, so set m.spinning in the new M. mp.spinning = spinning mp.nextp.set(_p_) notewakeup(&mp.park) } // Hands off P from syscall or locked M. // Always runs without a P, so write barriers are not allowed. //go:nowritebarrierrec func handoffp(_p_ *p) { // handoffp must start an M in any situation where // findrunnable would return a G to run on _p_. 
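	// The checks below mirror findrunnable: local and global run
	// queues, GC mark work, spinning Ms, safe-point functions, GC
	// stops, and finally the netpoller, before the P is parked on
	// the idle list.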
// if it has local work, start it straight away if !runqempty(_p_) || sched.runqsize != 0 { startm(_p_, false) return } // if it has GC work, start it straight away if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { startm(_p_, false) return } // no local work, check that there are no spinning/idle M's, // otherwise our help is not required if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic startm(_p_, true) return } lock(&sched.lock) if sched.gcwaiting != 0 { _p_.status = _Pgcstop sched.stopwait-- if sched.stopwait == 0 { notewakeup(&sched.stopnote) } unlock(&sched.lock) return } if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { sched.safePointFn(_p_) sched.safePointWait-- if sched.safePointWait == 0 { notewakeup(&sched.safePointNote) } } if sched.runqsize != 0 { unlock(&sched.lock) startm(_p_, false) return } // If this is the last running P and nobody is polling network, // need to wakeup another M to poll network. if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { unlock(&sched.lock) startm(_p_, false) return } if when := nobarrierWakeTime(_p_); when != 0 { wakeNetPoller(when) } pidleput(_p_) unlock(&sched.lock) } // Tries to add one more P to execute G's. // Called when a G is made runnable (newproc, ready). func wakep() { // be conservative about spinning threads if !atomic.Cas(&sched.nmspinning, 0, 1) { return } startm(nil, true) } // Stops execution of the current m that is locked to a g until the g is runnable again. // Returns with acquired P. func stoplockedm() { _g_ := getg() if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m { throw("stoplockedm: inconsistent locking") } if _g_.m.p != 0 { // Schedule another M to run this p. _p_ := releasep() handoffp(_p_) } incidlelocked(1) // Wait until another thread schedules lockedg again. notesleep(&_g_.m.park) noteclear(&_g_.m.park) status := readgstatus(_g_.m.lockedg.ptr()) if status&^_Gscan != _Grunnable { print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n") dumpgstatus(_g_) throw("stoplockedm: not runnable") } acquirep(_g_.m.nextp.ptr()) _g_.m.nextp = 0 } // Schedules the locked m to run the locked gp. // May run during STW, so write barriers are not allowed. //go:nowritebarrierrec func startlockedm(gp *g) { _g_ := getg() mp := gp.lockedm.ptr() if mp == _g_.m { throw("startlockedm: locked to me") } if mp.nextp != 0 { throw("startlockedm: m has p") } // directly handoff current P to the locked m incidlelocked(-1) _p_ := releasep() mp.nextp.set(_p_) notewakeup(&mp.park) stopm() } // Stops the current m for stopTheWorld. // Returns when the world is restarted. func gcstopm() { _g_ := getg() if sched.gcwaiting == 0 { throw("gcstopm: not waiting for gc") } if _g_.m.spinning { _g_.m.spinning = false // OK to just drop nmspinning here, // startTheWorld will unpark threads as necessary. if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { throw("gcstopm: negative nmspinning") } } _p_ := releasep() lock(&sched.lock) _p_.status = _Pgcstop sched.stopwait-- if sched.stopwait == 0 { notewakeup(&sched.stopnote) } unlock(&sched.lock) stopm() } // Schedules gp to run on the current M. // If inheritTime is true, gp inherits the remaining time in the // current time slice. Otherwise, it starts a new time slice. // Never returns. // // Write barriers are allowed because this is called immediately after // acquiring a P in several places. 
// //go:yeswritebarrierrec func execute(gp *g, inheritTime bool) { _g_ := getg() // Assign gp.m before entering _Grunning so running Gs have an // M. _g_.m.curg = gp gp.m = _g_.m casgstatus(gp, _Grunnable, _Grunning) gp.waitsince = 0 gp.preempt = false if !inheritTime { _g_.m.p.ptr().schedtick++ } // Check whether the profiler needs to be turned on or off. hz := sched.profilehz if _g_.m.profilehz != hz { setThreadCPUProfiler(hz) } if trace.enabled { // GoSysExit has to happen when we have a P, but before GoStart. // So we emit it here. if gp.syscallsp != 0 && gp.sysblocktraced { traceGoSysExit(gp.sysexitticks) } traceGoStart() } gogo(gp) } // Finds a runnable goroutine to execute. // Tries to steal from other P's, get g from local or global queue, poll network. func findrunnable() (gp *g, inheritTime bool) { _g_ := getg() // The conditions here and in handoffp must agree: if // findrunnable would return a G to run, handoffp must start // an M. top: _p_ := _g_.m.p.ptr() if sched.gcwaiting != 0 { gcstopm() goto top } if _p_.runSafePointFn != 0 { runSafePointFn() } now, pollUntil, _ := checkTimers(_p_, 0) if fingwait && fingwake { if gp := wakefing(); gp != nil { ready(gp, 0, true) } } if *cgo_yield != nil { asmcgocall(*cgo_yield, nil) } // local runq if gp, inheritTime := runqget(_p_); gp != nil { return gp, inheritTime } // global runq if sched.runqsize != 0 { lock(&sched.lock) gp := globrunqget(_p_, 0) unlock(&sched.lock) if gp != nil { return gp, false } } // Poll network. // This netpoll is only an optimization before we resort to stealing. // We can safely skip it if there are no waiters or a thread is blocked // in netpoll already. If there is any kind of logical race with that // blocked thread (e.g. it has already returned from netpoll, but does // not set lastpoll yet), this thread will do blocking netpoll below // anyway. if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 { if list := netpoll(0); !list.empty() { // non-blocking gp := list.pop() injectglist(&list) casgstatus(gp, _Gwaiting, _Grunnable) if trace.enabled { traceGoUnpark(gp, 0) } return gp, false } } // Steal work from other P's. procs := uint32(gomaxprocs) ranTimer := false // If number of spinning M's >= number of busy P's, block. // This is necessary to prevent excessive CPU consumption // when GOMAXPROCS>>1 but the program parallelism is low. if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) { goto stop } if !_g_.m.spinning { _g_.m.spinning = true atomic.Xadd(&sched.nmspinning, 1) } for i := 0; i < 4; i++ { for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() { if sched.gcwaiting != 0 { goto top } stealRunNextG := i > 2 // first look for ready queues with more than 1 g p2 := allp[enum.position()] if _p_ == p2 { continue } if gp := runqsteal(_p_, p2, stealRunNextG); gp != nil { return gp, false } // Consider stealing timers from p2. // This call to checkTimers is the only place where // we hold a lock on a different P's timers. // Lock contention can be a problem here, so avoid // grabbing the lock if p2 is running and not marked // for preemption. If p2 is running and not being // preempted we assume it will handle its own timers. if i > 2 && shouldStealTimers(p2) { tnow, w, ran := checkTimers(p2, now) now = tnow if w != 0 && (pollUntil == 0 || w < pollUntil) { pollUntil = w } if ran { // Running the timers may have // made an arbitrary number of G's // ready and added them to this P's // local run queue. 
					// That invalidates the assumption
					// of runqsteal that it always has
					// room to add stolen G's. So check
					// now if there is a local G to run.
					if gp, inheritTime := runqget(_p_); gp != nil {
						return gp, inheritTime
					}
					ranTimer = true
				}
			}
		}
	}
	if ranTimer {
		// Running a timer may have made some goroutine ready.
		goto top
	}

stop:

	// We have nothing to do. If we're in the GC mark phase, can
	// safely scan and blacken objects, and have work to do, run
	// idle-time marking rather than give up the P.
	if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
		gp := _p_.gcBgMarkWorker.ptr()
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.enabled {
			traceGoUnpark(gp, 0)
		}
		return gp, false
	}

	delta := int64(-1)
	if pollUntil != 0 {
		// checkTimers ensures that pollUntil > now.
		delta = pollUntil - now
	}

	// wasm only:
	// If a callback returned and no other goroutine is awake,
	// then pause execution until a callback was triggered.
	if beforeIdle(delta) {
		// At least one goroutine got woken.
		goto top
	}

	// Before we drop our P, make a snapshot of the allp slice,
	// which can change underfoot once we no longer block
	// safe-points. We don't need to snapshot the contents because
	// everything up to cap(allp) is immutable.
	allpSnapshot := allp

	// return P and block
	lock(&sched.lock)
	if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if sched.runqsize != 0 {
		gp := globrunqget(_p_, 0)
		unlock(&sched.lock)
		return gp, false
	}
	if releasep() != _p_ {
		throw("findrunnable: wrong p")
	}
	pidleput(_p_)
	unlock(&sched.lock)

	// Delicate dance: thread transitions from spinning to non-spinning state,
	// potentially concurrently with submission of new goroutines. We must
	// drop nmspinning first and then check all per-P queues again (with
	// #StoreLoad memory barrier in between). If we do it the other way around,
	// another thread can submit a goroutine after we've checked all run queues
	// but before we drop nmspinning; as a result nobody will unpark a thread
	// to run the goroutine.
	// If we discover new work below, we need to restore m.spinning as a signal
	// for resetspinning to unpark a new worker thread (because there can be more
	// than one starving goroutine). However, if after discovering new work
	// we also observe no idle Ps, it is OK to just park the current thread:
	// the system is fully loaded so no spinning threads are required.
	// Also see "Worker thread parking/unparking" comment at the top of the file.
	wasSpinning := _g_.m.spinning
	if _g_.m.spinning {
		_g_.m.spinning = false
		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
			throw("findrunnable: negative nmspinning")
		}
	}

	// check all runqueues once again
	for _, _p_ := range allpSnapshot {
		if !runqempty(_p_) {
			lock(&sched.lock)
			_p_ = pidleget()
			unlock(&sched.lock)
			if _p_ != nil {
				acquirep(_p_)
				if wasSpinning {
					_g_.m.spinning = true
					atomic.Xadd(&sched.nmspinning, 1)
				}
				goto top
			}
			break
		}
	}

	// Check for idle-priority GC work again.
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
		lock(&sched.lock)
		_p_ = pidleget()
		if _p_ != nil && _p_.gcBgMarkWorker == 0 {
			pidleput(_p_)
			_p_ = nil
		}
		unlock(&sched.lock)
		if _p_ != nil {
			acquirep(_p_)
			if wasSpinning {
				_g_.m.spinning = true
				atomic.Xadd(&sched.nmspinning, 1)
			}
			// Go back to idle GC check.
goto stop } } // poll network if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 { atomic.Store64(&sched.pollUntil, uint64(pollUntil)) if _g_.m.p != 0 { throw("findrunnable: netpoll with p") } if _g_.m.spinning { throw("findrunnable: netpoll with spinning") } if faketime != 0 { // When using fake time, just poll. delta = 0 } list := netpoll(delta) // block until new work is available atomic.Store64(&sched.pollUntil, 0) atomic.Store64(&sched.lastpoll, uint64(nanotime())) if faketime != 0 && list.empty() { // Using fake time and nothing is ready; stop M. // When all M's stop, checkdead will call timejump. stopm() goto top } lock(&sched.lock) _p_ = pidleget() unlock(&sched.lock) if _p_ == nil { injectglist(&list) } else { acquirep(_p_) if !list.empty() { gp := list.pop() injectglist(&list) casgstatus(gp, _Gwaiting, _Grunnable) if trace.enabled { traceGoUnpark(gp, 0) } return gp, false } if wasSpinning { _g_.m.spinning = true atomic.Xadd(&sched.nmspinning, 1) } goto top } } else if pollUntil != 0 && netpollinited() { pollerPollUntil := int64(atomic.Load64(&sched.pollUntil)) if pollerPollUntil == 0 || pollerPollUntil > pollUntil { netpollBreak() } } stopm() goto top } // pollWork reports whether there is non-background work this P could // be doing. This is a fairly lightweight check to be used for // background work loops, like idle GC. It checks a subset of the // conditions checked by the actual scheduler. func pollWork() bool { if sched.runqsize != 0 { return true } p := getg().m.p.ptr() if !runqempty(p) { return true } if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { if list := netpoll(0); !list.empty() { injectglist(&list) return true } } return false } // wakeNetPoller wakes up the thread sleeping in the network poller, // if there is one, and if it isn't going to wake up anyhow before // the when argument. func wakeNetPoller(when int64) { if atomic.Load64(&sched.lastpoll) == 0 { // In findrunnable we ensure that when polling the pollUntil // field is either zero or the time to which the current // poll is expected to run. This can have a spurious wakeup // but should never miss a wakeup. pollerPollUntil := int64(atomic.Load64(&sched.pollUntil)) if pollerPollUntil == 0 || pollerPollUntil > when { netpollBreak() } } } func resetspinning() { _g_ := getg() if !_g_.m.spinning { throw("resetspinning: not a spinning m") } _g_.m.spinning = false nmspinning := atomic.Xadd(&sched.nmspinning, -1) if int32(nmspinning) < 0 { throw("findrunnable: negative nmspinning") } // M wakeup policy is deliberately somewhat conservative, so check if we // need to wakeup another P here. See "Worker thread parking/unparking" // comment at the top of the file for details. if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { wakep() } } // Injects the list of runnable G's into the scheduler and clears glist. // Can run concurrently with GC. func injectglist(glist *gList) { if glist.empty() { return } if trace.enabled { for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { traceGoUnpark(gp, 0) } } lock(&sched.lock) var n int for n = 0; !glist.empty(); n++ { gp := glist.pop() casgstatus(gp, _Gwaiting, _Grunnable) globrunqput(gp) } unlock(&sched.lock) for ; n != 0 && sched.npidle != 0; n-- { startm(nil, false) } *glist = gList{} } // One round of scheduler: find a runnable goroutine and execute it. // Never returns. 
func schedule() { _g_ := getg() if _g_.m.locks != 0 { throw("schedule: holding locks") } if _g_.m.lockedg != 0 { stoplockedm() execute(_g_.m.lockedg.ptr(), false) // Never returns. } // We should not schedule away from a g that is executing a cgo call, // since the cgo call is using the m's g0 stack. if _g_.m.incgo { throw("schedule: in cgo") } top: pp := _g_.m.p.ptr() pp.preempt = false if sched.gcwaiting != 0 { gcstopm() goto top } if pp.runSafePointFn != 0 { runSafePointFn() } // Sanity check: if we are spinning, the run queue should be empty. // Check this before calling checkTimers, as that might call // goready to put a ready goroutine on the local run queue. if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) { throw("schedule: spinning with local work") } checkTimers(pp, 0) var gp *g var inheritTime bool // Normal goroutines will check for need to wakeP in ready, // but GCworkers and tracereaders will not, so the check must // be done here instead. tryWakeP := false if trace.enabled || trace.shutdown { gp = traceReader() if gp != nil { casgstatus(gp, _Gwaiting, _Grunnable) traceGoUnpark(gp, 0) tryWakeP = true } } if gp == nil && gcBlackenEnabled != 0 { gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) tryWakeP = tryWakeP || gp != nil } if gp == nil { // Check the global runnable queue once in a while to ensure fairness. // Otherwise two goroutines can completely occupy the local runqueue // by constantly respawning each other. if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { lock(&sched.lock) gp = globrunqget(_g_.m.p.ptr(), 1) unlock(&sched.lock) } } if gp == nil { gp, inheritTime = runqget(_g_.m.p.ptr()) // We can see gp != nil here even if the M is spinning, // if checkTimers added a local goroutine via goready. // Because gccgo does not implement preemption as a stack check, // we need to check for preemption here for fairness. // Otherwise goroutines on the local queue may starve // goroutines on the global queue. // Since we preempt by storing the goroutine on the global // queue, this is the only place we need to check preempt. // This does not call checkPreempt because gp is not running. if gp != nil && gp.preempt { gp.preempt = false lock(&sched.lock) globrunqput(gp) unlock(&sched.lock) goto top } } if gp == nil { gp, inheritTime = findrunnable() // blocks until work is available } // This thread is going to run a goroutine and is not spinning anymore, // so if it was marked as spinning we need to reset it now and potentially // start a new spinning M. if _g_.m.spinning { resetspinning() } if sched.disable.user && !schedEnabled(gp) { // Scheduling of this goroutine is disabled. Put it on // the list of pending runnable goroutines for when we // re-enable user scheduling and look again. lock(&sched.lock) if schedEnabled(gp) { // Something re-enabled scheduling while we // were acquiring the lock. unlock(&sched.lock) } else { sched.disable.runnable.pushBack(gp) sched.disable.n++ unlock(&sched.lock) goto top } } // If about to schedule a not-normal goroutine (a GCworker or tracereader), // wake a P if there is one. if tryWakeP { if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { wakep() } } if gp.lockedm != 0 { // Hands off own p to the locked m, // then blocks waiting for a new p. startlockedm(gp) goto top } execute(gp, inheritTime) } // dropg removes the association between m and the current goroutine m->curg (gp for short). 
// Typically a caller sets gp's status away from Grunning and then
// immediately calls dropg to finish the job. The caller is also responsible
// for arranging that gp will be restarted using ready at an
// appropriate time. After calling dropg and arranging for gp to be
// readied later, the caller can do other work but eventually should
// call schedule to restart the scheduling of goroutines on this m.
func dropg() {
	_g_ := getg()

	setMNoWB(&_g_.m.curg.m, nil)
	setGNoWB(&_g_.m.curg, nil)
}

// checkTimers runs any timers for the P that are ready.
// If now is not 0 it is the current time.
// It returns the current time or 0 if it is not known,
// and the time when the next timer should run or 0 if there is no next timer,
// and reports whether it ran any timers.
// If the time when the next timer should run is not 0,
// it is always larger than the returned time.
// We pass now in and out to avoid extra calls of nanotime.
//go:yeswritebarrierrec
func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
	// If there are no timers to adjust, and the first timer on
	// the heap is not yet ready to run, then there is nothing to do.
	if atomic.Load(&pp.adjustTimers) == 0 {
		next := int64(atomic.Load64(&pp.timer0When))
		if next == 0 {
			return now, 0, false
		}
		if now == 0 {
			now = nanotime()
		}
		if now < next {
			// Next timer is not ready to run.
			// But keep going if we would clear deleted timers.
			// This corresponds to the condition below where
			// we decide whether to call clearDeletedTimers.
			if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
				return now, next, false
			}
		}
	}

	lock(&pp.timersLock)

	adjusttimers(pp)

	rnow = now
	if len(pp.timers) > 0 {
		if rnow == 0 {
			rnow = nanotime()
		}
		for len(pp.timers) > 0 {
			// Note that runtimer may temporarily unlock
			// pp.timersLock.
			if tw := runtimer(pp, rnow); tw != 0 {
				if tw > 0 {
					pollUntil = tw
				}
				break
			}
			ran = true
		}
	}

	// If this is the local P, and there are a lot of deleted timers,
	// clear them out. We only do this for the local P to reduce
	// lock contention on timersLock.
	if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 {
		clearDeletedTimers(pp)
	}

	unlock(&pp.timersLock)

	return rnow, pollUntil, ran
}

// shouldStealTimers reports whether we should try stealing the timers from p2.
// We don't steal timers from a running P that is not marked for preemption,
// on the assumption that it will run its own timers. This reduces
// contention on the timers lock.
func shouldStealTimers(p2 *p) bool {
	if p2.status != _Prunning {
		return true
	}
	mp := p2.m.ptr()
	if mp == nil || mp.locks > 0 {
		return false
	}
	gp := mp.curg
	if gp == nil || gp.atomicstatus != _Grunning || !gp.preempt {
		return false
	}
	return true
}

func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	return true
}

// park continuation on g0.
func park_m(gp *g) {
	_g_ := getg()

	if trace.enabled {
		traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
	}

	casgstatus(gp, _Grunning, _Gwaiting)
	dropg()

	if fn := _g_.m.waitunlockf; fn != nil {
		ok := fn(gp, _g_.m.waitlock)
		_g_.m.waitunlockf = nil
		_g_.m.waitlock = nil
		if !ok {
			if trace.enabled {
				traceGoUnpark(gp, 2)
			}
			casgstatus(gp, _Gwaiting, _Grunnable)
			execute(gp, true) // Schedule it back, never returns.
		}
	}
	schedule()
}

func goschedImpl(gp *g) {
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	casgstatus(gp, _Grunning, _Grunnable)
	dropg()
	lock(&sched.lock)
	globrunqput(gp)
	unlock(&sched.lock)

	schedule()
}

// Gosched continuation on g0.
func gosched_m(gp *g) {
	if trace.enabled {
		traceGoSched()
	}
	goschedImpl(gp)
}

// goschedguarded is a forbidden-states-avoided version of gosched_m
func goschedguarded_m(gp *g) {
	if !canPreemptM(gp.m) {
		gogo(gp) // never return
	}

	if trace.enabled {
		traceGoSched()
	}
	goschedImpl(gp)
}

func gopreempt_m(gp *g) {
	if trace.enabled {
		traceGoPreempt()
	}
	goschedImpl(gp)
}

// preemptPark parks gp and puts it in _Gpreempted.
//
//go:systemstack
func preemptPark(gp *g) {
	if trace.enabled {
		traceGoPark(traceEvGoBlock, 0)
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	gp.waitreason = waitReasonPreempted
	// Transition from _Grunning to _Gscan|_Gpreempted. We can't
	// be in _Grunning when we dropg because then we'd be running
	// without an M, but the moment we're in _Gpreempted,
	// something could claim this G before we've fully cleaned it
	// up.
	// Hence, we set the scan bit to lock down further
	// transitions until we can dropg.
	casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
	dropg()
	casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
	schedule()
}

// goyield is like Gosched, but it:
// - emits a GoPreempt trace event instead of a GoSched trace event
// - puts the current G on the runq of the current P instead of the globrunq
func goyield() {
	checkTimeouts()
	mcall(goyield_m)
}

func goyield_m(gp *g) {
	if trace.enabled {
		traceGoPreempt()
	}
	pp := gp.m.p.ptr()
	casgstatus(gp, _Grunning, _Grunnable)
	dropg()
	runqput(pp, gp, false)
	schedule()
}

// Finishes execution of the current goroutine.
func goexit1() {
	if trace.enabled {
		traceGoEnd()
	}
	mcall(goexit0)
}

// goexit continuation on g0.
func goexit0(gp *g) {
	_g_ := getg()

	casgstatus(gp, _Grunning, _Gdead)
	if isSystemGoroutine(gp, false) {
		atomic.Xadd(&sched.ngsys, -1)
		gp.isSystemGoroutine = false
	}
	gp.m = nil
	locked := gp.lockedm != 0
	gp.lockedm = 0
	_g_.m.lockedg = 0
	gp.entry = nil
	gp.preemptStop = false
	gp.paniconfault = false
	gp._defer = nil // should be true already but just in case.
	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
	gp.writebuf = nil
	gp.waitreason = 0
	gp.param = nil
	gp.labels = nil
	gp.timer = nil

	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
		// Flush assist credit to the global pool. This gives
		// better information to pacing if the application is
		// rapidly creating and exiting goroutines.
		scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
		atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
		gp.gcAssistBytes = 0
	}

	dropg()

	if GOARCH == "wasm" { // no threads yet on wasm
		gfput(_g_.m.p.ptr(), gp)
		schedule() // never returns
	}

	if _g_.m.lockedInt != 0 {
		print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
		throw("internal lockOSThread error")
	}
	gfput(_g_.m.p.ptr(), gp)
	if locked {
		// The goroutine may have locked this thread because
		// it put it in an unusual kernel state. Kill it
		// rather than returning it to the thread pool.

		// Return to mstart, which will release the P and exit
		// the thread.
		if GOOS != "plan9" { // See golang.org/issue/22227.
			_g_.m.exiting = true
			gogo(_g_.m.g0)
		} else {
			// Clear lockedExt on plan9 since we may end up re-using
			// this thread.
			_g_.m.lockedExt = 0
		}
	}
	schedule()
}

// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// The entersyscall function is written in C, so that it can save the
// current register context so that the GC will see them.
// It calls reentersyscall.
//
// Syscall tracing:
// At the start of a syscall we emit traceGoSysCall to capture the stack trace.
// If the syscall does not block, that is it, we do not emit any other events.
// If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
// when syscall returns we emit traceGoSysExit and when the goroutine starts running
// (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
// To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
// we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
// whoever emits traceGoSysBlock increments p.syscalltick afterwards;
// and we wait for the increment before emitting traceGoSysExit.
// Note that the increment is done even if tracing is not enabled, // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. // //go:nosplit //go:noinline func reentersyscall(pc, sp uintptr) { _g_ := getg() // Disable preemption because during this function g is in Gsyscall status, // but can have inconsistent g->sched, do not let GC observe it. _g_.m.locks++ _g_.syscallsp = sp _g_.syscallpc = pc casgstatus(_g_, _Grunning, _Gsyscall) if trace.enabled { systemstack(traceGoSysCall) } if atomic.Load(&sched.sysmonwait) != 0 { systemstack(entersyscall_sysmon) } if _g_.m.p.ptr().runSafePointFn != 0 { // runSafePointFn may stack split if run on this stack systemstack(runSafePointFn) } _g_.m.syscalltick = _g_.m.p.ptr().syscalltick _g_.sysblocktraced = true _g_.m.mcache = nil pp := _g_.m.p.ptr() pp.m = 0 _g_.m.oldp.set(pp) _g_.m.p = 0 atomic.Store(&pp.status, _Psyscall) if sched.gcwaiting != 0 { systemstack(entersyscall_gcwait) } _g_.m.locks-- } func entersyscall_sysmon() { lock(&sched.lock) if atomic.Load(&sched.sysmonwait) != 0 { atomic.Store(&sched.sysmonwait, 0) notewakeup(&sched.sysmonnote) } unlock(&sched.lock) } func entersyscall_gcwait() { _g_ := getg() _p_ := _g_.m.oldp.ptr() lock(&sched.lock) if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { if trace.enabled { traceGoSysBlock(_p_) traceProcStop(_p_) } _p_.syscalltick++ if sched.stopwait--; sched.stopwait == 0 { notewakeup(&sched.stopnote) } } unlock(&sched.lock) } func reentersyscallblock(pc, sp uintptr) { _g_ := getg() _g_.m.locks++ // see comment in entersyscall _g_.throwsplit = true _g_.m.syscalltick = _g_.m.p.ptr().syscalltick _g_.sysblocktraced = true _g_.m.p.ptr().syscalltick++ // Leave SP around for GC and traceback. _g_.syscallsp = sp _g_.syscallpc = pc casgstatus(_g_, _Grunning, _Gsyscall) systemstack(entersyscallblock_handoff) _g_.m.locks-- } func entersyscallblock_handoff() { if trace.enabled { traceGoSysCall() traceGoSysBlock(getg().m.p.ptr()) } handoffp(releasep()) } // The goroutine g exited its system call. // Arrange for it to run on a cpu again. // This is called only from the go syscall library, not // from the low-level system calls used by the runtime. // // Write barriers are not allowed because our P may have been stolen. // //go:nosplit //go:nowritebarrierrec func exitsyscall() { _g_ := getg() _g_.m.locks++ // see comment in entersyscall _g_.waitsince = 0 oldp := _g_.m.oldp.ptr() _g_.m.oldp = 0 if exitsyscallfast(oldp) { if _g_.m.mcache == nil { throw("lost mcache") } if trace.enabled { if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { systemstack(traceGoStart) } } // There's a cpu for us, so we can run. _g_.m.p.ptr().syscalltick++ // We need to cas the status and scan before resuming... casgstatus(_g_, _Gsyscall, _Grunning) exitsyscallclear(_g_) _g_.m.locks-- _g_.throwsplit = false // Check preemption, since unlike gc we don't check on // every call. if getg().preempt { checkPreempt() } _g_.throwsplit = false if sched.disable.user && !schedEnabled(_g_) { // Scheduling of this goroutine is disabled. Gosched() } return } _g_.sysexitticks = 0 if trace.enabled { // Wait till traceGoSysBlock event is emitted. // This ensures consistency of the trace (the goroutine is started after it is blocked). for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { osyield() } // We can't trace syscall exit right now because we don't have a P. // Tracing code can invoke write barriers that cannot run without a P. 
// So instead we remember the syscall exit time and emit the event // in execute when we have a P. _g_.sysexitticks = cputicks() } _g_.m.locks-- // Call the scheduler. mcall(exitsyscall0) if _g_.m.mcache == nil { throw("lost mcache") } // Scheduler returned, so we're allowed to run now. // Delete the syscallsp information that we left for // the garbage collector during the system call. // Must wait until now because until gosched returns // we don't know for sure that the garbage collector // is not running. exitsyscallclear(_g_) _g_.m.p.ptr().syscalltick++ _g_.throwsplit = false } //go:nosplit func exitsyscallfast(oldp *p) bool { _g_ := getg() // Freezetheworld sets stopwait but does not retake P's. if sched.stopwait == freezeStopWait { return false } // Try to re-acquire the last P. if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) { // There's a cpu for us, so we can run. wirep(oldp) exitsyscallfast_reacquired() return true } // Try to get any other idle P. if sched.pidle != 0 { var ok bool systemstack(func() { ok = exitsyscallfast_pidle() if ok && trace.enabled { if oldp != nil { // Wait till traceGoSysBlock event is emitted. // This ensures consistency of the trace (the goroutine is started after it is blocked). for oldp.syscalltick == _g_.m.syscalltick { osyield() } } traceGoSysExit(0) } }) if ok { return true } } return false } // exitsyscallfast_reacquired is the exitsyscall path on which this G // has successfully reacquired the P it was running on before the // syscall. // //go:nosplit func exitsyscallfast_reacquired() { _g_ := getg() if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { if trace.enabled { // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). // traceGoSysBlock for this syscall was already emitted, // but here we effectively retake the p from the new syscall running on the same p. systemstack(func() { // Denote blocking of the new syscall. traceGoSysBlock(_g_.m.p.ptr()) // Denote completion of the current syscall. traceGoSysExit(0) }) } _g_.m.p.ptr().syscalltick++ } } func exitsyscallfast_pidle() bool { lock(&sched.lock) _p_ := pidleget() if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { atomic.Store(&sched.sysmonwait, 0) notewakeup(&sched.sysmonnote) } unlock(&sched.lock) if _p_ != nil { acquirep(_p_) return true } return false } // exitsyscall slow path on g0. // Failed to acquire P, enqueue gp as runnable. // //go:nowritebarrierrec func exitsyscall0(gp *g) { _g_ := getg() casgstatus(gp, _Gsyscall, _Gexitingsyscall) dropg() casgstatus(gp, _Gexitingsyscall, _Grunnable) lock(&sched.lock) var _p_ *p if schedEnabled(_g_) { _p_ = pidleget() } if _p_ == nil { globrunqput(gp) } else if atomic.Load(&sched.sysmonwait) != 0 { atomic.Store(&sched.sysmonwait, 0) notewakeup(&sched.sysmonnote) } unlock(&sched.lock) if _p_ != nil { acquirep(_p_) execute(gp, false) // Never returns. } if _g_.m.lockedg != 0 { // Wait until another thread schedules gp and so m again. stoplockedm() execute(gp, false) // Never returns. } stopm() schedule() // Never returns. } // exitsyscallclear clears GC-related information that we only track // during a syscall. func exitsyscallclear(gp *g) { // Garbage collector isn't running (since we are), so okay to // clear syscallsp. gp.syscallsp = 0 gp.gcstack = 0 gp.gcnextsp = 0 memclrNoHeapPointers(unsafe.Pointer(&gp.gcregs), unsafe.Sizeof(gp.gcregs)) } // Code generated by cgo, and some library code, calls syscall.Entersyscall // and syscall.Exitsyscall. 
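// An illustrative sketch (not generated output) of how such code
// brackets a blocking C call with these hooks:
//
//	syscall.Entersyscall()   // release the P while blocked
//	n := c_write(fd, p, len) // hypothetical C function call
//	syscall.Exitsyscall()    // reacquire a P and resume
//
// Entersyscall and Exitsyscall pair up exactly like entersyscall
// and exitsyscall above.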
//go:linkname syscall_entersyscall syscall.Entersyscall //go:nosplit func syscall_entersyscall() { entersyscall() } //go:linkname syscall_exitsyscall syscall.Exitsyscall //go:nosplit func syscall_exitsyscall() { exitsyscall() } func beforefork() { gp := getg().m.curg // Block signals during a fork, so that the child does not run // a signal handler before exec if a signal is sent to the process // group. See issue #18600. gp.m.locks++ msigsave(gp.m) sigblock() } // Called from syscall package before fork. //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork //go:nosplit func syscall_runtime_BeforeFork() { systemstack(beforefork) } func afterfork() { gp := getg().m.curg msigrestore(gp.m.sigmask) gp.m.locks-- } // Called from syscall package after fork in parent. //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork //go:nosplit func syscall_runtime_AfterFork() { systemstack(afterfork) } // inForkedChild is true while manipulating signals in the child process. // This is used to avoid calling libc functions in case we are using vfork. var inForkedChild bool // Called from syscall package after fork in child. // It resets non-sigignored signals to the default handler, and // restores the signal mask in preparation for the exec. // // Because this might be called during a vfork, and therefore may be // temporarily sharing address space with the parent process, this must // not change any global variables or calling into C code that may do so. // //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild //go:nosplit //go:nowritebarrierrec func syscall_runtime_AfterForkInChild() { // It's OK to change the global variable inForkedChild here // because we are going to change it back. There is no race here, // because if we are sharing address space with the parent process, // then the parent process can not be running concurrently. inForkedChild = true clearSignalHandlers() // When we are the child we are the only thread running, // so we know that nothing else has changed gp.m.sigmask. msigrestore(getg().m.sigmask) inForkedChild = false } // Called from syscall package before Exec. //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec func syscall_runtime_BeforeExec() { // Prevent thread creation during exec. execLock.lock() } // Called from syscall package after Exec. //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec func syscall_runtime_AfterExec() { execLock.unlock() } // panicgonil is used for gccgo as we need to use a compiler check for // a nil func, in case we have to build a thunk. //go:linkname panicgonil func panicgonil() { getg().m.throwing = -1 // do not dump full stacks throw("go of nil func value") } // Create a new g running fn passing arg as the single argument. // Put it on the queue of g's waiting to run. // The compiler turns a go statement into a call to this. //go:linkname newproc __go_go func newproc(fn uintptr, arg unsafe.Pointer) *g { _g_ := getg() if fn == 0 { _g_.m.throwing = -1 // do not dump full stacks throw("go of nil func value") } acquirem() // disable preemption because it can be holding p in a local var _p_ := _g_.m.p.ptr() newg := gfget(_p_) var ( sp unsafe.Pointer spsize uintptr ) if newg == nil { newg = malg(true, false, &sp, &spsize) casgstatus(newg, _Gidle, _Gdead) allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 
	} else {
		resetNewG(newg, &sp, &spsize)
	}
	newg.traceback = 0

	if readgstatus(newg) != _Gdead {
		throw("newproc1: new g is not Gdead")
	}

	// Store the C function pointer into entryfn, take the address
	// of entryfn, convert it to a Go function value, and store
	// that in entry.
	newg.entryfn = fn
	var entry func(unsafe.Pointer)
	*(*unsafe.Pointer)(unsafe.Pointer(&entry)) = unsafe.Pointer(&newg.entryfn)
	newg.entry = entry
	newg.param = arg
	newg.gopc = getcallerpc()
	newg.ancestors = saveAncestors(_g_)
	newg.startpc = fn
	if _g_.m.curg != nil {
		newg.labels = _g_.m.curg.labels
	}
	if isSystemGoroutine(newg, false) {
		atomic.Xadd(&sched.ngsys, +1)
	}
	casgstatus(newg, _Gdead, _Grunnable)

	if _p_.goidcache == _p_.goidcacheend {
		// Sched.goidgen is the last allocated id,
		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
		// At startup sched.goidgen=0, so main goroutine receives goid=1.
		_p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
		_p_.goidcache -= _GoidCacheBatch - 1
		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
	}
	newg.goid = int64(_p_.goidcache)
	_p_.goidcache++
	if trace.enabled {
		traceGoCreate(newg, newg.startpc)
	}

	makeGContext(newg, sp, spsize)

	runqput(_p_, newg, true)

	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
		wakep()
	}
	releasem(_g_.m)
	return newg
}

// expectedSystemGoroutines counts the number of goroutines expected
// to mark themselves as system goroutines. After they mark themselves
// by calling setSystemGoroutine, this is decremented. NumGoroutines
// uses this to wait for all system goroutines to mark themselves
// before it counts them.
var expectedSystemGoroutines uint32

// expectSystemGoroutine is called when starting a goroutine that will
// call setSystemGoroutine. It increments expectedSystemGoroutines.
func expectSystemGoroutine() {
	atomic.Xadd(&expectedSystemGoroutines, +1)
}

// waitForSystemGoroutines waits for all currently expected system
// goroutines to register themselves.
func waitForSystemGoroutines() {
	for atomic.Load(&expectedSystemGoroutines) > 0 {
		Gosched()
		osyield()
	}
}

// setSystemGoroutine marks this goroutine as a "system goroutine".
// In the gc toolchain this is done by comparing startpc to a list of
// saved special PCs. In gccgo that approach does not work as startpc
// is often a thunk that invokes the real function with arguments,
// so the thunk address never matches the saved special PCs. Instead,
// since there are only a limited number of "system goroutines",
// we force each one to mark itself as special.
func setSystemGoroutine() {
	getg().isSystemGoroutine = true
	atomic.Xadd(&sched.ngsys, +1)
	atomic.Xadd(&expectedSystemGoroutines, -1)
}

// saveAncestors copies previous ancestors of the given caller g and
// includes info for the current caller into a new set of tracebacks for
// a g being created.
func saveAncestors(callergp *g) *[]ancestorInfo {
	// Copy all prior info, except for the root goroutine (goid 0).
	if debug.tracebackancestors <= 0 || callergp.goid == 0 {
		return nil
	}
	var callerAncestors []ancestorInfo
	if callergp.ancestors != nil {
		callerAncestors = *callergp.ancestors
	}
	n := int32(len(callerAncestors)) + 1
	if n > debug.tracebackancestors {
		n = debug.tracebackancestors
	}
	ancestors := make([]ancestorInfo, n)
	copy(ancestors[1:], callerAncestors)

	var pcs [_TracebackMaxFrames]uintptr
	// FIXME: This should get a traceback of callergp.
	// npcs := gcallers(callergp, 0, pcs[:])
	npcs := 0
	ipcs := make([]uintptr, npcs)
	copy(ipcs, pcs[:])
	ancestors[0] = ancestorInfo{
		pcs:  ipcs,
		goid: callergp.goid,
		gopc: callergp.gopc,
	}

	ancestorsp := new([]ancestorInfo)
	*ancestorsp = ancestors
	return ancestorsp
}

// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
func gfput(_p_ *p, gp *g) {
	if readgstatus(gp) != _Gdead {
		throw("gfput: bad status (not Gdead)")
	}

	_p_.gFree.push(gp)
	_p_.gFree.n++
	if _p_.gFree.n >= 64 {
		lock(&sched.gFree.lock)
		for _p_.gFree.n >= 32 {
			_p_.gFree.n--
			gp = _p_.gFree.pop()
			sched.gFree.list.push(gp)
			sched.gFree.n++
		}
		unlock(&sched.gFree.lock)
	}
}

// Get from gfree list.
// If local list is empty, grab a batch from global list.
func gfget(_p_ *p) *g {
retry:
	if _p_.gFree.empty() && !sched.gFree.list.empty() {
		lock(&sched.gFree.lock)
		// Move a batch of free Gs to the P.
		for _p_.gFree.n < 32 {
			gp := sched.gFree.list.pop()
			if gp == nil {
				break
			}
			sched.gFree.n--
			_p_.gFree.push(gp)
			_p_.gFree.n++
		}
		unlock(&sched.gFree.lock)
		goto retry
	}
	gp := _p_.gFree.pop()
	if gp == nil {
		return nil
	}
	_p_.gFree.n--
	return gp
}

// Purge all cached G's from gfree list to the global list.
func gfpurge(_p_ *p) {
	lock(&sched.gFree.lock)
	for !_p_.gFree.empty() {
		gp := _p_.gFree.pop()
		_p_.gFree.n--
		sched.gFree.list.push(gp)
		sched.gFree.n++
	}
	unlock(&sched.gFree.lock)
}

// Breakpoint executes a breakpoint trap.
func Breakpoint() {
	breakpoint()
}

// dolockOSThread is called by LockOSThread and lockOSThread below
// after they modify m.locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
//go:nosplit
func dolockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	_g_ := getg()
	_g_.m.lockedg.set(_g_)
	_g_.lockedm.set(_g_.m)
}

//go:nosplit

// LockOSThread wires the calling goroutine to its current operating system thread.
// The calling goroutine will always execute in that thread,
// and no other goroutine will execute in it,
// until the calling goroutine has made as many calls to
// UnlockOSThread as to LockOSThread.
// If the calling goroutine exits without unlocking the thread,
// the thread will be terminated.
//
// All init functions are run on the startup thread. Calling LockOSThread
// from an init function will cause the main function to be invoked on
// that thread.
//
// A goroutine should call LockOSThread before calling OS services or
// non-Go library functions that depend on per-thread state.
func LockOSThread() {
	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
		// If we need to start a new thread from the locked
		// thread, we need the template thread. Start it now
		// while we're in a known-good state.
		startTemplateThread()
	}
	_g_ := getg()
	_g_.m.lockedExt++
	if _g_.m.lockedExt == 0 {
		_g_.m.lockedExt--
		panic("LockOSThread nesting overflow")
	}
	dolockOSThread()
}

//go:nosplit
func lockOSThread() {
	getg().m.lockedInt++
	dolockOSThread()
}

// dounlockOSThread is called by UnlockOSThread and unlockOSThread below
// after they update m->locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
//go:nosplit
func dounlockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	_g_ := getg()
	if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
		return
	}
	_g_.m.lockedg = 0
	_g_.lockedm = 0
}

//go:nosplit

// UnlockOSThread undoes an earlier call to LockOSThread.
// If this drops the number of active LockOSThread calls on the // calling goroutine to zero, it unwires the calling goroutine from // its fixed operating system thread. // If there are no active LockOSThread calls, this is a no-op. // // Before calling UnlockOSThread, the caller must ensure that the OS // thread is suitable for running other goroutines. If the caller made // any permanent changes to the state of the thread that would affect // other goroutines, it should not call this function and thus leave // the goroutine locked to the OS thread until the goroutine (and // hence the thread) exits. func UnlockOSThread() { _g_ := getg() if _g_.m.lockedExt == 0 { return } _g_.m.lockedExt-- dounlockOSThread() } //go:nosplit func unlockOSThread() { _g_ := getg() if _g_.m.lockedInt == 0 { systemstack(badunlockosthread) } _g_.m.lockedInt-- dounlockOSThread() } func badunlockosthread() { throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") } func gcount() int32 { n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys)) for _, _p_ := range allp { n -= _p_.gFree.n } // All these variables can be changed concurrently, so the result can be inconsistent. // But at least the current goroutine is running. if n < 1 { n = 1 } return n } func mcount() int32 { return int32(sched.mnext - sched.nmfreed) } var prof struct { signalLock uint32 hz int32 } func _System() { _System() } func _ExternalCode() { _ExternalCode() } func _LostExternalCode() { _LostExternalCode() } func _GC() { _GC() } func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() } func _VDSO() { _VDSO() } var _SystemPC = funcPC(_System) var _ExternalCodePC = funcPC(_ExternalCode) var _LostExternalCodePC = funcPC(_LostExternalCode) var _GCPC = funcPC(_GC) var _LostSIGPROFDuringAtomic64PC = funcPC(_LostSIGPROFDuringAtomic64) // Called if we receive a SIGPROF signal. // Called by the signal handler, may run during STW. //go:nowritebarrierrec func sigprof(pc uintptr, gp *g, mp *m) { if prof.hz == 0 { return } // Profiling runs concurrently with GC, so it must not allocate. // Set a trap in case the code does allocate. // Note that on windows, one thread takes profiles of all the // other threads, so mp is usually not getg().m. // In fact mp may not even be stopped. // See golang.org/issue/17165. getg().m.mallocing++ traceback := true // If SIGPROF arrived while already fetching runtime callers // we can have trouble on older systems because the unwind // library calls dl_iterate_phdr which was not reentrant in // the past. alreadyInCallers checks for that. if gp == nil || alreadyInCallers() { traceback = false } var stk [maxCPUProfStack]uintptr n := 0 if traceback { var stklocs [maxCPUProfStack]location n = callers(0, stklocs[:]) // Issue 26595: the stack trace we've just collected is going // to include frames that we don't want to report in the CPU // profile, including signal handler frames. Here is what we // might typically see at the point of "callers" above for a // signal delivered to the application routine "interesting" // called by "main". // // 0: runtime.sigprof // 1: runtime.sighandler // 2: runtime.sigtrampgo // 3: runtime.sigtramp // 4: <signal handler called> // 5: main.interesting_routine // 6: main.main // // To ensure a sane profile, walk through the frames in // "stklocs" until we find the "runtime.sigtramp" frame, then // report only those frames below the frame one down from // that. 
On systems that don't split stack, "sigtramp" can // do a sibling call to "sigtrampgo", so use "sigtrampgo" // if we don't find "sigtramp". If for some reason // neither "runtime.sigtramp" nor "runtime.sigtrampgo" is // present, don't make any changes. framesToDiscard := 0 for i := 0; i < n; i++ { if stklocs[i].function == "runtime.sigtrampgo" && i+2 < n { framesToDiscard = i + 2 } if stklocs[i].function == "runtime.sigtramp" && i+2 < n { framesToDiscard = i + 2 break } } n -= framesToDiscard for i := 0; i < n; i++ { stk[i] = stklocs[i+framesToDiscard].pc } } if n <= 0 { // Normal traceback is impossible or has failed. // Account it against abstract "System" or "GC". n = 2 stk[0] = pc if mp.preemptoff != "" { stk[1] = _GCPC + sys.PCQuantum } else { stk[1] = _SystemPC + sys.PCQuantum } } if prof.hz != 0 { cpuprof.add(gp, stk[:n]) } getg().m.mallocing-- } // Use global arrays rather than using up lots of stack space in the // signal handler. This is safe since while we are executing a SIGPROF // signal other SIGPROF signals are blocked. var nonprofGoStklocs [maxCPUProfStack]location var nonprofGoStk [maxCPUProfStack]uintptr // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread, // and the signal handler collected a stack trace in sigprofCallers. // When this is called, sigprofCallersUse will be non-zero. // g is nil, and what we can do is very limited. //go:nosplit //go:nowritebarrierrec func sigprofNonGo(pc uintptr) { if prof.hz != 0 { n := callers(0, nonprofGoStklocs[:]) for i := 0; i < n; i++ { nonprofGoStk[i] = nonprofGoStklocs[i].pc } if n <= 0 { n = 2 nonprofGoStk[0] = pc nonprofGoStk[1] = _ExternalCodePC + sys.PCQuantum } cpuprof.addNonGo(nonprofGoStk[:n]) } } // sigprofNonGoPC is called when a profiling signal arrived on a // non-Go thread and we have a single PC value, not a stack trace. // g is nil, and what we can do is very limited. //go:nosplit //go:nowritebarrierrec func sigprofNonGoPC(pc uintptr) { if prof.hz != 0 { stk := []uintptr{ pc, _ExternalCodePC + sys.PCQuantum, } cpuprof.addNonGo(stk) } } // setcpuprofilerate sets the CPU profiling rate to hz times per second. // If hz <= 0, setcpuprofilerate turns off CPU profiling. func setcpuprofilerate(hz int32) { // Force sane arguments. if hz < 0 { hz = 0 } // Disable preemption, otherwise we can be rescheduled to another thread // that has profiling enabled. _g_ := getg() _g_.m.locks++ // Stop profiler on this thread so that it is safe to lock prof. // if a profiling signal came in while we had prof locked, // it would deadlock. setThreadCPUProfiler(0) for !atomic.Cas(&prof.signalLock, 0, 1) { osyield() } if prof.hz != hz { setProcessCPUProfiler(hz) prof.hz = hz } atomic.Store(&prof.signalLock, 0) lock(&sched.lock) sched.profilehz = hz unlock(&sched.lock) if hz != 0 { setThreadCPUProfiler(hz) } _g_.m.locks-- } // init initializes pp, which may be a freshly allocated p or a // previously destroyed p, and transitions it to status _Pgcstop. func (pp *p) init(id int32) { pp.id = id pp.status = _Pgcstop pp.sudogcache = pp.sudogbuf[:0] pp.deferpool = pp.deferpoolbuf[:0] pp.wbBuf.reset() if pp.mcache == nil { if id == 0 { if getg().m.mcache == nil { throw("missing mcache?") } pp.mcache = getg().m.mcache // bootstrap } else { pp.mcache = allocmcache() } } if raceenabled && pp.raceprocctx == 0 { if id == 0 { pp.raceprocctx = raceprocctx0 raceprocctx0 = 0 // bootstrap } else { pp.raceprocctx = raceproccreate() } } } // destroy releases all of the resources associated with pp and // transitions it to status _Pdead. 
// // sched.lock must be held and the world must be stopped. func (pp *p) destroy() { // Move all runnable goroutines to the global queue for pp.runqhead != pp.runqtail { // Pop from tail of local queue pp.runqtail-- gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr() // Push onto head of global queue globrunqputhead(gp) } if pp.runnext != 0 { globrunqputhead(pp.runnext.ptr()) pp.runnext = 0 } if len(pp.timers) > 0 { plocal := getg().m.p.ptr() // The world is stopped, but we acquire timersLock to // protect against sysmon calling timeSleepUntil. // This is the only case where we hold the timersLock of // more than one P, so there are no deadlock concerns. lock(&plocal.timersLock) lock(&pp.timersLock) moveTimers(plocal, pp.timers) pp.timers = nil pp.numTimers = 0 pp.adjustTimers = 0 pp.deletedTimers = 0 atomic.Store64(&pp.timer0When, 0) unlock(&pp.timersLock) unlock(&plocal.timersLock) } // If there's a background worker, make it runnable and put // it on the global queue so it can clean itself up. if gp := pp.gcBgMarkWorker.ptr(); gp != nil { casgstatus(gp, _Gwaiting, _Grunnable) if trace.enabled { traceGoUnpark(gp, 0) } globrunqput(gp) // This assignment doesn't race because the // world is stopped. pp.gcBgMarkWorker.set(nil) } // Flush p's write barrier buffer. if gcphase != _GCoff { wbBufFlush1(pp) pp.gcw.dispose() } for i := range pp.sudogbuf { pp.sudogbuf[i] = nil } pp.sudogcache = pp.sudogbuf[:0] for i := range pp.deferpoolbuf { pp.deferpoolbuf[i] = nil } pp.deferpool = pp.deferpoolbuf[:0] systemstack(func() { for i := 0; i < pp.mspancache.len; i++ { // Safe to call since the world is stopped. mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i])) } pp.mspancache.len = 0 pp.pcache.flush(&mheap_.pages) }) freemcache(pp.mcache) pp.mcache = nil gfpurge(pp) traceProcFree(pp) pp.gcAssistTime = 0 pp.status = _Pdead } // Change number of processors. The world is stopped, sched is locked. // gcworkbufs are not being modified by either the GC or // the write barrier code. // Returns list of Ps with local work, they need to be scheduled by the caller. func procresize(nprocs int32) *p { old := gomaxprocs if old < 0 || nprocs <= 0 { throw("procresize: invalid arg") } if trace.enabled { traceGomaxprocs(nprocs) } // update statistics now := nanotime() if sched.procresizetime != 0 { sched.totaltime += int64(old) * (now - sched.procresizetime) } sched.procresizetime = now // Grow allp if necessary. if nprocs > int32(len(allp)) { // Synchronize with retake, which could be running // concurrently since it doesn't run on a P. lock(&allpLock) if nprocs <= int32(cap(allp)) { allp = allp[:nprocs] } else { nallp := make([]*p, nprocs) // Copy everything up to allp's cap so we // never lose old allocated Ps. copy(nallp, allp[:cap(allp)]) allp = nallp } unlock(&allpLock) } // initialize new P's for i := old; i < nprocs; i++ { pp := allp[i] if pp == nil { pp = new(p) } pp.init(i) atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) } _g_ := getg() if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { // continue to use the current P _g_.m.p.ptr().status = _Prunning _g_.m.p.ptr().mcache.prepareForSweep() } else { // release the current P and acquire allp[0]. // // We must do this before destroying our current P // because p.destroy itself has write barriers, so we // need to do that from a valid P. if _g_.m.p != 0 { if trace.enabled { // Pretend that we were descheduled // and then scheduled again to keep // the trace sane. 
traceGoSched() traceProcStop(_g_.m.p.ptr()) } _g_.m.p.ptr().m = 0 } _g_.m.p = 0 _g_.m.mcache = nil p := allp[0] p.m = 0 p.status = _Pidle acquirep(p) if trace.enabled { traceGoStart() } } // release resources from unused P's for i := nprocs; i < old; i++ { p := allp[i] p.destroy() // can't free P itself because it can be referenced by an M in syscall } // Trim allp. if int32(len(allp)) != nprocs { lock(&allpLock) allp = allp[:nprocs] unlock(&allpLock) } var runnablePs *p for i := nprocs - 1; i >= 0; i-- { p := allp[i] if _g_.m.p.ptr() == p { continue } p.status = _Pidle if runqempty(p) { pidleput(p) } else { p.m.set(mget()) p.link.set(runnablePs) runnablePs = p } } stealOrder.reset(uint32(nprocs)) var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) return runnablePs } // Associate p and the current m. // // This function is allowed to have write barriers even if the caller // isn't because it immediately acquires _p_. // //go:yeswritebarrierrec func acquirep(_p_ *p) { // Do the part that isn't allowed to have write barriers. wirep(_p_) // Have p; write barriers now allowed. // Perform deferred mcache flush before this P can allocate // from a potentially stale mcache. _p_.mcache.prepareForSweep() if trace.enabled { traceProcStart() } } // wirep is the first step of acquirep, which actually associates the // current M to _p_. This is broken out so we can disallow write // barriers for this part, since we don't yet have a P. // //go:nowritebarrierrec //go:nosplit func wirep(_p_ *p) { _g_ := getg() if _g_.m.p != 0 || _g_.m.mcache != nil { throw("wirep: already in go") } if _p_.m != 0 || _p_.status != _Pidle { id := int64(0) if _p_.m != 0 { id = _p_.m.ptr().id } print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") throw("wirep: invalid p state") } _g_.m.mcache = _p_.mcache _g_.m.p.set(_p_) _p_.m.set(_g_.m) _p_.status = _Prunning } // Disassociate p and the current m. func releasep() *p { _g_ := getg() if _g_.m.p == 0 || _g_.m.mcache == nil { throw("releasep: invalid arg") } _p_ := _g_.m.p.ptr() if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") throw("releasep: invalid p state") } if trace.enabled { traceProcStop(_g_.m.p.ptr()) } _g_.m.p = 0 _g_.m.mcache = nil _p_.m = 0 _p_.status = _Pidle return _p_ } func incidlelocked(v int32) { lock(&sched.lock) sched.nmidlelocked += v if v > 0 { checkdead() } unlock(&sched.lock) } // Check for deadlock situation. // The check is based on number of running M's, if 0 -> deadlock. // sched.lock must be held. func checkdead() { // For -buildmode=c-shared or -buildmode=c-archive it's OK if // there are no running goroutines. The calling program is // assumed to be running. if islibrary || isarchive { return } // If we are dying because of a signal caught on an already idle thread, // freezetheworld will cause all running threads to block. // And runtime will essentially enter into deadlock state, // except that there is a thread that will call exit soon. if panicking > 0 { return } // If we are not running under cgo, but we have an extra M then account // for it. (It is possible to have an extra M on Windows without cgo to // accommodate callbacks created by syscall.NewCallback. See issue #6751 // for details.) 
var run0 int32 if !iscgo && cgoHasExtraM { mp := lockextra(true) haveExtraM := extraMCount > 0 unlockextra(mp) if haveExtraM { run0 = 1 } } run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys if run > run0 { return } if run < 0 { print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n") throw("checkdead: inconsistent counts") } grunning := 0 lock(&allglock) for i := 0; i < len(allgs); i++ { gp := allgs[i] if isSystemGoroutine(gp, false) { continue } s := readgstatus(gp) switch s &^ _Gscan { case _Gwaiting, _Gpreempted: grunning++ case _Grunnable, _Grunning, _Gsyscall: unlock(&allglock) print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") throw("checkdead: runnable g") } } unlock(&allglock) if grunning == 0 { // possible if main goroutine calls runtime·Goexit() unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang throw("no goroutines (main called runtime.Goexit) - deadlock!") } // Maybe jump time forward for playground. if faketime != 0 { when, _p_ := timeSleepUntil() if _p_ != nil { faketime = when for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link { if (*pp).ptr() == _p_ { *pp = _p_.link break } } mp := mget() if mp == nil { // There should always be a free M since // nothing is running. throw("checkdead: no m for timer") } mp.nextp.set(_p_) notewakeup(&mp.park) return } } // There are no goroutines running, so we can look at the P's. for _, _p_ := range allp { if len(_p_.timers) > 0 { return } } getg().m.throwing = -1 // do not dump full stacks unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang throw("all goroutines are asleep - deadlock!") } // forcegcperiod is the maximum time in nanoseconds between garbage // collections. If we go this long without a garbage collection, one // is forced to run. // // This is a variable for testing purposes. It normally doesn't change. var forcegcperiod int64 = 2 * 60 * 1e9 // Always runs without a P, so write barriers are not allowed. // //go:nowritebarrierrec func sysmon() { lock(&sched.lock) sched.nmsys++ checkdead() unlock(&sched.lock) lasttrace := int64(0) idle := 0 // how many cycles in succession we had not wokeup somebody delay := uint32(0) for { if idle == 0 { // start with 20us sleep... delay = 20 } else if idle > 50 { // start doubling the sleep after 1ms... delay *= 2 } if delay > 10*1000 { // up to 10ms delay = 10 * 1000 } usleep(delay) now := nanotime() next, _ := timeSleepUntil() if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { lock(&sched.lock) if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { if next > now { atomic.Store(&sched.sysmonwait, 1) unlock(&sched.lock) // Make wake-up period small enough // for the sampling to be correct. 
sleep := forcegcperiod / 2 if next-now < sleep { sleep = next - now } shouldRelax := sleep >= osRelaxMinNS if shouldRelax { osRelax(true) } notetsleep(&sched.sysmonnote, sleep) if shouldRelax { osRelax(false) } now = nanotime() next, _ = timeSleepUntil() lock(&sched.lock) atomic.Store(&sched.sysmonwait, 0) noteclear(&sched.sysmonnote) } idle = 0 delay = 20 } unlock(&sched.lock) } // trigger libc interceptors if needed if *cgo_yield != nil { asmcgocall(*cgo_yield, nil) } // poll network if not polled for more than 10ms lastpoll := int64(atomic.Load64(&sched.lastpoll)) if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now { atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) list := netpoll(0) // non-blocking - returns list of goroutines if !list.empty() { // Need to decrement number of idle locked M's // (pretending that one more is running) before injectglist. // Otherwise it can lead to the following situation: // injectglist grabs all P's but before it starts M's to run the P's, // another M returns from syscall, finishes running its G, // observes that there is no work to do and no other running M's // and reports deadlock. incidlelocked(-1) injectglist(&list) incidlelocked(1) } } if next < now { // There are timers that should have already run, // perhaps because there is an unpreemptible P. // Try to start an M to run them. startm(nil, false) } // retake P's blocked in syscalls // and preempt long running G's if retake(now) != 0 { idle = 0 } else { idle++ } // check if we need to force a GC if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { lock(&forcegc.lock) forcegc.idle = 0 var list gList list.push(forcegc.g) injectglist(&list) unlock(&forcegc.lock) } if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { lasttrace = now schedtrace(debug.scheddetail > 0) } } } type sysmontick struct { schedtick uint32 schedwhen int64 syscalltick uint32 syscallwhen int64 } // forcePreemptNS is the time slice given to a G before it is // preempted. const forcePreemptNS = 10 * 1000 * 1000 // 10ms func retake(now int64) uint32 { n := 0 // Prevent allp slice changes. This lock will be completely // uncontended unless we're already stopping the world. lock(&allpLock) // We can't use a range loop over allp because we may // temporarily drop the allpLock. Hence, we need to re-fetch // allp each time around the loop. for i := 0; i < len(allp); i++ { _p_ := allp[i] if _p_ == nil { // This can happen if procresize has grown // allp but not yet created new Ps. continue } pd := &_p_.sysmontick s := _p_.status sysretake := false if s == _Prunning || s == _Psyscall { // Preempt G if it's running for too long. t := int64(_p_.schedtick) if int64(pd.schedtick) != t { pd.schedtick = uint32(t) pd.schedwhen = now } else if pd.schedwhen+forcePreemptNS <= now { preemptone(_p_) // In case of syscall, preemptone() doesn't // work, because there is no M wired to P. sysretake = true } } if s == _Psyscall { // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). t := int64(_p_.syscalltick) if !sysretake && int64(pd.syscalltick) != t { pd.syscalltick = uint32(t) pd.syscallwhen = now continue } // On the one hand we don't want to retake Ps if there is no other work to do, // but on the other hand we want to retake them eventually // because they can prevent the sysmon thread from deep sleep. 
if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { continue } // Drop allpLock so we can take sched.lock. unlock(&allpLock) // Need to decrement number of idle locked M's // (pretending that one more is running) before the CAS. // Otherwise the M from which we retake can exit the syscall, // increment nmidle and report deadlock. incidlelocked(-1) if atomic.Cas(&_p_.status, s, _Pidle) { if trace.enabled { traceGoSysBlock(_p_) traceProcStop(_p_) } n++ _p_.syscalltick++ handoffp(_p_) } incidlelocked(1) lock(&allpLock) } } unlock(&allpLock) return uint32(n) } // Tell all goroutines that they have been preempted and they should stop. // This function is purely best-effort. It can fail to inform a goroutine if a // processor just started running it. // No locks need to be held. // Returns true if preemption request was issued to at least one goroutine. func preemptall() bool { res := false for _, _p_ := range allp { if _p_.status != _Prunning { continue } if preemptone(_p_) { res = true } } return res } // Tell the goroutine running on processor P to stop. // This function is purely best-effort. It can incorrectly fail to inform the // goroutine. It can send inform the wrong goroutine. Even if it informs the // correct goroutine, that goroutine might ignore the request if it is // simultaneously executing newstack. // No lock needs to be held. // Returns true if preemption request was issued. // The actual preemption will happen at some point in the future // and will be indicated by the gp->status no longer being // Grunning func preemptone(_p_ *p) bool { mp := _p_.m.ptr() if mp == nil || mp == getg().m { return false } gp := mp.curg if gp == nil || gp == mp.g0 { return false } gp.preempt = true // At this point the gc implementation sets gp.stackguard0 to // a value that causes the goroutine to suspend itself. // gccgo has no support for this, and it's hard to support. // The split stack code reads a value from its TCB. // We have no way to set a value in the TCB of a different thread. // And, of course, not all systems support split stack anyhow. // Checking the field in the g is expensive, since it requires // loading the g from TLS. The best mechanism is likely to be // setting a global variable and figuring out a way to efficiently // check that global variable. // // For now we check gp.preempt in schedule, mallocgc, selectgo, // and a few other places, which is at least better than doing // nothing at all. // Request an async preemption of this P. if preemptMSupported && debug.asyncpreemptoff == 0 { _p_.preempt = true preemptM(mp) } return true } var starttime int64 func schedtrace(detailed bool) { now := nanotime() if starttime == 0 { starttime = now } lock(&sched.lock) print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize) if detailed { print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n") } // We must be careful while reading data from P's, M's and G's. // Even if we hold schedlock, most data can be changed concurrently. // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. 
for i, _p_ := range allp { mp := _p_.m.ptr() h := atomic.Load(&_p_.runqhead) t := atomic.Load(&_p_.runqtail) if detailed { id := int64(-1) if mp != nil { id = mp.id } print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n") } else { // In non-detailed mode format lengths of per-P run queues as: // [len1 len2 len3 len4] print(" ") if i == 0 { print("[") } print(t - h) if i == len(allp)-1 { print("]\n") } } } if !detailed { unlock(&sched.lock) return } for mp := allm; mp != nil; mp = mp.alllink { _p_ := mp.p.ptr() gp := mp.curg lockedg := mp.lockedg.ptr() id1 := int32(-1) if _p_ != nil { id1 = _p_.id } id2 := int64(-1) if gp != nil { id2 = gp.goid } id3 := int64(-1) if lockedg != nil { id3 = lockedg.goid } print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") } lock(&allglock) for gi := 0; gi < len(allgs); gi++ { gp := allgs[gi] mp := gp.m lockedm := gp.lockedm.ptr() id1 := int64(-1) if mp != nil { id1 = mp.id } id2 := int64(-1) if lockedm != nil { id2 = lockedm.id } print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n") } unlock(&allglock) unlock(&sched.lock) } // schedEnableUser enables or disables the scheduling of user // goroutines. // // This does not stop already running user goroutines, so the caller // should first stop the world when disabling user goroutines. func schedEnableUser(enable bool) { lock(&sched.lock) if sched.disable.user == !enable { unlock(&sched.lock) return } sched.disable.user = !enable if enable { n := sched.disable.n sched.disable.n = 0 globrunqputbatch(&sched.disable.runnable, n) unlock(&sched.lock) for ; n != 0 && sched.npidle != 0; n-- { startm(nil, false) } } else { unlock(&sched.lock) } } // schedEnabled reports whether gp should be scheduled. It returns // false is scheduling of gp is disabled. func schedEnabled(gp *g) bool { if sched.disable.user { return isSystemGoroutine(gp, true) } return true } // Put mp on midle list. // Sched must be locked. // May run during STW, so write barriers are not allowed. //go:nowritebarrierrec func mput(mp *m) { mp.schedlink = sched.midle sched.midle.set(mp) sched.nmidle++ checkdead() } // Try to get an m from midle list. // Sched must be locked. // May run during STW, so write barriers are not allowed. //go:nowritebarrierrec func mget() *m { mp := sched.midle.ptr() if mp != nil { sched.midle = mp.schedlink sched.nmidle-- } return mp } // Put gp on the global runnable queue. // Sched must be locked. // May run during STW, so write barriers are not allowed. //go:nowritebarrierrec func globrunqput(gp *g) { sched.runq.pushBack(gp) sched.runqsize++ } // Put gp at the head of the global runnable queue. // Sched must be locked. // May run during STW, so write barriers are not allowed. //go:nowritebarrierrec func globrunqputhead(gp *g) { sched.runq.push(gp) sched.runqsize++ } // Put a batch of runnable goroutines on the global runnable queue. // This clears *batch. // Sched must be locked. func globrunqputbatch(batch *gQueue, n int32) { sched.runq.pushBackAll(*batch) sched.runqsize += n *batch = gQueue{} } // Try get a batch of G's from the global runnable queue. // Sched must be locked. 
func globrunqget(_p_ *p, max int32) *g { if sched.runqsize == 0 { return nil } n := sched.runqsize/gomaxprocs + 1 if n > sched.runqsize { n = sched.runqsize } if max > 0 && n > max { n = max } if n > int32(len(_p_.runq))/2 { n = int32(len(_p_.runq)) / 2 } sched.runqsize -= n gp := sched.runq.pop() n-- for ; n > 0; n-- { gp1 := sched.runq.pop() runqput(_p_, gp1, false) } return gp } // Put p to on _Pidle list. // Sched must be locked. // May run during STW, so write barriers are not allowed. //go:nowritebarrierrec func pidleput(_p_ *p) { if !runqempty(_p_) { throw("pidleput: P has non-empty run queue") } _p_.link = sched.pidle sched.pidle.set(_p_) atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic } // Try get a p from _Pidle list. // Sched must be locked. // May run during STW, so write barriers are not allowed. //go:nowritebarrierrec func pidleget() *p { _p_ := sched.pidle.ptr() if _p_ != nil { sched.pidle = _p_.link atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic } return _p_ } // runqempty reports whether _p_ has no Gs on its local run queue. // It never returns true spuriously. func runqempty(_p_ *p) bool { // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail, // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext. // Simply observing that runqhead == runqtail and then observing that runqnext == nil // does not mean the queue is empty. for { head := atomic.Load(&_p_.runqhead) tail := atomic.Load(&_p_.runqtail) runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext))) if tail == atomic.Load(&_p_.runqtail) { return head == tail && runnext == 0 } } } // To shake out latent assumptions about scheduling order, // we introduce some randomness into scheduling decisions // when running with the race detector. // The need for this was made obvious by changing the // (deterministic) scheduling order in Go 1.5 and breaking // many poorly-written tests. // With the randomness here, as long as the tests pass // consistently with -race, they shouldn't have latent scheduling // assumptions. const randomizeScheduler = raceenabled // runqput tries to put g on the local runnable queue. // If next is false, runqput adds g to the tail of the runnable queue. // If next is true, runqput puts g in the _p_.runnext slot. // If the run queue is full, runnext puts g on the global queue. // Executed only by the owner P. func runqput(_p_ *p, gp *g, next bool) { if randomizeScheduler && next && fastrand()%2 == 0 { next = false } if next { retryNext: oldnext := _p_.runnext if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { goto retryNext } if oldnext == 0 { return } // Kick the old runnext out to the regular run queue. gp = oldnext.ptr() } retry: h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers t := _p_.runqtail if t-h < uint32(len(_p_.runq)) { _p_.runq[t%uint32(len(_p_.runq))].set(gp) atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption return } if runqputslow(_p_, gp, h, t) { return } // the queue is not full, now the put above must succeed goto retry } // Put g and a batch of work from local runnable queue on global queue. // Executed only by the owner P. func runqputslow(_p_ *p, gp *g, h, t uint32) bool { var batch [len(_p_.runq)/2 + 1]*g // First, grab a batch from local queue. 
n := t - h n = n / 2 if n != uint32(len(_p_.runq)/2) { throw("runqputslow: queue is not full") } for i := uint32(0); i < n; i++ { batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() } if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume return false } batch[n] = gp if randomizeScheduler { for i := uint32(1); i <= n; i++ { j := fastrandn(i + 1) batch[i], batch[j] = batch[j], batch[i] } } // Link the goroutines. for i := uint32(0); i < n; i++ { batch[i].schedlink.set(batch[i+1]) } var q gQueue q.head.set(batch[0]) q.tail.set(batch[n]) // Now put the batch on global queue. lock(&sched.lock) globrunqputbatch(&q, int32(n+1)) unlock(&sched.lock) return true } // Get g from local runnable queue. // If inheritTime is true, gp should inherit the remaining time in the // current time slice. Otherwise, it should start a new time slice. // Executed only by the owner P. func runqget(_p_ *p) (gp *g, inheritTime bool) { // If there's a runnext, it's the next G to run. for { next := _p_.runnext if next == 0 { break } if _p_.runnext.cas(next, 0) { return next.ptr(), true } } for { h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers t := _p_.runqtail if t == h { return nil, false } gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume return gp, false } } } // Grabs a batch of goroutines from _p_'s runnable queue into batch. // Batch is a ring buffer starting at batchHead. // Returns number of grabbed goroutines. // Can be executed by any P. func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { for { h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer n := t - h n = n - n/2 if n == 0 { if stealRunNextG { // Try to steal from _p_.runnext. if next := _p_.runnext; next != 0 { if _p_.status == _Prunning { // Sleep to ensure that _p_ isn't about to run the g // we are about to steal. // The important use case here is when the g running // on _p_ ready()s another g and then almost // immediately blocks. Instead of stealing runnext // in this window, back off to give _p_ a chance to // schedule runnext. This will avoid thrashing gs // between different Ps. // A sync chan send/recv takes ~50ns as of time of // writing, so 3us gives ~50x overshoot. if GOOS != "windows" { usleep(3) } else { // On windows system timer granularity is // 1-15ms, which is way too much for this // optimization. So just yield. osyield() } } if !_p_.runnext.cas(next, 0) { continue } batch[batchHead%uint32(len(batch))] = next return 1 } } return 0 } if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t continue } for i := uint32(0); i < n; i++ { g := _p_.runq[(h+i)%uint32(len(_p_.runq))] batch[(batchHead+i)%uint32(len(batch))] = g } if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume return n } } } // Steal half of elements from local runnable queue of p2 // and put onto local runnable queue of p. // Returns one of the stolen elements (or nil if failed). 
func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { t := _p_.runqtail n := runqgrab(p2, &_p_.runq, t, stealRunNextG) if n == 0 { return nil } n-- gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() if n == 0 { return gp } h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers if t-h+n >= uint32(len(_p_.runq)) { throw("runqsteal: runq overflow") } atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption return gp } // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only // be on one gQueue or gList at a time. type gQueue struct { head guintptr tail guintptr } // empty reports whether q is empty. func (q *gQueue) empty() bool { return q.head == 0 } // push adds gp to the head of q. func (q *gQueue) push(gp *g) { gp.schedlink = q.head q.head.set(gp) if q.tail == 0 { q.tail.set(gp) } } // pushBack adds gp to the tail of q. func (q *gQueue) pushBack(gp *g) { gp.schedlink = 0 if q.tail != 0 { q.tail.ptr().schedlink.set(gp) } else { q.head.set(gp) } q.tail.set(gp) } // pushBackAll adds all Gs in l2 to the tail of q. After this q2 must // not be used. func (q *gQueue) pushBackAll(q2 gQueue) { if q2.tail == 0 { return } q2.tail.ptr().schedlink = 0 if q.tail != 0 { q.tail.ptr().schedlink = q2.head } else { q.head = q2.head } q.tail = q2.tail } // pop removes and returns the head of queue q. It returns nil if // q is empty. func (q *gQueue) pop() *g { gp := q.head.ptr() if gp != nil { q.head = gp.schedlink if q.head == 0 { q.tail = 0 } } return gp } // popList takes all Gs in q and returns them as a gList. func (q *gQueue) popList() gList { stack := gList{q.head} *q = gQueue{} return stack } // A gList is a list of Gs linked through g.schedlink. A G can only be // on one gQueue or gList at a time. type gList struct { head guintptr } // empty reports whether l is empty. func (l *gList) empty() bool { return l.head == 0 } // push adds gp to the head of l. func (l *gList) push(gp *g) { gp.schedlink = l.head l.head.set(gp) } // pushAll prepends all Gs in q to l. func (l *gList) pushAll(q gQueue) { if !q.empty() { q.tail.ptr().schedlink = l.head l.head = q.head } } // pop removes and returns the head of l. If l is empty, it returns nil. func (l *gList) pop() *g { gp := l.head.ptr() if gp != nil { l.head = gp.schedlink } return gp } //go:linkname setMaxThreads runtime..z2fdebug.setMaxThreads func setMaxThreads(in int) (out int) { lock(&sched.lock) out = int(sched.maxmcount) if in > 0x7fffffff { // MaxInt32 sched.maxmcount = 0x7fffffff } else { sched.maxmcount = int32(in) } checkmcount() unlock(&sched.lock) return } func haveexperiment(name string) bool { // The gofrontend does not support experiments. return false } //go:nosplit func procPin() int { _g_ := getg() mp := _g_.m mp.locks++ return int(mp.p.ptr().id) } //go:nosplit func procUnpin() { _g_ := getg() _g_.m.locks-- } //go:linkname sync_runtime_procPin sync.runtime_procPin //go:nosplit func sync_runtime_procPin() int { return procPin() } //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin //go:nosplit func sync_runtime_procUnpin() { procUnpin() } //go:linkname sync_atomic_runtime_procPin sync..z2fatomic.runtime_procPin //go:nosplit func sync_atomic_runtime_procPin() int { return procPin() } //go:linkname sync_atomic_runtime_procUnpin sync..z2fatomic.runtime_procUnpin //go:nosplit func sync_atomic_runtime_procUnpin() { procUnpin() } // Active spinning for sync.Mutex. 
//go:linkname sync_runtime_canSpin sync.runtime_canSpin //go:nosplit func sync_runtime_canSpin(i int) bool { // sync.Mutex is cooperative, so we are conservative with spinning. // Spin only few times and only if running on a multicore machine and // GOMAXPROCS>1 and there is at least one other running P and local runq is empty. // As opposed to runtime mutex we don't do passive spinning here, // because there can be work on global runq or on other Ps. if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 { return false } if p := getg().m.p.ptr(); !runqempty(p) { return false } return true } //go:linkname sync_runtime_doSpin sync.runtime_doSpin //go:nosplit func sync_runtime_doSpin() { procyield(active_spin_cnt) } var stealOrder randomOrder // randomOrder/randomEnum are helper types for randomized work stealing. // They allow to enumerate all Ps in different pseudo-random orders without repetitions. // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS // are coprime, then a sequences of (i + X) % GOMAXPROCS gives the required enumeration. type randomOrder struct { count uint32 coprimes []uint32 } type randomEnum struct { i uint32 count uint32 pos uint32 inc uint32 } func (ord *randomOrder) reset(count uint32) { ord.count = count ord.coprimes = ord.coprimes[:0] for i := uint32(1); i <= count; i++ { if gcd(i, count) == 1 { ord.coprimes = append(ord.coprimes, i) } } } func (ord *randomOrder) start(i uint32) randomEnum { return randomEnum{ count: ord.count, pos: i % ord.count, inc: ord.coprimes[i%uint32(len(ord.coprimes))], } } func (enum *randomEnum) done() bool { return enum.i == enum.count } func (enum *randomEnum) next() { enum.i++ enum.pos = (enum.pos + enum.inc) % enum.count } func (enum *randomEnum) position() uint32 { return enum.pos } func gcd(a, b uint32) uint32 { for b != 0 { a, b = b, a%b } return a }
Timers(pp *
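// The randomOrder/randomEnum machinery above rests on a number-theory fact:
// when inc is coprime with count, the sequence pos, pos+inc, pos+2*inc, ...
// (mod count) visits every index in [0, count) exactly once. A standalone
// sketch that checks this property; this is a hypothetical illustration, not
// part of the runtime (the gcd helper is copied from the code above):
package main

import "fmt"

func gcd(a, b uint32) uint32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	const count = uint32(8)
	for inc := uint32(1); inc <= count; inc++ {
		if gcd(inc, count) != 1 {
			continue // not coprime: the walk would cycle before covering all slots
		}
		seen := make(map[uint32]bool)
		pos := uint32(3) // arbitrary start, like randomOrder.start(i)
		for i := uint32(0); i < count; i++ {
			seen[pos] = true
			pos = (pos + inc) % count
		}
		fmt.Printf("inc=%d visits %d distinct positions\n", inc, len(seen)) // always count
	}
}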
query_params_builder.rs
use super::{QueryFlags, QueryParams, QueryValues};
use crate::consistency::Consistency;
use crate::types::CBytes;

#[derive(Debug, Default)]
pub struct QueryParamsBuilder {
    consistency: Consistency,
    flags: Option<Vec<QueryFlags>>,
    values: Option<QueryValues>,
    with_names: Option<bool>,
    page_size: Option<i32>,
    paging_state: Option<CBytes>,
    serial_consistency: Option<Consistency>,
    timestamp: Option<i64>,
}

impl QueryParamsBuilder {
    /// Factory function that returns a new `QueryParamsBuilder`.
    /// The default consistency level is `One`.
    pub fn new() -> QueryParamsBuilder {
        Default::default()
    }

    /// Sets a new query consistency.
    pub fn consistency(mut self, consistency: Consistency) -> Self {
        self.consistency = consistency;

        self
    }

    // Sets new flags.
    builder_opt_field!(flags, Vec<QueryFlags>);

    /// Sets new values and toggles the corresponding query flags.
    pub fn values(mut self, values: QueryValues) -> Self {
        let with_names = values.with_names();
        self.with_names = Some(with_names);
        self.values = Some(values);
        self.flags = self.flags.or_else(|| Some(vec![])).map(|mut flags| {
            flags.push(QueryFlags::Value);
            if with_names {
                flags.push(QueryFlags::WithNamesForValues);
            }
            flags
        });

        self
    }

    // Sets new with_names parameter value.
    builder_opt_field!(with_names, bool);

    /// Sets a new page size and toggles the corresponding query flag.
    pub fn page_size(mut self, size: i32) -> Self {
        self.page_size = Some(size);
        self.flags = self.flags.or_else(|| Some(vec![])).map(|mut flags| {
            flags.push(QueryFlags::PageSize);
            flags
        });

        self
    }

    /// Sets a new paging state and toggles the corresponding query flag.
    pub fn paging_state(mut self, state: CBytes) -> Self {
        self.paging_state = Some(state);
        self.flags = self.flags.or_else(|| Some(vec![])).map(|mut flags| {
            flags.push(QueryFlags::WithPagingState);
            flags
        });

        self
    }

    // Sets new serial_consistency value.
    builder_opt_field!(serial_consistency, Consistency);

    // Sets new timestamp value.
    builder_opt_field!(timestamp, i64);

    /// Finalizes the building process and returns the assembled `QueryParams`.
    pub fn finalize(self) -> QueryParams {
        QueryParams {
            consistency: self.consistency,
            flags: self.flags.unwrap_or_default(),
            values: self.values,
            with_names: self.with_names,
            page_size: self.page_size,
            paging_state: self.paging_state,
            serial_consistency: self.serial_consistency,
            timestamp: self.timestamp,
        }
    }
}
service_test.go
package minirpc

import (
	"reflect"
	"testing"
)

type Foo struct{}

type Args struct {
	A, B int
}

// Exported method: picked up during service registration.
func (f Foo) Sum(args Args, reply *int) error {
	*reply = args.A + args.B
	return nil
}

// Unexported method: must be skipped during service registration.
func (f Foo) sum(args Args, reply *int) error {
	*reply = args.A + args.B
	return nil
}

func _assert(t *testing.T, ok bool, format string, args ...interface{}) {
	if !ok {
		t.Fatalf(format, args...)
	}
}

func TestNewService(t *test
ert(t, newService(Foo{}) != nil, "NewService failed") svc := newService(Foo{}) _assert(t, svc.name == "Foo", "NewService failed") _assert(t, len(svc.method) == 1, "NewService failed") _assert(t, svc.method["Sum"] != nil, "NewService failed") _assert(t, svc.method["sum"] == nil, "NewService failed") } func TestMethodType_Call(t *testing.T) { svc := newService(Foo{}) mType := svc.method["Sum"] args := mType.newArgv() reply := mType.newReply() args.Set(reflect.ValueOf(Args{1, 2})) err := svc.call(mType, args, reply) _assert(t, err == nil, "call failed") _assert(t, *reply.Interface().(*int) == 3, "call failed") }
ing.T) { _ass
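// The service test above hinges on a property of Go reflection: for a
// non-interface type, reflect.Type.NumMethod and Method cover exported
// methods only, which is why newService registers Sum but can never see sum.
// A minimal standalone sketch of that behavior (hypothetical, independent of
// minirpc):
package main

import (
	"fmt"
	"reflect"
)

type Foo struct{}

func (Foo) Sum(a, b int) int { return a + b }
func (Foo) sum(a, b int) int { return a - b } // unexported: invisible below

func main() {
	t := reflect.TypeOf(Foo{})
	// Prints only "Sum"; "sum" never appears in the reflected method set.
	for i := 0; i < t.NumMethod(); i++ {
		fmt.Println(t.Method(i).Name)
	}
}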
background.js
var defaults_dict = { 'downscroll_speed_opt': 75, 'prev_key_opt': 37, // L arrow 'next_key_opt': 39, // R arrow 'alttext_key_opt': 32, // spacebar 'random_key_opt': 82, // 'r'
chrome.storage.sync.set({default_vals: defaults_dict});
};
pexels.go
package pexels

//go:generate mockgen --destination=../mocks/mock_pexeler.go --package mocks github.com/martinomburajr/pexels/pexels Pexeler,GetRandomPexeler

import (
	"encoding/json"
	"errors"
	"fmt"
	"github.com/martinomburajr/pexels/auth"
	"github.com/martinomburajr/pexels/utils"
	"io/ioutil"
	"net/http"
	"strings"
)

const (
	//ImageSizeOriginal represents the original size. Typically the largest with the best quality
	ImageSizeOriginal = "original"
	//ImageSizeLarge is a large photo
	ImageSizeLarge = "large"
	//ImageSizeLarge2x is double the resolution of ImageSizeLarge
	ImageSizeLarge2x = "large2x"
	//ImageSizeMedium medium photo
	ImageSizeMedium = "medium"
	//ImageSizeSmall small photo (lacks in resolution)
	ImageSizeSmall = "small"
	//ImageSizePortrait portrait mode. This image is usually cropped to fit that size
	ImageSizePortrait = "portrait"
	//ImageSizeLandscape landscape sized photo
	ImageSizeLandscape = "landscape"
	//ImageSizeTiny tiny photo
	ImageSizeTiny = "tiny"

	//BaseURL is the base URL to the API
	BaseURL = "https://api.pexels.com/v1/"
	//URLCurated is a path to the curated section within pexels. According to pexels ... We add at least one new photo per hour to our curated list so that you get a changing selection of trending photos. For more information about the request parameters and response structure have a look at the search method above.
	URLCurated = "curated"
)

// ImageSizes represents a set of image sizes that pexels uses
var ImageSizes = []string{ImageSizeOriginal, ImageSizeLarge, ImageSizeLarge2x, ImageSizeMedium, ImageSizeSmall, ImageSizePortrait, ImageSizeLandscape, ImageSizeTiny}

// PexelImageResponse represents a response from the server regarding an image request
type PexelImageResponse struct {
	Page         int          `json:"page,omitempty"`
	PerPage      int          `json:"per_page,omitempty"`
	TotalResults int          `json:"total_results,omitempty"`
	URL          string       `json:"url"`
	NextPage     string       `json:"next_page"`
	Photos       []PexelPhoto `json:"photos"`
}

// PexelPhoto represents the information of photo
type PexelPhoto struct {
	ID           int              `json:"id,omitempty"`
	Width        int              `json:"width,omitempty"`
	Height       int              `json:"height,omitempty"`
	URL          string           `json:"url,omitempty"`
	Photographer string           `json:"photographer,omitempty"`
	Source       PexelPhotoSource `json:"src,omitempty"`
}

// PexelPhotoSource represents a photo source embedded within the PexelPhoto
type PexelPhotoSource struct {
	// Original - The size of the original image is given with the attributes width and height.
	Original string `json:"original,omitempty"`
	// Large - This image has a maximum width of 940px and a maximum height of 650px. It has the aspect ratio of the original image.
	Large string `json:"large,omitempty"`
	// Large2x - This image has a maximum width of 1880px and a maximum height of 1300px. It has the aspect ratio of the original image.
	Large2x string `json:"large2x,omitempty"`
	//Medium - This image has a height of 350px and a flexible width. It has the aspect ratio of the original image.
	Medium string `json:"medium,omitempty"`
	//Small - This image has a height of 130px and a flexible width. It has the aspect ratio of the original image.
	Small string `json:"small,omitempty"`
	//Portrait - This image has a width of 800px and a height of 1200px.
	Portrait string `json:"portrait,omitempty"`
	//Landscape - This image has a width of 1200px and height of 627px.
	Landscape string `json:"landscape,omitempty"`
	//Tiny - This image has a width of 280px and height of 200px.
	Tiny string `json:"tiny,omitempty"`
}

// Pexeler interface contains valid methods that a Pexels type can utilize
type Pexeler interface {
	Get(client *http.Client, session *auth.PexelSessionObj, id int, size string) ([]byte, error)
	GetRandomImage(size string) (int, []byte, error)
	GetBySize(size string) string
}

// GetRandomPexeler is the narrow interface for callers that only need a random image.
// Its signature mirrors the *PexelPhoto implementation below.
type GetRandomPexeler interface {
	GetRandomImage(size string) (int, []byte, error)
}

// Get is the PexelPhoto implementation of Pexeler.Get; it retrieves an image based on its size.
func (pi *PexelPhoto) Get(client *http.Client, session *auth.PexelSessionObj, id int, size string) ([]byte, error) {
	urll := fmt.Sprintf("%s%s/%d", BaseURL, "photos", id)

	req, err := http.NewRequest(http.MethodGet, urll, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set(http.CanonicalHeaderKey("Authorization"), session.API_KEY)

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if len(data) < 1 {
		return nil, fmt.Errorf("malformed response from pexels server")
	}

	err = json.Unmarshal(data, pi)
	if err != nil {
		return nil, err
	}

	s := parseSize(size)
	bySize := pi.GetBySize(s)
	return GetImage(client, session, bySize)
}

// GetImage fetches the actual image. The difference between GetImage and Get is that GetImage actually fetches the image, whereas Get returns the PexelPhoto body that has a URL link to the image.
func GetImage(client *http.Client, session *auth.PexelSessionObj, imageURL string) ([]byte, error) {
	if client == nil {
		return nil, errors.New("nil client")
	}

	req, err := http.NewRequest(http.MethodGet, imageURL, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set(http.CanonicalHeaderKey("Authorization"), session.API_KEY)

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	return ioutil.ReadAll(resp.Body)
}

// parseSize normalizes the size argument; unrecognized sizes fall back to ImageSizeLarge
func parseSize(size string) string
// GetRandomImage returns a random image from the Pexel API
func (pi *PexelPhoto) GetRandomImage(size string) (int, []byte, error) {
	// Use a short name so the variable does not shadow the utils package.
	u := utils.Utils{}
	randomInt := u.RandInt(1000)
	urll := fmt.Sprintf("%s%s?per_page=%d&page=%d", BaseURL, URLCurated, 1, randomInt)

	data, err := u.ParseRequest(urll, "")
	if err != nil {
		return 0, nil, err
	}

	s := parseSize(size)

	var pr PexelImageResponse
	err = json.Unmarshal(data, &pr)
	if err != nil {
		return 0, nil, err
	}
	if len(pr.Photos) == 0 {
		return 0, nil, fmt.Errorf("pexels: curated response contained no photos")
	}
	*pi = pr.Photos[0]

	bySize := pi.GetBySize(s)
	data2, err := u.ParseRequest(bySize, "")
	if err != nil {
		return 0, nil, err
	}
	return pi.ID, data2, nil
}

// GetBySize returns the exact size based url based on the size parameter.
// The appropriate url is returned as a string.
func (pi *PexelPhoto) GetBySize(size string) string {
	switch size {
	case ImageSizeLarge2x:
		return pi.Source.Large2x
	case ImageSizeLarge:
		return pi.Source.Large
	case ImageSizeLandscape:
		return pi.Source.Landscape
	case ImageSizeMedium:
		return pi.Source.Medium
	case ImageSizeOriginal:
		return pi.Source.Original
	case ImageSizeSmall:
		return pi.Source.Small
	case ImageSizeTiny:
		return pi.Source.Tiny
	default:
		return pi.Source.Large
	}
}
{
	lower := strings.ToLower(size)
	for _, v := range ImageSizes {
		if lower == strings.ToLower(v) {
			return lower
		}
	}
	// Unrecognized sizes fall back to the large variant.
	return ImageSizeLarge
}
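// A hypothetical end-to-end use of the API above. The import path mirrors the
// go:generate directive; whether API-key wiring happens inside
// utils.ParseRequest is an assumption here, so treat this as a sketch rather
// than the library's documented usage.
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/martinomburajr/pexels/pexels"
)

func main() {
	var photo pexels.PexelPhoto
	// Pull a random curated photo at the "large" size and save it to disk.
	id, img, err := photo.GetRandomImage(pexels.ImageSizeLarge)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile(fmt.Sprintf("pexel_%d.jpg", id), img, 0644); err != nil {
		log.Fatal(err)
	}
}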
mod.rs
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use ast::ir::*;
use ast::ptr::*;
use std::any::Any;
use vm::VM;

/// An inlining pass. Based on certain criteria, the compiler chooses certain functions to be
/// inlined at their callsite by rewriting the call into a branch with several copied blocks from
/// the inlined function
mod inlining;
pub use compiler::passes::inlining::Inlining;

/// A pass to check and rewrite RET instructions to ensure a single return sink for every function
mod ret_sink;
pub use compiler::passes::ret_sink::RetSink;

/// A pass to inject runtime fastpath into Mu IR
mod inject_runtime;
pub use compiler::passes::inject_runtime::InjectRuntime;

/// A def-use pass. Gathers use info and use counts for SSA variables in the IR
mod def_use;
pub use compiler::passes::def_use::DefUse;

/// A tree generation pass. Mu IR is a flat IR instruction sequence; this pass turns it into a
/// depth tree which is easier for instruction selection.
mod tree_gen;
pub use compiler::passes::tree_gen::TreeGen;

/// A phi node eliminating pass. Mu IR is SSA based with goto-with-values variants, so it still
/// has phi nodes (implicitly). We get out of SSA form in this pass by removing phi nodes and
/// inserting intermediate blocks for moving values around.
mod gen_mov_phi;
pub use compiler::passes::gen_mov_phi::GenMovPhi;

/// A control flow analysis pass at IR level.
mod control_flow;
pub use compiler::passes::control_flow::ControlFlowAnalysis;

/// A trace scheduling pass. It uses the CFA result from the last pass to schedule blocks in a
/// way that favors hot path execution.
mod trace_gen;
pub use compiler::passes::trace_gen::TraceGen;

/// A pass to generate a dot graph for the current IR.
mod dot_gen;
pub use compiler::passes::dot_gen::DotGen;

mod uir_gen;
pub use compiler::passes::uir_gen::UIRGen;

/// A trait for implementing compiler passes.
///
/// A Mu function is supposed to be traversed in the following order:
/// * start_function()
/// * visit_function()
///   for each block
///   * start_block()
///   * visit_block()
///     for each instruction
///     * visit_inst()
///   * finish_block()
/// * finish_function()
///
/// Functions can be overridden for each pass's own purpose.
#[allow(unused_variables)]
pub trait CompilerPass {
    fn name(&self) -> &'static str;
    fn as_any(&self) -> &Any;

    fn execute(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
        info!("---CompilerPass {} for {}---", self.name(), func);

        self.start_function(vm, func);
        self.visit_function(vm, func);
        self.finish_function(vm, func);

        info!("---finish---");
    }

    fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
        for (label, ref mut block) in func.content.as_mut().unwrap().blocks.iter_mut() {
            trace!("block: {}", label);

            self.start_block(vm, &mut func.context, block);
            self.visit_block(vm, &mut func.context, block);
            self.finish_block(vm, &mut func.context, block);
        }
    }

    fn
(&mut self, vm: &VM, func_context: &mut FunctionContext, block: &mut Block) { for inst in block.content.as_mut().unwrap().body.iter_mut() { trace!("{}", inst); self.visit_inst(vm, func_context, inst); } } fn start_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {} fn finish_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {} fn start_block(&mut self, vm: &VM, func_context: &mut FunctionContext, block: &mut Block) {} fn finish_block(&mut self, vm: &VM, func_context: &mut FunctionContext, block: &mut Block) {} fn visit_inst(&mut self, vm: &VM, func_context: &mut FunctionContext, node: &P<TreeNode>) {} }
visit_block
parsing_error.rs
use std::string::ToString; #[derive(Clone, Debug, PartialEq)] pub struct ParsingError { pub message: String, } impl ParsingError { pub fn
<T: ToString>(message: T) -> Self { Self { message: message.to_string(), } } pub fn boxed<T: ToString>(message: T) -> Box<Self> { Box::new(Self::new(message)) } } impl std::error::Error for ParsingError {} impl std::fmt::Display for ParsingError { fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { write!(formatter, "parsing error: {}", self.message) } }
new
local_impl.rs
//! //! The implementation-local statement parser. //! use std::cell::RefCell; use std::rc::Rc; use zinc_lexical::Keyword; use zinc_lexical::Lexeme; use zinc_lexical::Symbol; use zinc_lexical::Token; use zinc_lexical::TokenStream; use crate::error::Error as SyntaxError; use crate::error::ParsingError; use crate::parser::attribute::Parser as AttributeParser; use crate::parser::statement::r#const::Parser as ConstStatementParser; use crate::parser::statement::r#fn::Parser as FnStatementParser; use crate::tree::attribute::Attribute; use crate::tree::statement::local_impl::Statement as ImplementationLocalStatement; /// The invalid statement error hint. pub static HINT_ONLY_SOME_STATEMENTS: &str = "only constants and functions may be declared within a namespace"; /// /// The parser state. /// #[derive(Debug, Clone, Copy)] pub enum State { /// The initial state. AttributeOrNext, /// The attribute list has been parsed so far. Expects the optional `pub` keyword. KeywordPubOrNext, /// The attribute list has been parsed so far. Expects the optional `const` keyword. KeywordConstOrNext, /// The attribute list with optional `pub` and `const` keywords have been parsed so far. Statement, } impl Default for State { fn default() -> Self { Self::AttributeOrNext } } /// /// The implementation-local statement parser. /// #[derive(Default)] pub struct Parser { /// The parser state. state: State, /// The `pub` keyword token, which is stored to get its location as the statement location. keyword_public: Option<Token>, /// The `const` keyword token, which is stored to get its location as the statement location. keyword_constant: Option<Token>, /// The statement outer attributes. attributes: Vec<Attribute>, /// The token returned from a subparser. next: Option<Token>, } impl Parser { /// /// Parses a statement allowed in type implementations. /// pub fn parse( mut self, stream: Rc<RefCell<TokenStream>>, initial: Option<Token>, ) -> Result<(ImplementationLocalStatement, Option<Token>), ParsingError> { self.next = initial; loop { match self.state { State::AttributeOrNext => { match crate::parser::take_or_next(self.next.take(), stream.clone())? { token @ Token { lexeme: Lexeme::Symbol(Symbol::Number), .. } => { let (attribute, next) = AttributeParser::default().parse(stream.clone(), Some(token))?; self.attributes.push(attribute); self.next = next; self.state = State::AttributeOrNext; } token => { self.next = Some(token); self.state = State::KeywordPubOrNext; } } } State::KeywordPubOrNext => { match crate::parser::take_or_next(self.next.take(), stream.clone())? { token @ Token { lexeme: Lexeme::Keyword(Keyword::Pub), .. } => self.keyword_public = Some(token), token => self.next = Some(token), } self.state = State::KeywordConstOrNext; continue; } State::KeywordConstOrNext => { match crate::parser::take_or_next(self.next.take(), stream.clone())? { token @ Token { lexeme: Lexeme::Keyword(Keyword::Const), .. } => { let look_ahead = stream.borrow_mut().look_ahead(1)?.to_owned(); if let Token { lexeme: Lexeme::Keyword(Keyword::Fn), .. } = look_ahead { self.keyword_constant = Some(token); } else { return ConstStatementParser::default() .parse(stream.clone(), Some(token)) .map(|(statement, next)| { (ImplementationLocalStatement::Const(statement), next) }); } } token => self.next = Some(token), } self.state = State::Statement; continue; } State::Statement => { return match crate::parser::take_or_next(self.next.take(), stream.clone())? { token @ Token { lexeme: Lexeme::Keyword(Keyword::Fn), .. 
} => { let (mut builder, next) = FnStatementParser::default().parse(stream.clone(), Some(token))?; if let Some(token) = self.keyword_constant { builder.set_location(token.location); builder.set_constant(); } if let Some(token) = self.keyword_public { builder.set_location(token.location); builder.set_public(); } builder.set_attributes(self.attributes); Ok((ImplementationLocalStatement::Fn(builder.finish()), next)) } Token { lexeme: Lexeme::Symbol(Symbol::Semicolon), location, } => Ok((ImplementationLocalStatement::Empty(location), None)), Token { lexeme, location } => { Err(ParsingError::Syntax(SyntaxError::expected_one_of( location, vec!["const", "fn"], lexeme, Some(HINT_ONLY_SOME_STATEMENTS), ))) } } } } } } } #[cfg(test)] mod tests { use zinc_lexical::Location; use zinc_lexical::TokenStream; use super::Parser; use crate::tree::attribute::Attribute; use crate::tree::binding::Binding; use crate::tree::expression::block::Expression as BlockExpression; use crate::tree::identifier::Identifier; use crate::tree::pattern_binding::variant::Variant as BindingPatternVariant; use crate::tree::pattern_binding::Pattern as BindingPattern; use crate::tree::r#type::variant::Variant as TypeVariant; use crate::tree::r#type::Type; use crate::tree::statement::local_impl::Statement as ImplementationLocalStatement; use crate::tree::statement::r#fn::Statement as FnStatement; #[test] fn ok_fn_public() { let input = r#"pub fn f(a: field) -> field {}"#; let expected = Ok(( ImplementationLocalStatement::Fn(FnStatement::new( Location::test(1, 1), true, false, Identifier::new(Location::test(1, 8), "f".to_owned()), vec![Binding::new( Location::test(1, 10), BindingPattern::new( Location::test(1, 10), BindingPatternVariant::new_binding( Identifier::new(Location::test(1, 10), "a".to_owned()), false, ), ), Some(Type::new(Location::test(1, 13), TypeVariant::field())), )], Some(Type::new(Location::test(1, 23), TypeVariant::field())), BlockExpression::new(Location::test(1, 29), vec![], None), vec![], )), None, )); let result = Parser::default().parse(TokenStream::test(input).wrap(), None); assert_eq!(result, expected); } #[test] fn ok_fn_constant() { let input = r#"const fn f(a: field) -> field {}"#; let expected = Ok(( ImplementationLocalStatement::Fn(FnStatement::new( Location::test(1, 1), false, true, Identifier::new(Location::test(1, 10), "f".to_owned()), vec![Binding::new( Location::test(1, 12), BindingPattern::new( Location::test(1, 12), BindingPatternVariant::new_binding( Identifier::new(Location::test(1, 12), "a".to_owned()), false, ), ), Some(Type::new(Location::test(1, 15), TypeVariant::field())), )], Some(Type::new(Location::test(1, 25), TypeVariant::field())), BlockExpression::new(Location::test(1, 31), vec![], None), vec![], )), None, )); let result = Parser::default().parse(TokenStream::test(input).wrap(), None); assert_eq!(result, expected); }
let expected = Ok(( ImplementationLocalStatement::Fn(FnStatement::new( Location::test(1, 1), true, true, Identifier::new(Location::test(1, 14), "f".to_owned()), vec![Binding::new( Location::test(1, 16), BindingPattern::new( Location::test(1, 16), BindingPatternVariant::new_binding( Identifier::new(Location::test(1, 16), "a".to_owned()), false, ), ), Some(Type::new(Location::test(1, 19), TypeVariant::field())), )], Some(Type::new(Location::test(1, 29), TypeVariant::field())), BlockExpression::new(Location::test(1, 35), vec![], None), vec![], )), None, )); let result = Parser::default().parse(TokenStream::test(input).wrap(), None); assert_eq!(result, expected); } #[test] fn ok_fn_single_attribute() { let input = r#" #[test] fn test() {} "#; let expected = Ok(( ImplementationLocalStatement::Fn(FnStatement::new( Location::test(3, 1), false, false, Identifier::new(Location::test(3, 4), "test".to_owned()), vec![], None, BlockExpression::new(Location::test(3, 11), vec![], None), vec![Attribute::new( Location::test(2, 1), false, Identifier::new(Location::test(2, 3), "test".to_owned()), )], )), None, )); let result = Parser::default().parse(TokenStream::test(input).wrap(), None); assert_eq!(result, expected); } #[test] fn ok_fn_multiple_attributes() { let input = r#" #[test] #[should_panic] #[ignore] fn test() {} "#; let expected = Ok(( ImplementationLocalStatement::Fn(FnStatement::new( Location::test(5, 1), false, false, Identifier::new(Location::test(5, 4), "test".to_owned()), vec![], None, BlockExpression::new(Location::test(5, 11), vec![], None), vec![ Attribute::new( Location::test(2, 1), false, Identifier::new(Location::test(2, 3), "test".to_owned()), ), Attribute::new( Location::test(3, 1), false, Identifier::new(Location::test(3, 3), "should_panic".to_owned()), ), Attribute::new( Location::test(4, 1), false, Identifier::new(Location::test(4, 3), "ignore".to_owned()), ), ], )), None, )); let result = Parser::default().parse(TokenStream::test(input).wrap(), None); assert_eq!(result, expected); } }
index.js
export * from './fake-backend';
export * from './fetch-wrapper'; export * from './history'; export * from './role';
main.go
package main

import (
	"log"

	"github.com/xescugc/notigator/cmd"
)

func main() {
	// log.Fatal prints the error and exits the process with status 1.
	if err := cmd.RootCmd.Execute(); err != nil {
		log.Fatal(err)
	}
}
zz_generated_webapplicationfirewallpolicies.go
// +build go1.13 // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. package armnetwork import ( "context" "github.com/Azure/azure-sdk-for-go/sdk/armcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "net/http" "net/url" "strings" "time" ) // WebApplicationFirewallPoliciesClient contains the methods for the WebApplicationFirewallPolicies group. // Don't use this type directly, use NewWebApplicationFirewallPoliciesClient() instead. type WebApplicationFirewallPoliciesClient struct { con *armcore.Connection subscriptionID string } // NewWebApplicationFirewallPoliciesClient creates a new instance of WebApplicationFirewallPoliciesClient with the specified values. func NewWebApplicationFirewallPoliciesClient(con *armcore.Connection, subscriptionID string) WebApplicationFirewallPoliciesClient { return WebApplicationFirewallPoliciesClient{con: con, subscriptionID: subscriptionID} } // Pipeline returns the pipeline associated with this client. func (client WebApplicationFirewallPoliciesClient) Pipeline() azcore.Pipeline { return client.con.Pipeline() } // CreateOrUpdate - Creates or update policy with specified rule set name within a resource group. func (client WebApplicationFirewallPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, policyName string, parameters WebApplicationFirewallPolicy, options *WebApplicationFirewallPoliciesCreateOrUpdateOptions) (WebApplicationFirewallPolicyResponse, error) { req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, policyName, parameters, options) if err != nil { return WebApplicationFirewallPolicyResponse{}, err } resp, err := client.Pipeline().Do(req) if err != nil { return WebApplicationFirewallPolicyResponse{}, err } if !resp.HasStatusCode(http.StatusOK, http.StatusCreated) { return WebApplicationFirewallPolicyResponse{}, client.createOrUpdateHandleError(resp) } result, err := client.createOrUpdateHandleResponse(resp) if err != nil { return WebApplicationFirewallPolicyResponse{}, err } return result, nil } // createOrUpdateCreateRequest creates the CreateOrUpdate request. func (client WebApplicationFirewallPoliciesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, policyName string, parameters WebApplicationFirewallPolicy, options *WebApplicationFirewallPoliciesCreateOrUpdateOptions) (*azcore.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}" urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) urlPath = strings.ReplaceAll(urlPath, "{policyName}", url.PathEscape(policyName)) urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath)) if err != nil { return nil, err } req.Telemetry(telemetryInfo) query := req.URL.Query() query.Set("api-version", "2020-07-01") req.URL.RawQuery = query.Encode() req.Header.Set("Accept", "application/json") return req, req.MarshalAsJSON(parameters) } // createOrUpdateHandleResponse handles the CreateOrUpdate response. 
func (client WebApplicationFirewallPoliciesClient) createOrUpdateHandleResponse(resp *azcore.Response) (WebApplicationFirewallPolicyResponse, error) { result := WebApplicationFirewallPolicyResponse{RawResponse: resp.Response} err := resp.UnmarshalAsJSON(&result.WebApplicationFirewallPolicy) return result, err } // createOrUpdateHandleError handles the CreateOrUpdate error response. func (client WebApplicationFirewallPoliciesClient) createOrUpdateHandleError(resp *azcore.Response) error { var err CloudError if err := resp.UnmarshalAsJSON(&err); err != nil { return err } return azcore.NewResponseError(&err, resp.Response) } // BeginDelete - Deletes Policy. func (client WebApplicationFirewallPoliciesClient) BeginDelete(ctx context.Context, resourceGroupName string, policyName string, options *WebApplicationFirewallPoliciesBeginDeleteOptions) (HTTPPollerResponse, error) { resp, err := client.delete(ctx, resourceGroupName, policyName, options) if err != nil { return HTTPPollerResponse{}, err } result := HTTPPollerResponse{ RawResponse: resp.Response, } pt, err := armcore.NewPoller("WebApplicationFirewallPoliciesClient.Delete", "location", resp, client.deleteHandleError) if err != nil { return HTTPPollerResponse{}, err } poller := &httpPoller{ pt: pt, pipeline: client.con.Pipeline(), } result.Poller = poller result.PollUntilDone = func(ctx context.Context, frequency time.Duration) (*http.Response, error) { return poller.pollUntilDone(ctx, frequency) } return result, nil } // ResumeDelete creates a new HTTPPoller from the specified resume token. // token - The value must come from a previous call to HTTPPoller.ResumeToken(). func (client WebApplicationFirewallPoliciesClient) ResumeDelete(token string) (HTTPPoller, error) { pt, err := armcore.NewPollerFromResumeToken("WebApplicationFirewallPoliciesClient.Delete", token, client.deleteHandleError) if err != nil { return nil, err } return &httpPoller{ pipeline: client.con.Pipeline(), pt: pt, }, nil } // Delete - Deletes Policy. func (client WebApplicationFirewallPoliciesClient) delete(ctx context.Context, resourceGroupName string, policyName string, options *WebApplicationFirewallPoliciesBeginDeleteOptions) (*azcore.Response, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, policyName, options) if err != nil
{
		return nil, err
	}
resp, err := client.Pipeline().Do(req) if err != nil { return nil, err } if !resp.HasStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent) { return nil, client.deleteHandleError(resp) } return resp, nil } // deleteCreateRequest creates the Delete request. func (client WebApplicationFirewallPoliciesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, policyName string, options *WebApplicationFirewallPoliciesBeginDeleteOptions) (*azcore.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}" urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) urlPath = strings.ReplaceAll(urlPath, "{policyName}", url.PathEscape(policyName)) urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath)) if err != nil { return nil, err } req.Telemetry(telemetryInfo) query := req.URL.Query() query.Set("api-version", "2020-07-01") req.URL.RawQuery = query.Encode() req.Header.Set("Accept", "application/json") return req, nil } // deleteHandleError handles the Delete error response. func (client WebApplicationFirewallPoliciesClient) deleteHandleError(resp *azcore.Response) error { var err CloudError if err := resp.UnmarshalAsJSON(&err); err != nil { return err } return azcore.NewResponseError(&err, resp.Response) } // Get - Retrieve protection policy with specified name within a resource group. func (client WebApplicationFirewallPoliciesClient) Get(ctx context.Context, resourceGroupName string, policyName string, options *WebApplicationFirewallPoliciesGetOptions) (WebApplicationFirewallPolicyResponse, error) { req, err := client.getCreateRequest(ctx, resourceGroupName, policyName, options) if err != nil { return WebApplicationFirewallPolicyResponse{}, err } resp, err := client.Pipeline().Do(req) if err != nil { return WebApplicationFirewallPolicyResponse{}, err } if !resp.HasStatusCode(http.StatusOK) { return WebApplicationFirewallPolicyResponse{}, client.getHandleError(resp) } result, err := client.getHandleResponse(resp) if err != nil { return WebApplicationFirewallPolicyResponse{}, err } return result, nil } // getCreateRequest creates the Get request. func (client WebApplicationFirewallPoliciesClient) getCreateRequest(ctx context.Context, resourceGroupName string, policyName string, options *WebApplicationFirewallPoliciesGetOptions) (*azcore.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}" urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) urlPath = strings.ReplaceAll(urlPath, "{policyName}", url.PathEscape(policyName)) urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath)) if err != nil { return nil, err } req.Telemetry(telemetryInfo) query := req.URL.Query() query.Set("api-version", "2020-07-01") req.URL.RawQuery = query.Encode() req.Header.Set("Accept", "application/json") return req, nil } // getHandleResponse handles the Get response. 
func (client WebApplicationFirewallPoliciesClient) getHandleResponse(resp *azcore.Response) (WebApplicationFirewallPolicyResponse, error) { result := WebApplicationFirewallPolicyResponse{RawResponse: resp.Response} err := resp.UnmarshalAsJSON(&result.WebApplicationFirewallPolicy) return result, err } // getHandleError handles the Get error response. func (client WebApplicationFirewallPoliciesClient) getHandleError(resp *azcore.Response) error { var err CloudError if err := resp.UnmarshalAsJSON(&err); err != nil { return err } return azcore.NewResponseError(&err, resp.Response) } // List - Lists all of the protection policies within a resource group. func (client WebApplicationFirewallPoliciesClient) List(resourceGroupName string, options *WebApplicationFirewallPoliciesListOptions) WebApplicationFirewallPolicyListResultPager { return &webApplicationFirewallPolicyListResultPager{ pipeline: client.con.Pipeline(), requester: func(ctx context.Context) (*azcore.Request, error) { return client.listCreateRequest(ctx, resourceGroupName, options) }, responder: client.listHandleResponse, errorer: client.listHandleError, advancer: func(ctx context.Context, resp WebApplicationFirewallPolicyListResultResponse) (*azcore.Request, error) { return azcore.NewRequest(ctx, http.MethodGet, *resp.WebApplicationFirewallPolicyListResult.NextLink) }, statusCodes: []int{http.StatusOK}, } } // listCreateRequest creates the List request. func (client WebApplicationFirewallPoliciesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *WebApplicationFirewallPoliciesListOptions) (*azcore.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies" urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath)) if err != nil { return nil, err } req.Telemetry(telemetryInfo) query := req.URL.Query() query.Set("api-version", "2020-07-01") req.URL.RawQuery = query.Encode() req.Header.Set("Accept", "application/json") return req, nil } // listHandleResponse handles the List response. func (client WebApplicationFirewallPoliciesClient) listHandleResponse(resp *azcore.Response) (WebApplicationFirewallPolicyListResultResponse, error) { result := WebApplicationFirewallPolicyListResultResponse{RawResponse: resp.Response} err := resp.UnmarshalAsJSON(&result.WebApplicationFirewallPolicyListResult) return result, err } // listHandleError handles the List error response. func (client WebApplicationFirewallPoliciesClient) listHandleError(resp *azcore.Response) error { var err CloudError if err := resp.UnmarshalAsJSON(&err); err != nil { return err } return azcore.NewResponseError(&err, resp.Response) } // ListAll - Gets all the WAF policies in a subscription. 
func (client WebApplicationFirewallPoliciesClient) ListAll(options *WebApplicationFirewallPoliciesListAllOptions) WebApplicationFirewallPolicyListResultPager { return &webApplicationFirewallPolicyListResultPager{ pipeline: client.con.Pipeline(), requester: func(ctx context.Context) (*azcore.Request, error) { return client.listAllCreateRequest(ctx, options) }, responder: client.listAllHandleResponse, errorer: client.listAllHandleError, advancer: func(ctx context.Context, resp WebApplicationFirewallPolicyListResultResponse) (*azcore.Request, error) { return azcore.NewRequest(ctx, http.MethodGet, *resp.WebApplicationFirewallPolicyListResult.NextLink) }, statusCodes: []int{http.StatusOK}, } } // listAllCreateRequest creates the ListAll request. func (client WebApplicationFirewallPoliciesClient) listAllCreateRequest(ctx context.Context, options *WebApplicationFirewallPoliciesListAllOptions) (*azcore.Request, error) { urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies" urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath)) if err != nil { return nil, err } req.Telemetry(telemetryInfo) query := req.URL.Query() query.Set("api-version", "2020-07-01") req.URL.RawQuery = query.Encode() req.Header.Set("Accept", "application/json") return req, nil } // listAllHandleResponse handles the ListAll response. func (client WebApplicationFirewallPoliciesClient) listAllHandleResponse(resp *azcore.Response) (WebApplicationFirewallPolicyListResultResponse, error) { result := WebApplicationFirewallPolicyListResultResponse{RawResponse: resp.Response} err := resp.UnmarshalAsJSON(&result.WebApplicationFirewallPolicyListResult) return result, err } // listAllHandleError handles the ListAll error response. func (client WebApplicationFirewallPoliciesClient) listAllHandleError(resp *azcore.Response) error { var err CloudError if err := resp.UnmarshalAsJSON(&err); err != nil { return err } return azcore.NewResponseError(&err, resp.Response) }
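// The sketch below is illustrative only and is not generated code: it shows one
// plausible way to drive the List pager and the BeginDelete poller defined above.
// It assumes the pager exposes the NextPage/PageResponse/Err methods conventional
// for this SDK preview, and that ctx, a configured *armcore.Connection con, and
// the resource names are supplied by the caller.
//
//	client := NewWebApplicationFirewallPoliciesClient(con, "<subscription-id>")
//
//	// Page through all policies in a resource group.
//	pager := client.List("<resource-group>", nil)
//	for pager.NextPage(ctx) {
//		page := pager.PageResponse()
//		for _, policy := range *page.WebApplicationFirewallPolicyListResult.Value {
//			_ = policy // inspect each WebApplicationFirewallPolicy
//		}
//	}
//	if err := pager.Err(); err != nil {
//		// handle the paging error
//	}
//
//	// Delete a policy and block until the long-running operation completes.
//	deleteResp, err := client.BeginDelete(ctx, "<resource-group>", "<policy-name>", nil)
//	if err != nil {
//		// handle error
//	}
//	if _, err := deleteResp.PollUntilDone(ctx, 30*time.Second); err != nil {
//		// handle error
//	}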
imageMath.py
#!/usr/bin/env python #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Author: Piyush Agram # Copyright 2013, by the California Institute of Technology. ALL RIGHTS RESERVED. # United States Government Sponsorship acknowledged. # Any commercial use must be negotiated with the Office of Technology Transfer at # the California Institute of Technology. # This software may be subject to U.S. export control laws. # By accepting this software, the user agrees to comply with all applicable U.S. # export laws and regulations. User has the responsibility to obtain export licenses, # or other export authority as may be required before exporting such information to # foreign countries or providing access to foreign persons. # #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ import argparse import symtable import math import numpy as np from numpy.lib.stride_tricks import as_strided import logging import os import sys helpStr = """ ISCE Band image with imageMath.py Examples: ********* 1) imageMath.py -e='a*exp(-1.0*J*arg(b))' -o test.int -t cfloat --a=resampOnlyImage.int --b=topophase.mph This uses phase from topophase.mph to correct topophase from the interferograms 2) imageMath.py -e='a_0;a_1' --a=resampOnlyImage.amp -o test.amp -s BIL This converts a BIP image to a BIL image 3) imageMath.py -e="abs(a);sqrt(b_0**2 + b_1**2)" --a=topophase.flat --b="topophase.mph;3419;float;2;BIP" -o test.mag -s BIL This should produce a BIL (RMG) image where both channels are equal. Input the correct width before testing this. Rules: ****** 0) Input math expressions should be valid python expressions. 1) A math expression for every band of output image is needed. For a multi-band output image, these expressions are separated by a ;. Example: See Example 2 above. 2) All variable names in the math expressions need to be lower case, single character. Capital characters and multi-char names are reserved for constants and functions respectively. 3) The band of multi-band input images are represented by adding _i to the variable name, where "i" is the band number. All indices are zero-based (C and python). Example : a_0 represents the first band of the image represented by variable "a". 4) For a single band image, the _0 band notation is optional. Example: a_0 and a are equivalent for a single band image. 5) For every lower case variable in the equations, another input "--varname" is needed. Example shown above where --a and --b are defined. 6) Variables can be defined in two ways: a) File name (assuming an ISCE .xml file also exists). Example --a=resamp.int b) Image grammar: "Filename;width;datatype;bands;scheme" Example --a="resamp.int;3200;cfloat;1;BSQ" - Default value for datatype=float - Default value for bands = 1 - Default value for scheme = BSQ c) In the image grammar: Single character codes for datatypes are case sensitive (Numpy convention) whereas multi-character codes are case-insensitive. Internally, everything is translated to numpy convention by the code before processing. 
""" #######Current list of supported unitary functions - f(x) fnDict = { 'cos': np.cos, 'sin': np.sin, 'exp': np.exp, 'log': np.log, 'log2': np.log2, 'log10': np.log10, 'tan' : np.tan, 'asin': np.arcsin, 'acos': np.arccos, 'atan': np.arctan, 'arg': np.angle, 'conj': np.conj, 'abs' : np.abs, 'round' : np.round, 'ceil' : np.ceil, 'floor' : np.floor, 'real' : np.real, 'imag' : np.imag, 'rad': np.radians, 'deg': np.degrees, 'sqrt': np.sqrt } #######Current list of constants constDict = { "PI" : np.pi, "J" : np.complex(0.0, 1.0), "I" : np.complex(0.0, 1.0), "E" : np.exp(1.0), "NAN" : np.nan } #####Dictionary of global parameters iMath = { 'outFile' : None, ####Output file name 'outBands' : [], ####List of out band mmaps 'outScheme' : 'BSQ', ####Output scheme 'equations' : [], #####List of math equations 'outType' : 'f', ####Output datatype 'width' : None, ####Width of images 'length' : None, ####Length of images 'inBands' : {}, ####Dictionary of input band mmaps 'inFiles' : {} ####Dictionary input file mmaps } ######To deal with data types ''' Translation between user inputs and numpy types. Single char codes are case sensitive (Numpy convention). Multiple char codes are case insensitive. ''' ####Signed byte byte_tuple = ('b', 'byte', 'b8', 'b1') ####Unsigned byte ubyte_tuple = ('B', 'ubyte', 'ub8', 'ub1') ####Short int short_tuple = ('h', 'i2', 'short', 'int2', 'int16') ####Unsigned short int ushort_tuple = ('H', 'ui2', 'ushort', 'uint2', 'uint16') ####Integer int_tuple = ('i', 'i4', 'i32', 'int', 'int32','intc') ####Unsigned int uint_tuple = ('I', 'ui4', 'ui32', 'uint', 'uint32', 'uintc') ####Long int long_tuple = ('l', 'l8', 'l64', 'long', 'long64', 'longc', 'intpy', 'pyint', 'int64') ####Unsigned long int ulong_tuple = ('L', 'ul8', 'ul64', 'ulong', 'ulong64', 'ulongc', 'uintpy', 'pyuint', 'uint64') ######Float float_tuple =('f', 'float', 'single', 'float32', 'real4', 'r4') ######Complex float cfloat_tuple = ('F', 'c8','complex','complex64','cfloat') #####Double double_tuple = ('d', 'double', 'real8', 'r8', 'float64', 'floatpy', 'pyfloat') ######Complex Double cdouble_tuple=('D', 'c16', 'complex128', 'cdouble') ####Mapping to numpy data type typeDict = {} for dtuple in (byte_tuple, ubyte_tuple, short_tuple, short_tuple, int_tuple, uint_tuple, long_tuple, ulong_tuple, float_tuple, cfloat_tuple, double_tuple, cdouble_tuple): for dtype in dtuple: typeDict[dtype] = dtuple[0] def NUMPY_type(instr): ''' Translates a given string into a numpy data type string. ''' tstr = instr.strip() if len(tstr) == 1: key = tstr else: key = tstr.lower() try: npType = typeDict[key] except: raise ValueError('Unknown data type provided : %s '%(instr)) return npType isceTypeDict = { "f" : "FLOAT", "F" : "CFLOAT", "d" : "DOUBLE", "h" : "SHORT", "i" : "INT", "l" : "LONG", } def printNUMPYMap(): import json return json.dumps(typeDict, indent=4, sort_keys=True) #########Classes and utils to deal with strings ############### def isNumeric(s): ''' Determine if a string is a number. ''' try: i = float(s) return True except ValueError, TypeError: return False class NumericStringParser(object): ''' Parse the input expression using Python's inbuilt parser. ''' def __init__(self, num_string): ''' Create a parser object with input string. ''' self.string = num_string self._restricted = fnDict.keys() + constDict.keys() def parse(self): ''' Parse the input expression to get list of identifiers. 
        '''
        try:
            symTable = symtable.symtable(self.string, 'string', 'eval')
        except:
            raise IOError('Not a valid python math expression \n' +
                          self.string)

        idents = symTable.get_identifiers()

        known = []
        unknown = []
        for ident in idents:
            if ident not in self._restricted:
                unknown.append(ident)
            else:
                known.append(ident)

        for val in unknown:
            band = val.split('_')[0]
            if len(band) != 1:
                raise IOError('Multi character variables in input expressions represent functions or constants. Unknown function or constant : %s'%(val))
            elif (band.lower() != band):
                raise IOError('Single character upper case letters are used for constants. No available constant named %s'%(val))

        return unknown, known

def uniqueList(seq):
    '''
    Returns a list with unique elements in a list.
    '''
    seen = set()
    seen_add = seen.add
    return [x for x in seq if x not in seen and not seen_add(x)]

def bandsToFiles(bandList, logger):
    '''
    Take a list of band names and convert it to file names.
    '''
    flist = []
    for band in bandList:
        names = band.split('_')
        if len(names) > 2:
            logger.error('Invalid band name: %s'%band)

        if names[0] not in flist:
            flist.append(names[0])

    logger.debug('Number of input files : %d'%len(flist))
    logger.debug('Input files: ' + str(flist))
    return flist

#######Create the logger for the application
def createLogger(debug):
    '''
    Creates an appropriate logger.
    '''
    logger = logging.getLogger('imageMath')
    consoleHandler = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s\n%(message)s')
    consoleHandler.setFormatter(formatter)

    if debug:
        logger.setLevel(logging.DEBUG)
        consoleHandler.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
        consoleHandler.setLevel(logging.INFO)

    logger.addHandler(consoleHandler)
    return logger

##########Classes and utils for memory maps
class memmap(object):
    '''Create the memmap object.'''
    def __init__(self, fname, mode='readonly', nchannels=1,
                 nxx=None, nyy=None, scheme='BSQ', dataType='f'):
        '''Init function.'''

        fsize = np.zeros(1, dtype=dataType).itemsize

        if nxx is None:
            raise ValueError('Undefined file width for : %s'%(fname))

        if mode == 'write':
            if nyy is None:
                raise ValueError('Undefined file length for opening file: %s in write mode.'%(fname))
        else:
            try:
                nbytes = os.path.getsize(fname)
            except:
                raise ValueError('Non-existent file : %s'%(fname))

            if nyy is None:
                nyy = nbytes/(fsize*nchannels*nxx)

                if (nxx*nyy*fsize*nchannels) != nbytes:
                    raise ValueError('File size mismatch for %s. Fractional number of lines'%(fname))
            elif (nxx*nyy*fsize*nchannels) > nbytes:
                raise ValueError('File size mismatch for %s. Number of bytes expected: %d'%(fname, nbytes))
        self.name = fname
        self.width = nxx
        self.length = nyy

        ####List of memmap objects
        acc = []

        ####Create the memmap for the full file
        nshape = nchannels*nyy*nxx
        omap = np.memmap(fname, dtype=dataType, mode=mode,
                         shape=(nshape,))

        if scheme.upper() == 'BIL':
            nstrides = (nchannels*nxx*fsize, fsize)

            for band in xrange(nchannels):
                ###Starting offset
                noffset = band*nxx

                ###Temporary view
                tmap = omap[noffset:]

                ####Trick it into creating a 2D array
                fmap = as_strided(tmap, shape=(nyy,nxx), strides=nstrides)

                ###Add to list of objects
                acc.append(fmap)

        elif scheme.upper() == 'BSQ':
            nstrides = (fsize, fsize)

            for band in xrange(nchannels):
                ###Starting offset
                noffset = band*nxx*nyy

                ###Temporary view
                tmap = omap[noffset:noffset+nxx*nyy]

                ####Reshape into 2D array
                fmap = as_strided(tmap, shape=(nyy,nxx))

                ###Add to list of objects
                acc.append(fmap)

        elif scheme.upper() == 'BIP':
            nstrides = (nchannels*nxx*fsize, nchannels*fsize)

            for band in xrange(nchannels):
                ####Starting offset
                noffset = band

                ####Temporary view
                tmap = omap[noffset:]

                ####Trick it into interpreting it as a 2D array
                fmap = as_strided(tmap, shape=(nyy,nxx), strides=nstrides)

                ####Add to the list of objects
                acc.append(fmap)

        else:
            raise ValueError('Unknown file scheme: %s for file %s'%(scheme,fname))

        ######Assigning list of objects to self.bands
        self.bands = acc

def mmapFromISCE(fname, logger):
    '''
    Create a file mmap object using information in an ISCE XML.
    '''
    try:
        import isce
        import iscesys
        from iscesys.Parsers.FileParserFactory import createFileParser
    except:
        raise ImportError('ISCE has not been installed or is not importable')

    if not fname.endswith('.xml'):
        dataName = fname
        metaName = fname + '.xml'
    else:
        metaName = fname
        dataName = os.path.splitext(fname)[0]

    parser = createFileParser('xml')
    prop, fac, misc = parser.parse(metaName)

    logger.debug('Creating readonly ISCE mmap with \n' +
                 'file = %s \n'%(dataName) +
                 'bands = %d \n'%(prop['number_bands']) +
                 'width = %d \n'%(prop['width']) +
                 'length = %d \n'%(prop['length']) +
                 'scheme = %s \n'%(prop['scheme']) +
                 'dtype = %s \n'%(prop['data_type']))

    mObj = memmap(dataName, nchannels=prop['number_bands'],
                  nxx=prop['width'], nyy=prop['length'],
                  scheme=prop['scheme'],
                  dataType=NUMPY_type(prop['data_type']))

    return mObj

def mmapFromStr(fstr, logger):
    '''
    Create a file mmap object using information provided on command line.

    Grammar = 'filename;width;datatype;bands;scheme'
    '''
    def grammarError():
        raise SyntaxError(('Undefined image : %s \n' +
                           "Grammar='filename;width;datatype;bands;scheme'")%(fstr))

    parms = fstr.split(';')
    logger.debug('Input string: ' + str(parms))
    if len(parms) < 2:
        grammarError()

    try:
        fname = parms[0]
        width = int(parms[1])
        if len(parms) > 2:
            datatype = NUMPY_type(parms[2])
        else:
            datatype = 'f'

        if len(parms) > 3:
            bands = int(parms[3])
        else:
            bands = 1

        if len(parms) > 4:
            scheme = parms[4].upper()
        else:
            scheme = 'BSQ'

        if scheme not in ['BIL', 'BIP', 'BSQ']:
            raise IOError('Invalid file interleaving scheme: %s'%scheme)
    except:
        grammarError()

    logger.debug('Creating readonly mmap from string with \n' +
                 'file = %s \n'%(fname) +
                 'bands = %d \n'%(bands) +
                 'width = %d \n'%(width) +
                 'scheme = %s \n'%(scheme) +
                 'dtype = %s \n'%(datatype))

    mObj = memmap(fname, nchannels=bands, nxx=width,
                  scheme=scheme, dataType=datatype)

    return mObj

#######ISCE XML rendering
def renderISCEXML(fname, bands, nyy, nxx, datatype, scheme, descr):
    '''
    Renders an ISCE XML with the right information.
''' try: import isce import isceobj except: raise ImportError('ISCE has not been installed or is not importable.') img = isceobj.createImage() img.filename = fname img.scheme = scheme img.width=nxx img.length = nyy try: img.dataType = isceTypeDict[datatype] except: try: img.dataType = isceTypeDict[NUMPY_type(datatype)] except: raise Exception('Processing complete but ISCE XML not written as the data type is currently not supported by ISCE Image Api') img.addDescription(descr) img.bands = bands img.setAccessMode('read') img.createImage() img.renderHdr() img.finalizeImage() #######Command line parsing def detailedHelp(): ''' Return the detailed help message. ''' msg = helpStr + '\n\n'+ \ 'Available Functions \n' + \ '********************\n' + \ str(fnDict.keys()) + '\n\n' + \ 'Available Constants \n' + \ '********************\n' + \ str(constDict.keys()) + '\n\n' + \ 'Available DataTypes -> numpy code mapping \n' + \ '***************************************** \n'+ \ printNUMPYMap() + '\n' return msg class customArgparseFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): pass class customArgparseAction(argparse.Action): def __call__(self, parser, args, values, option_string=None): ''' The action to be performed. ''' print detailedHelp() parser.print_help() parser.exit() def firstPassCommandLine(): ''' Take a first parse at command line parsing. Read only the basic required fields ''' #####Create the generic parser to get equation and output format first parser = argparse.ArgumentParser(description='ISCE Band math calculator.', formatter_class=customArgparseFormatter) # help_parser = subparser.add_ parser.add_argument('-H','--hh', nargs=0, action=customArgparseAction, help='Display detailed help information.') parser.add_argument('-e','--eval', type=str, required=True, action='store', help='Expression to evaluate.', dest='equation') parser.add_argument('-o','--out', type=str, default=None, action='store', help='Name of the output file', dest='out') parser.add_argument('-s','--scheme',type=str, default='BSQ', action='store', help='Output file format.', dest='scheme') parser.add_argument('-t','--type', type=str, default='float', action='store', help='Output data type.', dest='dtype') parser.add_argument('-d','--debug', action='store_true', default=False, help='Print debugging statements', dest='debug') parser.add_argument('-n','--noxml', action='store_true', default=False, help='Do not create an ISCE XML file for the output.', dest='noxml') #######Parse equation and output format first args, files = parser.parse_known_args() #####Check the output scheme for errors if args.scheme.upper() not in ['BSQ', 'BIL', 'BIP']: raise IOError('Unknown output scheme: %s'%(args.scheme)) iMath['outScheme'] = args.scheme.upper() npType = NUMPY_type(args.dtype) iMath['outType'] = npType return args, files class
customArgumentParser
(argparse.ArgumentParser): def error(self, message): raise Exception(message) def parseInputFile(varname, args): ''' Get the input string corresponding to given variable name. ''' inarg = varname.strip() ####Keyname corresponds to specific key = '--' + inarg if len(varname.strip()) > 1: raise IOError('Input variable names should be single characters.\n' + 'Invalid variable name: %s'%varname) if (inarg != inarg.lower()): raise IOError('Input variable names should be lower case. \n' + 'Invalud variable name: %s'%varname) #####Create a simple parser parser = customArgumentParser(description='Parser for band math.', add_help=False) parser.add_argument(key, type=str, required=True, action='store', help='Input string for a particular variable.', dest='instr') try: infile, rest = parser.parse_known_args(args) except: raise SyntaxError('Input file : "%s" not defined on command line'%varname) return infile.instr, rest #######The main driver that puts everything together if __name__ == '__main__': args, files = firstPassCommandLine() #######Set up logger appropriately logger = createLogger(args.debug) logger.debug('Known: '+ str(args)) logger.debug('Optional: '+ str(files)) #######Determine number of input and output bands bandList = [] for ii,expr in enumerate(args.equation.split(';')): #####Now parse the equation to get the file names used nsp = NumericStringParser(expr) logger.debug('Input Expression: %d : %s'%(ii, expr)) bands, known = nsp.parse() logger.debug('Unknown variables: ' + str(bands)) logger.debug('Known variables: ' + str(known)) iMath['equations'].append(expr) bandList = bandList + bands bandList = uniqueList(bandList) numOutBands = len(iMath['equations']) logger.debug('Number of output bands = %d'%(numOutBands)) logger.debug('Number of input bands used = %d'%(len(bandList))) logger.debug('Input bands used = ' + str(bandList)) #####Determine unique images from the bandList fileList = bandsToFiles(bandList, logger) ######Create input memmaps for ii,infile in enumerate(fileList): fstr, files = parseInputFile(infile, files) logger.debug('Input string for File %d: %s: %s'%(ii, infile, fstr)) if len(fstr.split(';')) > 1: fmap = mmapFromStr(fstr, logger) else: fmap = mmapFromISCE(fstr, logger) iMath['inFiles'][infile] = fmap if len(fmap.bands) == 1: iMath['inBands'][infile] = fmap.bands[0] for ii in xrange(len(fmap.bands)): iMath['inBands']['%s_%d'%(infile, ii)] = fmap.bands[ii] if len(files): raise IOError('Unused input variables set:\n'+ ' '.join(files)) #######Some debugging logger.debug('List of available bands: ' + str(iMath['inBands'].keys())) ####If used in calculator mode. if len(bandList) == 0: dataDict=dict(fnDict.items() + constDict.items()) logger.info('Calculator mode. 
No output files created')
        for ii, equation in enumerate(iMath['equations']):
            res = eval(equation, dataDict)
            logger.info('Output Band %d : %f '%(ii, res))

        sys.exit(0)
    else:
        if args.out is None:
            raise IOError('Output file has not been defined.')

    #####Check if all bands in bandList have been accounted for
    for band in bandList:
        if band not in iMath['inBands'].keys():
            raise ValueError('Undefined band : %s '%(band))

    ######Check if all the widths match
    widths = [img.width for var, img in iMath['inFiles'].iteritems()]
    if len(widths) != widths.count(widths[0]):
        logger.debug('Widths of images: ' +
                     str([(var, img.name, img.width) for var, img in iMath['inFiles'].iteritems()]))
        raise IOError('Input images are not of same width')

    iMath['width'] = widths[0]
    logger.debug('Output Width = %d'%(iMath['width']))

    #######Check if all the lengths match
    lengths = [img.length for var, img in iMath['inFiles'].iteritems()]
    if len(lengths) != lengths.count(lengths[0]):
        logger.debug('Lengths of images: ' +
                     str([(var, img.name, img.length) for var, img in iMath['inFiles'].iteritems()]))
        raise IOError('Input images are not of the same length')

    iMath['length'] = lengths[0]
    logger.debug('Output Length = %d'%(iMath['length']))

    #####Now create the output file
    outmap = memmap(args.out, mode='write', nchannels=numOutBands,
                    nxx=iMath['width'], nyy=iMath['length'],
                    scheme=iMath['outScheme'], dataType=iMath['outType'])

    logger.debug('Creating output ISCE mmap with \n' +
                 'file = %s \n'%(args.out) +
                 'bands = %d \n'%(numOutBands) +
                 'width = %d \n'%(iMath['width']) +
                 'length = %d \n'%(iMath['length']) +
                 'scheme = %s \n'%(iMath['outScheme']) +
                 'dtype = %s \n'%(iMath['outType']))

    iMath['outBands'] = outmap.bands

    #####Start evaluating the expressions

    ####Set up the name space to use
    dataDict = dict(fnDict.items() + constDict.items())
    bands = iMath['inBands']
    outBands = iMath['outBands']

    #####Evaluate the expressions one line at a time
    for lineno in xrange(iMath['length']):

        ####Load one line from each of the bands
        for band in bandList:
            dataDict[band] = bands[band][lineno, :]

        ####For each output band
        for kk, expr in enumerate(iMath['equations']):
            res = eval(expr, dataDict)
            outBands[kk][lineno, :] = res

    ######Render ISCE XML if needed
    if not args.noxml:
        renderISCEXML(args.out, numOutBands,
                      iMath['length'], iMath['width'],
                      iMath['outType'], iMath['outScheme'],
                      ' '.join(sys.argv))
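#######Illustrative sketch (never called): the stride arithmetic behind the
#######memmap class above. For a BIL file, line i of band b starts at element
#######(i*nchannels + b)*nxx, so shifting the flat view by band*nxx and striding
#######rows by nchannels*nxx*fsize exposes each band as a (nyy, nxx) window
#######without copying. Shapes and values here are arbitrary examples.
def _bil_striding_demo():
    nyy, nxx, nchannels = 2, 3, 2
    fsize = np.dtype('f').itemsize
    ####Interleaved-by-line layout: b0-line0, b1-line0, b0-line1, b1-line1
    flat = np.arange(nyy*nxx*nchannels, dtype='f')
    bands = []
    for band in range(nchannels):
        tmap = flat[band*nxx:]       ####skip to this band's first line
        bands.append(as_strided(tmap, shape=(nyy, nxx),
                                strides=(nchannels*nxx*fsize, fsize)))
    assert (bands[0] == [[0, 1, 2], [6, 7, 8]]).all()
    assert (bands[1] == [[3, 4, 5], [9, 10, 11]]).all()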
add_kp_movie.py
import asyncio import os from datetime import date from os import getenv from django.core.files.images import ImageFile from django.core.management.base import BaseCommand from movies.models import Poster, Movie, Genre from person.models import Person, Photo, PersonRole from parser.formatter import get_formatted_movie_fields, get_formatted_person_fields, get_formatted_role_fields from parser.kinopoisk_api import KP from argparse import ArgumentParser class Command(BaseCommand): help = 'Get full film info from kinopoisk and add to database' def add_arguments(self, parser: ArgumentParser): parser.add_argument('movie_id', type=int) parser.add_argument('-k', '--api-key', default=getenv('KP_API_KEY')) async def _get_movie_info(self, kp: KP, movie_id: int): movie, persons = await kp.get_full_film_info(movie_id) posters = await kp.get_film_photo(movie_id) kp.REQUESTS_LIMIT = 50 photos_tasks = [asyncio.create_task(kp.get_person_photo(person["kp_id"])) for person in persons] photos = await asyncio.gather(*photos_tasks) return { 'movie': movie, 'posters': posters, 'persons': persons, 'photos': photos } def _get_kp_id_from_image_data(self, image_data: dict): filename: str = next(iter(image_data)) return int(filename.removesuffix('.jpg').removeprefix('person_').removeprefix('movie_')) @staticmethod def
safe_mkdir
(dirname):
        if not os.path.exists(dirname):
            os.mkdir(dirname)

    def add_person(self, raw_person_data: dict, photos) -> tuple[int, Person]:
        kp_id = int(raw_person_data.get('kp_id'))
        person_data = get_formatted_person_fields(raw_person_data)
        person_data['birth_date'] = date(*map(int, birth_date.split('-'))) \
            if (birth_date := person_data['birth_date']) else None
        person_data['death'] = date(*map(int, death_date.split('-'))) \
            if (death_date := person_data['death']) else None
        person: Person = Person.objects.get_or_create(**person_data)[0]
        if not person.photos.exists() and (image_bin := next(iter(photos[kp_id].values()))):
            self.safe_mkdir('temp')
            file_path = os.path.join('temp', next(iter(photos[kp_id])))
            with open(file_path, 'wb') as f:
                f.write(image_bin)
            try:
                Photo(image=ImageFile(open(file_path, 'rb')), person=person,
                      orientation=Photo.OrientationType.VERTICAL.name,
                      format=Photo.FormatType.MEDIUM.name).save()
            finally:
                os.remove(file_path)
        return kp_id, person

    def handle(self, *args, **options):
        movie_id = options['movie_id']
        self.main(movie_id, options['api_key'])

    def main(self, movie_id, api_key):
        kinopoisk = KP(api_key)
        self.stdout.write("Collect data")
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        future = asyncio.ensure_future(self._get_movie_info(kinopoisk, movie_id))
        loop.run_until_complete(future)
        full_movie_info: dict = future.result()
        self.stdout.write(self.style.SUCCESS("Data received"))

        movie_info: dict = full_movie_info['movie']
        genres = [Genre.objects.get_or_create(title=genre)[0] for genre in movie_info['genres']]
        formatted_movie_info = get_formatted_movie_fields(movie_info)
        if Movie.objects.filter(**formatted_movie_info).exists():
            self.stdout.write(self.style.WARNING(f"Movie {movie_id} exists in this database"))
            return
        formatted_movie_info['movie_type_id'] = formatted_movie_info.pop('movie_type')
        movie: Movie = Movie(**formatted_movie_info)
        movie.save()
        self.stdout.write(f"Movie {movie} created")
        for genre in genres:
            movie.genres.add(genre)
        self.stdout.write(self.style.SUCCESS("Movie saved"))

        photos = {self._get_kp_id_from_image_data(image_data): image_data
                  for image_data in full_movie_info['photos']}
        persons_kp_id_map = {}
        raw_person_data: dict
        for raw_person_data in full_movie_info['persons']:
            kp_id, person = self.add_person(raw_person_data, photos)
            persons_kp_id_map[kp_id] = person
        self.stdout.write(self.style.SUCCESS("Persons saved"))

        for role in movie_info['roles']:
            PersonRole(**get_formatted_role_fields(role, movie, persons_kp_id_map[int(role['kp_id'])])).save()
        self.stdout.write(self.style.SUCCESS("Roles saved"))

        for filename, image_bin in full_movie_info['posters'].items():
            if not image_bin:
                continue
            self.safe_mkdir('temp')
            file_path = os.path.join('temp', filename)
            with open(file_path, 'wb') as f:
                f.write(image_bin)
            try:
                Poster(movie=movie, image=ImageFile(open(file_path, 'rb')),
                       orientation=Poster.OrientationType.VERTICAL.name,
                       format=Poster.FormatType.SMALL.name if '_small' in filename
                       else Poster.FormatType.LARGE.name).save()
            finally:
                os.remove(file_path)
        if os.path.exists('temp'):
            os.rmdir('temp')
        self.stdout.write(self.style.SUCCESS("Posters saved"))
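# Illustrative usage sketch (not part of the command itself): the command is
# normally run as `manage.py add_kp_movie <movie_id>`; programmatically it could
# be driven through Django's call_command. The movie id and key below are
# placeholders, not values from this project.
#
#   from django.core.management import call_command
#   call_command('add_kp_movie', 301, api_key='<KP_API_KEY>')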
balances.go
package consumption // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/tracing" "net/http" ) // BalancesClient is the consumption management client provides access to consumption resources for Azure Enterprise // Subscriptions. type BalancesClient struct { BaseClient } // NewBalancesClient creates an instance of the BalancesClient client. func NewBalancesClient(subscriptionID string) BalancesClient { return NewBalancesClientWithBaseURI(DefaultBaseURI, subscriptionID) } // NewBalancesClientWithBaseURI creates an instance of the BalancesClient client using a custom endpoint. Use this // when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewBalancesClientWithBaseURI(baseURI string, subscriptionID string) BalancesClient { return BalancesClient{NewWithBaseURI(baseURI, subscriptionID)} } // GetByBillingAccount gets the balances for a scope by billingAccountId. Balances are available via this API only for // May 1, 2014 or later. // Parameters: // billingAccountID - billingAccount ID func (client BalancesClient) GetByBillingAccount(ctx context.Context, billingAccountID string) (result Balance, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BalancesClient.GetByBillingAccount") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.GetByBillingAccountPreparer(ctx, billingAccountID) if err != nil { err = autorest.NewErrorWithError(err, "consumption.BalancesClient", "GetByBillingAccount", nil, "Failure preparing request") return } resp, err := client.GetByBillingAccountSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "consumption.BalancesClient", "GetByBillingAccount", resp, "Failure sending request") return } result, err = client.GetByBillingAccountResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "consumption.BalancesClient", "GetByBillingAccount", resp, "Failure responding to request") } return } // GetByBillingAccountPreparer prepares the GetByBillingAccount request. 
func (client BalancesClient) GetByBillingAccountPreparer(ctx context.Context, billingAccountID string) (*http.Request, error) { pathParameters := map[string]interface{}{ "billingAccountId": autorest.Encode("path", billingAccountID), } const APIVersion = "2018-06-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/providers/Microsoft.Consumption/balances", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetByBillingAccountSender sends the GetByBillingAccount request. The method will close the // http.Response Body if it receives an error. func (client BalancesClient) GetByBillingAccountSender(req *http.Request) (*http.Response, error) { return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetByBillingAccountResponder handles the response to the GetByBillingAccount request. The method always // closes the http.Response Body. func (client BalancesClient) GetByBillingAccountResponder(resp *http.Response) (result Balance, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // GetForBillingPeriodByBillingAccount gets the balances for a scope by billing period and billingAccountId. Balances // are available via this API only for May 1, 2014 or later. // Parameters: // billingAccountID - billingAccount ID // billingPeriodName - billing Period Name. func (client BalancesClient) GetForBillingPeriodByBillingAccount(ctx context.Context, billingAccountID string, billingPeriodName string) (result Balance, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BalancesClient.GetForBillingPeriodByBillingAccount") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.GetForBillingPeriodByBillingAccountPreparer(ctx, billingAccountID, billingPeriodName) if err != nil { err = autorest.NewErrorWithError(err, "consumption.BalancesClient", "GetForBillingPeriodByBillingAccount", nil, "Failure preparing request") return } resp, err := client.GetForBillingPeriodByBillingAccountSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "consumption.BalancesClient", "GetForBillingPeriodByBillingAccount", resp, "Failure sending request") return } result, err = client.GetForBillingPeriodByBillingAccountResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "consumption.BalancesClient", "GetForBillingPeriodByBillingAccount", resp, "Failure responding to request") } return } // GetForBillingPeriodByBillingAccountPreparer prepares the GetForBillingPeriodByBillingAccount request. 
func (client BalancesClient) GetForBillingPeriodByBillingAccountPreparer(ctx context.Context, billingAccountID string, billingPeriodName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "billingAccountId": autorest.Encode("path", billingAccountID), "billingPeriodName": autorest.Encode("path", billingPeriodName), } const APIVersion = "2018-06-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/providers/Microsoft.Billing/billingPeriods/{billingPeriodName}/providers/Microsoft.Consumption/balances", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetForBillingPeriodByBillingAccountSender sends the GetForBillingPeriodByBillingAccount request. The method will close the // http.Response Body if it receives an error. func (client BalancesClient) GetForBillingPeriodByBillingAccountSender(req *http.Request) (*http.Response, error) { return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetForBillingPeriodByBillingAccountResponder handles the response to the GetForBillingPeriodByBillingAccount request. The method always // closes the http.Response Body. func (client BalancesClient) GetForBillingPeriodByBillingAccountResponder(resp *http.Response) (result Balance, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return }
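// Illustrative usage sketch (not generated code): fetching a billing-account
// balance. It assumes credentials are attached to the client in the usual
// autorest fashion; the ids and the authorizer source are placeholders.
//
//	client := NewBalancesClient("<subscription-id>")
//	client.Authorizer = authorizer // e.g. from auth.NewAuthorizerFromEnvironment()
//	balance, err := client.GetByBillingAccount(context.Background(), "<billing-account-id>")
//	if err != nil {
//		// handle error
//	}
//	_ = balance // inspect Balance fields as needed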
txunmined.go
// Copyright (c) 2013-2016 The btcsuite developers // Copyright (c) 2015-2017 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package udb import ( "bytes" "decred.org/dcrwallet/errors" "decred.org/dcrwallet/wallet/walletdb" "github.com/decred/dcrd/blockchain/stake/v3" "github.com/decred/dcrd/chaincfg/chainhash" "github.com/decred/dcrd/wire" ) // InsertMemPoolTx inserts a memory pool transaction record. It also marks // previous outputs referenced by its inputs as spent. Errors with the // DoubleSpend code if another unmined transaction is a double spend of this // transaction. func (s *Store) InsertMemPoolTx(ns walletdb.ReadWriteBucket, rec *TxRecord) error { _, recVal := latestTxRecord(ns, rec.Hash[:]) if recVal != nil { return errors.E(errors.Exist, "transaction already exists mined") } v := existsRawUnmined(ns, rec.Hash[:]) if v != nil { return nil } // Check for other unmined transactions which cause this tx to be a double // spend. Unlike mined transactions that double spend an unmined tx, // existing unmined txs are not removed when inserting a double spending // unmined tx. // // An exception is made for this rule for votes and revocations that double // spend an unmined vote that doesn't vote on the tip block. for _, input := range rec.MsgTx.TxIn { prevOut := &input.PreviousOutPoint k := canonicalOutPoint(&prevOut.Hash, prevOut.Index) if v := existsRawUnminedInput(ns, k); v != nil { var spenderHash chainhash.Hash readRawUnminedInputSpenderHash(v, &spenderHash) // A switch is used here instead of an if statement so it can be // broken out of to the error below. DoubleSpendVoteCheck: switch rec.TxType { case stake.TxTypeSSGen, stake.TxTypeSSRtx: spenderVal := existsRawUnmined(ns, spenderHash[:]) spenderTxBytes := extractRawUnminedTx(spenderVal) var spenderTx wire.MsgTx err := spenderTx.Deserialize(bytes.NewReader(spenderTxBytes)) if err != nil { return errors.E(errors.IO, err) } if stake.DetermineTxType(&spenderTx) != stake.TxTypeSSGen { break DoubleSpendVoteCheck } votedBlock, _ := stake.SSGenBlockVotedOn(&spenderTx) tipBlock, _ := s.MainChainTip(ns) if votedBlock == tipBlock { err := errors.Errorf("vote or revocation %v double spends unmined "+ "vote %v on the tip block", &rec.Hash, &spenderHash) return errors.E(errors.DoubleSpend, err) } err = s.RemoveUnconfirmed(ns, &spenderTx, &spenderHash) if err != nil { return err } continue } err := errors.Errorf("%v conflicts with %v by double spending %v", &rec.Hash, &spenderHash, prevOut) return errors.E(errors.DoubleSpend, err) } } log.Infof("Inserting unconfirmed transaction %v", &rec.Hash) v, err := valueTxRecord(rec) if err != nil { return err } err = putRawUnmined(ns, rec.Hash[:], v) if err != nil { return err } txType := stake.DetermineTxType(&rec.MsgTx) for i, input := range rec.MsgTx.TxIn { // Skip stakebases for votes. if i == 0 && txType == stake.TxTypeSSGen { continue } prevOut := &input.PreviousOutPoint k := canonicalOutPoint(&prevOut.Hash, prevOut.Index) err = putRawUnminedInput(ns, k, rec.Hash[:]) if err != nil { return err } } // If the transaction is a ticket purchase, record it in the ticket // purchases bucket. if txType == stake.TxTypeSStx { tk := rec.Hash[:] tv := existsRawTicketRecord(ns, tk) if tv == nil { tv = valueTicketRecord(-1) err := putRawTicketRecord(ns, tk, tv) if err != nil { return err } } } // TODO: increment credit amount for each credit (but those are unknown // here currently). 
return nil } // removeDoubleSpends checks for any unmined transactions which would introduce // a double spend if tx was added to the store (either as a confirmed or unmined // transaction). Each conflicting transaction and all transactions which spend // it are recursively removed. func (s *Store) removeDoubleSpends(ns walletdb.ReadWriteBucket, rec *TxRecord) error { for _, input := range rec.MsgTx.TxIn { prevOut := &input.PreviousOutPoint prevOutKey := canonicalOutPoint(&prevOut.Hash, prevOut.Index) doubleSpendHash := existsRawUnminedInput(ns, prevOutKey) if doubleSpendHash != nil { var doubleSpend TxRecord doubleSpendVal := existsRawUnmined(ns, doubleSpendHash) copy(doubleSpend.Hash[:], doubleSpendHash) // Silly but need an array err := readRawTxRecord(&doubleSpend.Hash, doubleSpendVal, &doubleSpend) if err != nil { return err } log.Debugf("Removing double spending transaction %v", doubleSpend.Hash) err = s.RemoveUnconfirmed(ns, &doubleSpend.MsgTx, &doubleSpend.Hash) if err != nil { return err } } } return nil } // RemoveUnconfirmed removes an unmined transaction record and all spend chains // deriving from it from the store. This is designed to remove transactions // that would otherwise result in double spend conflicts if left in the store, // and to remove transactions that spend coinbase transactions on reorgs. It // can also be used to remove old tickets that do not meet the network difficulty // and expired transactions. func (s *Store) RemoveUnconfirmed(ns walletdb.ReadWriteBucket, tx *wire.MsgTx, txHash *chainhash.Hash) error { stxType := stake.DetermineTxType(tx) // For each potential credit for this record, each spender (if any) must // be recursively removed as well. Once the spenders are removed, the // credit is deleted. numOuts := uint32(len(tx.TxOut)) for i := uint32(0); i < numOuts; i++ { k := canonicalOutPoint(txHash, i) spenderHash := existsRawUnminedInput(ns, k) if spenderHash != nil { var spender TxRecord spenderVal := existsRawUnmined(ns, spenderHash) copy(spender.Hash[:], spenderHash) // Silly but need an array err := readRawTxRecord(&spender.Hash, spenderVal, &spender) if err != nil { return err } log.Debugf("Transaction %v is part of a removed conflict "+ "chain -- removing as well", spender.Hash) err = s.RemoveUnconfirmed(ns, &spender.MsgTx, &spender.Hash) if err != nil { return err } } err := deleteRawUnminedCredit(ns, k) if err != nil { return err } if (stxType == stake.TxTypeSStx) && (i%2 == 1) { // An unconfirmed ticket leaving the store means we need to delete // the respective commitment and its entry from the unspent // commitments index. // This assumes that the key to ticket commitment values in the db are // canonicalOutPoints. If this ever changes, this needs to be changed to // use keyTicketCommitment(...). ktc := k vtc := existsRawTicketCommitment(ns, ktc) if vtc != nil { log.Debugf("Removing unconfirmed ticket commitment %s:%d", txHash, i) err = deleteRawTicketCommitment(ns, ktc) if err != nil { return err } err = deleteRawUnspentTicketCommitment(ns, ktc) if err != nil { return err } } } } if (stxType == stake.TxTypeSSGen) || (stxType == stake.TxTypeSSRtx) { // An unconfirmed vote/revocation leaving the store means we need to // unmark the commitments of the respective ticket as unminedSpent. err := s.replaceTicketCommitmentUnminedSpent(ns, stxType, tx, false) if err != nil { return err } } // If this tx spends any previous credits (either mined or unmined), set // each unspent. 
Mined transactions are only marked spent by having the // output in the unmined inputs bucket. for _, input := range tx.TxIn { prevOut := &input.PreviousOutPoint k := canonicalOutPoint(&prevOut.Hash, prevOut.Index) err := deleteRawUnminedInput(ns, k) if err != nil { return err } // If a multisig output is recorded for this input, mark it unspent. if v := existsMultisigOut(ns, k); v != nil { vcopy := make([]byte, len(v)) copy(vcopy, v) setMultisigOutUnSpent(vcopy) err := putMultisigOutRawValues(ns, k, vcopy) if err != nil { return err } err = putMultisigOutUS(ns, k) if err != nil { return err } } } return deleteRawUnmined(ns, txHash[:]) } // UnminedTxs returns the underlying transactions for all unmined transactions // which are not known to have been mined in a block. Transactions are // guaranteed to be sorted by their dependency order. func (s *Store) UnminedTxs(ns walletdb.ReadBucket) ([]*wire.MsgTx, error) { recSet, err := s.unminedTxRecords(ns) if err != nil { return nil, err } recs := dependencySort(recSet) txs := make([]*wire.MsgTx, 0, len(recs)) for _, rec := range recs { txs = append(txs, &rec.MsgTx) } return txs, nil } func (s *Store) unminedTxRecords(ns walletdb.ReadBucket) (map[chainhash.Hash]*TxRecord, error) { unmined := make(map[chainhash.Hash]*TxRecord) err := ns.NestedReadBucket(bucketUnmined).ForEach(func(k, v []byte) error { var txHash chainhash.Hash err := readRawUnminedHash(k, &txHash) if err != nil { return err } rec := new(TxRecord) err = readRawTxRecord(&txHash, v, rec) if err != nil { return err } unmined[rec.Hash] = rec return nil }) return unmined, err } // UnminedTxHashes returns the hashes of all transactions not known to have been // mined in a block. func (s *Store) UnminedTxHashes(ns walletdb.ReadBucket) ([]*chainhash.Hash, error) { return s.unminedTxHashes(ns) } func (s *Store) unminedTxHashes(ns walletdb.ReadBucket) ([]*chainhash.Hash, error) { var hashes []*chainhash.Hash err := ns.NestedReadBucket(bucketUnmined).ForEach(func(k, v []byte) error { hash := new(chainhash.Hash) err := readRawUnminedHash(k, hash) if err == nil { hashes = append(hashes, hash) } return err }) return hashes, err } // PruneUnmined removes unmined transactions that no longer belong in the // unmined tx set. This includes: // // * Any transactions past a set expiry // * Ticket purchases with a different ticket price than the passed stake // difficulty // * Votes that do not vote on the tip block func (s *Store) PruneUnmined(dbtx walletdb.ReadWriteTx, stakeDiff int64) ([]*chainhash.Hash, error) { ns := dbtx.ReadWriteBucket(wtxmgrBucketKey) tipHash, tipHeight := s.MainChainTip(ns) type removeTx struct { tx wire.MsgTx hash *chainhash.Hash } var toRemove []*removeTx c := ns.NestedReadBucket(bucketUnmined).ReadCursor() defer c.Close() for k, v := c.First(); k != nil; k, v = c.Next() { var tx wire.MsgTx err := tx.Deserialize(bytes.NewReader(extractRawUnminedTx(v))) if err != nil { return nil, errors.E(errors.IO, err) } var expired, isTicketPurchase, isVote bool switch { case tx.Expiry != wire.NoExpiryValue && tx.Expiry <= uint32(tipHeight): expired = true case stake.IsSStx(&tx): isTicketPurchase = true if tx.TxOut[0].Value == stakeDiff { continue } case stake.IsSSGen(&tx): isVote = true // This will never actually error votedBlockHash, _ := stake.SSGenBlockVotedOn(&tx) if votedBlockHash == tipHash { continue } default: continue } txHash, err := chainhash.NewHash(k) if err != nil
if expired { log.Infof("Removing expired unmined transaction %v", txHash) } else if isTicketPurchase { log.Infof("Removing old unmined ticket purchase %v", txHash) } else if isVote { log.Infof("Removing missed or invalid vote %v", txHash) } toRemove = append(toRemove, &removeTx{tx, txHash}) } removed := make([]*chainhash.Hash, 0, len(toRemove)) for _, r := range toRemove { err := s.RemoveUnconfirmed(ns, &r.tx, r.hash) if err != nil { return removed, err } removed = append(removed, r.hash) } return removed, nil }
{ return nil, errors.E(errors.IO, err) }
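The double-spend handling in InsertMemPoolTx above reduces to one invariant: every previous outpoint spent by an unmined transaction is indexed under its canonical key, so a conflicting spender is found with a single lookup. A minimal Python sketch of that bookkeeping, with all names hypothetical (this is not dcrwallet's API):

# Each spent previous outpoint maps to the hash of the unmined tx spending it,
# mirroring the unmined-inputs bucket keyed by canonicalOutPoint above.
class UnminedIndex:
    def __init__(self):
        self.spent_by = {}  # (prev_tx_hash, output_index) -> spender tx hash

    def insert(self, tx_hash, prev_outpoints):
        for prev in prev_outpoints:
            spender = self.spent_by.get(prev)
            if spender is not None and spender != tx_hash:
                # The real code makes an exception here for votes/revocations
                # that double spend a vote off the tip block.
                raise ValueError("double spend of %r by %s" % (prev, spender.hex()))
        for prev in prev_outpoints:
            self.spent_by[prev] = tx_hash

idx = UnminedIndex()
idx.insert(b"\x01" * 32, [(b"\xaa" * 32, 0)])
idx.insert(b"\x02" * 32, [(b"\xaa" * 32, 0)])  # raises ValueError: double spend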
cdc.rs
//! Communications Class Device for USB //! //! This capsule allows Tock to support a serial port over USB. use core::cell::Cell; use core::cmp; use super::descriptors; use super::descriptors::Buffer64; use super::descriptors::CdcInterfaceDescriptor; use super::descriptors::EndpointAddress; use super::descriptors::EndpointDescriptor; use super::descriptors::InterfaceDescriptor; use super::descriptors::TransferDirection; use super::usbc_client_ctrl::ClientCtrl; use kernel::common::cells::OptionalCell; use kernel::common::cells::TakeCell; use kernel::common::cells::VolatileCell; use kernel::hil; use kernel::hil::uart; use kernel::hil::usb::TransferType; use kernel::ReturnCode; const VENDOR_ID: u16 = 0x6668; const PRODUCT_ID: u16 = 0xabce; /// Identifying number for the endpoint when transferring data from us to the /// host. const ENDPOINT_IN_NUM: usize = 2; /// Identifying number for the endpoint when transferring data from the host to /// us. const ENDPOINT_OUT_NUM: usize = 3; static LANGUAGES: &'static [u16; 1] = &[ 0x0409, // English (United States) ]; static STRINGS: &'static [&'static str] = &[ "TockOS", // Manufacturer "The Zorpinator", // Product "123456", // Serial number ]; /// Platform-specific packet length for the `SAM4L` USB hardware. pub const MAX_CTRL_PACKET_SIZE_SAM4L: u8 = 8; /// Platform-specific packet length for the `nRF52` USB hardware. pub const MAX_CTRL_PACKET_SIZE_NRF52840: u8 = 64; /// Platform-specific packet length for the `ibex` USB hardware. pub const MAX_CTRL_PACKET_SIZE_IBEX: u8 = 64; const N_ENDPOINTS: usize = 3; /// Implementation of the Abstract Control Model (ACM) for the Communications /// Class Device (CDC) over USB. pub struct CdcAcm<'a, U: 'a> { /// Helper USB client library for handling many USB operations. client_ctrl: ClientCtrl<'a, 'static, U>, /// 64 byte buffers for each endpoint. buffers: [Buffer64; N_ENDPOINTS], /// A holder reference for the TX buffer we are transmitting from. tx_buffer: TakeCell<'static, [u8]>, /// The number of bytes the client has asked us to send. We track this so we /// can pass it back to the client when the transmission has finished. tx_len: Cell<usize>, /// Where in the `tx_buffer` we need to start sending from when we continue. tx_offset: Cell<usize>, /// The TX client to use when transmissions finish. tx_client: OptionalCell<&'a dyn uart::TransmitClient>, /// A holder for the buffer to receive bytes into. We use this as a flag as /// well, if we have a buffer then we are actively doing a receive. rx_buffer: TakeCell<'static, [u8]>, /// How many bytes the client wants us to receive. rx_len: Cell<usize>, /// How many bytes we have received so far. rx_offset: Cell<usize>, /// The RX client to use when RX data is received. 
rx_client: OptionalCell<&'a dyn uart::ReceiveClient>, } impl<'a, U: hil::usb::UsbController<'a>> CdcAcm<'a, U> { pub fn new(controller: &'a U, max_ctrl_packet_size: u8) -> Self { let interfaces: &mut [InterfaceDescriptor] = &mut [ InterfaceDescriptor { interface_number: 0, interface_class: 0x02, // CDC communication interface_subclass: 0x02, // abstract control model (ACM) interface_protocol: 0x01, // V.25ter (AT commands) ..InterfaceDescriptor::default() }, InterfaceDescriptor { interface_number: 1, interface_class: 0x0a, // CDC data interface_subclass: 0x00, // none interface_protocol: 0x00, // none ..InterfaceDescriptor::default() }, ]; let cdc_descriptors: &mut [CdcInterfaceDescriptor] = &mut [ CdcInterfaceDescriptor { subtype: descriptors::CdcInterfaceDescriptorSubType::Header, field1: 0x10, // CDC field2: 0x11, // CDC }, CdcInterfaceDescriptor { subtype: descriptors::CdcInterfaceDescriptorSubType::CallManagement, field1: 0x00, // Capabilities field2: 0x01, // Data interface 1 }, CdcInterfaceDescriptor { subtype: descriptors::CdcInterfaceDescriptorSubType::AbstractControlManagement, field1: 0x06, // Capabilities field2: 0x00, // unused }, CdcInterfaceDescriptor { subtype: descriptors::CdcInterfaceDescriptorSubType::Union, field1: 0x00, // Interface 0 field2: 0x01, // Interface 1 }, ]; let endpoints: &[&[EndpointDescriptor]] = &[ &[EndpointDescriptor { endpoint_address: EndpointAddress::new_const(4, TransferDirection::DeviceToHost), transfer_type: TransferType::Interrupt, max_packet_size: 8, interval: 16, }], &[ EndpointDescriptor { endpoint_address: EndpointAddress::new_const( 2, TransferDirection::DeviceToHost, ), transfer_type: TransferType::Bulk, max_packet_size: 64, interval: 0, }, EndpointDescriptor { endpoint_address: EndpointAddress::new_const( 3, TransferDirection::HostToDevice, ), transfer_type: TransferType::Bulk, max_packet_size: 64, interval: 0, }, ], ]; let (device_descriptor_buffer, other_descriptor_buffer) = descriptors::create_descriptor_buffers( descriptors::DeviceDescriptor { vendor_id: VENDOR_ID, product_id: PRODUCT_ID, manufacturer_string: 1, product_string: 2, serial_number_string: 3, class: 0x2, // Class: CDC max_packet_size_ep0: max_ctrl_packet_size, ..descriptors::DeviceDescriptor::default() }, descriptors::ConfigurationDescriptor { ..descriptors::ConfigurationDescriptor::default() }, interfaces, endpoints, None, // No HID descriptor Some(cdc_descriptors), ); CdcAcm { client_ctrl: ClientCtrl::new( controller, device_descriptor_buffer, other_descriptor_buffer, None, // No HID descriptor None, // No report descriptor LANGUAGES, STRINGS, ), buffers: [ Buffer64::default(), Buffer64::default(), Buffer64::default(), ], tx_buffer: TakeCell::empty(), tx_len: Cell::new(0), tx_offset: Cell::new(0), tx_client: OptionalCell::empty(), rx_buffer: TakeCell::empty(), rx_len: Cell::new(0), rx_offset: Cell::new(0), rx_client: OptionalCell::empty(), } } #[inline] fn controller(&self) -> &'a U { self.client_ctrl.controller() } #[inline] fn buffer(&'a self, i: usize) -> &'a [VolatileCell<u8>; 64] { &self.buffers[i - 1].buf } } impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CdcAcm<'a, U> { fn
(&'a self) { // Set up the default control endpoint self.client_ctrl.enable(); // Setup buffers for IN and OUT data transfer. self.controller() .endpoint_set_in_buffer(ENDPOINT_IN_NUM, self.buffer(ENDPOINT_IN_NUM)); self.controller() .endpoint_in_enable(TransferType::Bulk, ENDPOINT_IN_NUM); self.controller() .endpoint_set_out_buffer(ENDPOINT_OUT_NUM, self.buffer(ENDPOINT_OUT_NUM)); self.controller() .endpoint_out_enable(TransferType::Bulk, ENDPOINT_OUT_NUM); } fn attach(&'a self) { self.client_ctrl.attach(); } fn bus_reset(&'a self) { // No need to handle this at this layer. } /// Handle a Control Setup transaction fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult { self.client_ctrl.ctrl_setup(endpoint) } /// Handle a Control In transaction fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult { self.client_ctrl.ctrl_in(endpoint) } /// Handle a Control Out transaction fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult { // Hack to make sure we ask to send data if we have a buffer queued. We // expect control out messages when the host actually connects via CDC, // so we use this to generate the data IN request. if self.tx_buffer.is_some() { self.controller().endpoint_resume_in(ENDPOINT_IN_NUM); } self.client_ctrl.ctrl_out(endpoint, packet_bytes) } fn ctrl_status(&'a self, endpoint: usize) { self.client_ctrl.ctrl_status(endpoint) } /// Handle the completion of a Control transfer fn ctrl_status_complete(&'a self, endpoint: usize) { self.client_ctrl.ctrl_status_complete(endpoint) } /// Handle a Bulk/Interrupt IN transaction. /// /// This is called when we can send data to the host. It should get called /// when we tell the controller we want to resume the IN endpoint (meaning /// we know we have data to send) and afterwards until we return /// `hil::usb::InResult::Delay` from this function. That means we can use /// this as a callback to mean that the transmission finished by waiting /// until this function is called when we don't have anything left to send. fn packet_in(&'a self, transfer_type: TransferType, endpoint: usize) -> hil::usb::InResult { match transfer_type { TransferType::Bulk => { self.tx_buffer .take() .map_or(hil::usb::InResult::Delay, |tx_buf| { // Check if we have any bytes to send. let offset = self.tx_offset.get(); let remaining = self.tx_len.get() - offset; if remaining > 0 { // We do, so we go ahead and send those. // Get packet that we have shared with the underlying // USB stack to copy the tx into. let packet = self.buffer(endpoint); // Calculate how much more we can send. let to_send = cmp::min(packet.len(), remaining); // Copy from the TX buffer to the outgoing USB packet. for i in 0..to_send { packet[i].set(tx_buf[offset + i]); } // Update our state on how much more there is to send. self.tx_offset.set(offset + to_send); // Put the TX buffer back so we can keep sending from it. self.tx_buffer.replace(tx_buf); // Return that we have data to send. hil::usb::InResult::Packet(to_send) } else { // We don't have anything to send, so that means we are // ok to signal the callback. // Signal the callback and pass back the TX buffer. self.tx_client.map(move |tx_client| { tx_client.transmitted_buffer( tx_buf, self.tx_len.get(), ReturnCode::SUCCESS, ) }); // Return that we have nothing else to do to the USB // driver. hil::usb::InResult::Delay } }) } TransferType::Control | TransferType::Isochronous | TransferType::Interrupt => { // Nothing to do for CDC ACM. 
hil::usb::InResult::Delay } } } /// Handle a Bulk/Interrupt OUT transaction fn packet_out( &'a self, transfer_type: TransferType, endpoint: usize, packet_bytes: u32, ) -> hil::usb::OutResult { match transfer_type { TransferType::Bulk => { // Start by checking to see if we even care about this RX or // not. self.rx_buffer.take().map(|rx_buf| { let rx_offset = self.rx_offset.get(); // How many more bytes can we store in our RX buffer? let available_bytes = rx_buf.len() - rx_offset; let copy_length = cmp::min(packet_bytes as usize, available_bytes); // Do the copy into the RX buffer. let packet = self.buffer(endpoint); for i in 0..copy_length { rx_buf[rx_offset + i] = packet[i].get(); } // Keep track of how many bytes we have received so far. let total_received_bytes = rx_offset + copy_length; // Update how many bytes we have gotten. self.rx_offset.set(total_received_bytes); // Check if we have received at least as many bytes as the // client asked for. if total_received_bytes >= self.rx_len.get() { self.rx_client.map(move |client| { client.received_buffer( rx_buf, total_received_bytes, ReturnCode::SUCCESS, uart::Error::None, ); }); } else { // Make sure to put the RX buffer back. self.rx_buffer.replace(rx_buf); } }); // No error cases to report to the USB. hil::usb::OutResult::Ok } TransferType::Control | TransferType::Isochronous | TransferType::Interrupt => { // Nothing to do for CDC ACM. hil::usb::OutResult::Ok } } } fn packet_transmitted(&'a self, _endpoint: usize) { // Check if more to send. self.tx_buffer.take().map(|tx_buf| { // Check if we have any bytes to send. let remaining = self.tx_len.get() - self.tx_offset.get(); if remaining > 0 { // We do, so ask to send again. self.tx_buffer.replace(tx_buf); self.controller().endpoint_resume_in(ENDPOINT_IN_NUM); } else { // We don't have anything to send, so that means we are // ok to signal the callback. // Signal the callback and pass back the TX buffer. self.tx_client.map(move |tx_client| { tx_client.transmitted_buffer(tx_buf, self.tx_len.get(), ReturnCode::SUCCESS) }); } }); } } impl<'a, U: hil::usb::UsbController<'a>> uart::Configure for CdcAcm<'a, U> { fn configure(&self, _parameters: uart::Parameters) -> ReturnCode { // Since this is not a real UART, we don't need to consider these // parameters. ReturnCode::SUCCESS } } impl<'a, U: hil::usb::UsbController<'a>> uart::Transmit<'a> for CdcAcm<'a, U> { fn set_transmit_client(&self, client: &'a dyn uart::TransmitClient) { self.tx_client.set(client); } fn transmit_buffer( &self, tx_buffer: &'static mut [u8], tx_len: usize, ) -> (ReturnCode, Option<&'static mut [u8]>) { if self.tx_buffer.is_some() { // We are already handling a transmission, we cannot queue another // request. (ReturnCode::EBUSY, Some(tx_buffer)) } else if tx_len > tx_buffer.len() { // Can't send more bytes than will fit in the buffer. (ReturnCode::ESIZE, Some(tx_buffer)) } else { // Ok, we can handle this transmission. Initialize all of our state // for our TX state machine. self.tx_len.set(tx_len); self.tx_offset.set(0); self.tx_buffer.replace(tx_buffer); // Then signal to the lower layer that we are ready to do a TX by // putting data in the IN endpoint. 
self.controller().endpoint_resume_in(ENDPOINT_IN_NUM); (ReturnCode::SUCCESS, None) } } fn transmit_abort(&self) -> ReturnCode { ReturnCode::FAIL } fn transmit_word(&self, _word: u32) -> ReturnCode { ReturnCode::FAIL } } impl<'a, U: hil::usb::UsbController<'a>> uart::Receive<'a> for CdcAcm<'a, U> { fn set_receive_client(&self, client: &'a dyn uart::ReceiveClient) { self.rx_client.set(client); } fn receive_buffer( &self, rx_buffer: &'static mut [u8], rx_len: usize, ) -> (ReturnCode, Option<&'static mut [u8]>) { if self.rx_buffer.is_some() { (ReturnCode::EBUSY, Some(rx_buffer)) } else if rx_len > rx_buffer.len() { (ReturnCode::ESIZE, Some(rx_buffer)) } else { self.rx_buffer.replace(rx_buffer); self.rx_offset.set(0); self.rx_len.set(rx_len); (ReturnCode::SUCCESS, None) } } fn receive_abort(&self) -> ReturnCode { ReturnCode::FAIL } fn receive_word(&self) -> ReturnCode { ReturnCode::FAIL } } impl<'a, U: hil::usb::UsbController<'a>> uart::Uart<'a> for CdcAcm<'a, U> {} impl<'a, U: hil::usb::UsbController<'a>> uart::UartData<'a> for CdcAcm<'a, U> {}
enable
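packet_in above streams the client's TX buffer through fixed-size bulk IN packets, advancing tx_offset each time and firing the transmit callback once nothing remains. A rough Python model of that chunking (illustrative only; the real driver is event-driven and uses Tock's 64-byte endpoint buffers):

PACKET_SIZE = 64  # matches the bulk endpoint's max_packet_size above

def drain_tx(tx_buf: bytes, tx_len: int) -> list:
    # Copy min(packet, remaining) bytes per IN packet until the buffer drains;
    # the call that finds zero remaining corresponds to the completion callback.
    packets, offset = [], 0
    while tx_len - offset > 0:
        to_send = min(PACKET_SIZE, tx_len - offset)
        packets.append(tx_buf[offset:offset + to_send])
        offset += to_send
    return packets

print([len(p) for p in drain_tx(bytes(150), 150)])  # [64, 64, 22]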
Regions.component.ts
import { Component, ViewEncapsulation } from '@angular/core'; @Component({ selector: 'regions',
export class RegionsComponent { }
templateUrl: 'app/RegionsModule/views/index.html', styleUrls: ['app/RegionsModule/views/index.css'], encapsulation: ViewEncapsulation.Emulated })
left.rs
use std::ops::Deref; use ruma::api::client::membership::forget_room; use crate::{room::Common, BaseRoom, Client, Result, RoomType}; /// A room in the left state. /// /// This struct contains all methods specific to a `Room` with type /// `RoomType::Left`. Operations may fail once the underlying `Room` changes /// `RoomType`. #[derive(Debug, Clone)] pub struct Left { pub(crate) inner: Common, } impl Left { /// Create a new `room::Left` if the underlying `Room` has type /// `RoomType::Left`. /// /// # Arguments /// * `client` - The client used to make requests. /// /// * `room` - The underlying room. pub fn new(client: Client, room: BaseRoom) -> Option<Self> { // TODO: Make this private if room.room_type() == RoomType::Left { Some(Self { inner: Common::new(client, room) }) } else { None } } /// Join this room. pub async fn join(&self) -> Result<()>
/// Forget this room. /// /// This communicates to the homeserver that it should forget the room. pub async fn forget(&self) -> Result<()> { let request = forget_room::v3::Request::new(self.inner.room_id()); let _response = self.client.send(request, None).await?; self.client.store().remove_room(self.inner.room_id()).await?; Ok(()) } } impl Deref for Left { type Target = Common; fn deref(&self) -> &Self::Target { &self.inner } }
{ self.inner.join().await }
gpro_corpus.py
import codecs import logging import pickle from chemdner_corpus import ChemdnerCorpus class GproCorpus(ChemdnerCorpus): """Chemdner GPRO corpus from BioCreative V""" def __init__(self, corpusdir, **kwargs): super(GproCorpus, self).__init__(corpusdir, **kwargs) self.subtypes = ["NESTED", "IDENTIFIER", "FULL_NAME", "ABBREVIATION"] def
(self, corenlpserver):
        """
        Assume the corpus is already loaded as a ChemdnerCorpus.
        Load the pickle and get the docs.
        :param corenlpserver:
        :return:
        """
        ps = self.path.split("/")
        cemp_path = "data/chemdner_" + "_".join(ps[-1].split("_")[1:]) + ".pickle"
        corpus = pickle.load(open(cemp_path, 'rb'))
        self.documents = corpus.documents

    def load_annotations(self, ann_dir, etype="protein"):
        logging.info("loading annotations file {}...".format(ann_dir))
        with codecs.open(ann_dir, 'r', "utf-8") as inputfile:
            for line in inputfile:
                # logging.info("processing annotation %s/%s" % (n_lines, total_lines))
                pmid, doct, start, end, text, t, dbid = line.strip().split('\t')
                if dbid != "GPRO_TYPE_2" and pmid in self.documents:
                    #if pmid in self.documents:
                    #pmid = "PMID" + pmid
                    # For now, ignore the database ID information
                    #logging.debug("using this annotation: {}".format(text.encode("utf8")))
                    self.documents[pmid].tag_chemdner_entity(int(start), int(end), t, text=text, doct=doct)
                elif pmid not in self.documents:
                    logging.info("%s not found!" % pmid)
load_corpus
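load_annotations above expects one tab-separated record per line with exactly seven fields, skipping GPRO_TYPE_2 identifiers. A small example of that format (the field values here are invented for illustration):

# Fields: pmid, doct (title/abstract), start, end, text, type, dbid.
line = "23433219\tA\t12\t16\tTP53\tFULL_NAME\tGPRO_TYPE_1"
pmid, doct, start, end, text, t, dbid = line.strip().split("\t")
if dbid != "GPRO_TYPE_2":  # the loader ignores GPRO_TYPE_2 mentions
    print(pmid, doct, int(start), int(end), text, t)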
versionutilities.ts
import * as path from 'path'; import * as tl from 'vsts-task-lib/task'; import * as toolLib from 'vsts-task-tool-lib'; import * as semver from 'semver'; import { VersionInfo } from "./models" export function
(versionA: string, versionB: string): number { if (!toolLib.isExplicitVersion(versionA) || !toolLib.isExplicitVersion(versionB)) { throw tl.loc("VersionsCanNotBeCompared", versionA, versionB); } return semver.compare(versionA, versionB); } export function compareChannelVersion(channelVersionA: string, channelVersionB: string): number { if (!channelVersionA || !channelVersionB) { throw "One channel version is missing" } let channelVersionAParts = channelVersionA.split("."); let channelVersionBParts = channelVersionB.split("."); if (channelVersionAParts.length != 2 || channelVersionBParts.length != 2) { throw tl.loc("ChannelVersionsNotComparable", channelVersionA, channelVersionB) } let channelAMajorVersion = Number.parseInt(channelVersionAParts[0]); let channelAMinorVersion = Number.parseInt(channelVersionAParts[1]); let channelBMajorVersion = Number.parseInt(channelVersionBParts[0]); let channelBMinorVersion = Number.parseInt(channelVersionBParts[1]); if (Number.isNaN(channelAMajorVersion) || Number.isNaN(channelAMinorVersion) || Number.isNaN(channelBMajorVersion) || Number.isNaN(channelBMinorVersion)) { throw tl.loc("ChannelVersionsNotComparable", channelVersionA, channelVersionB); } if (channelAMajorVersion != channelBMajorVersion) { return channelAMajorVersion > channelBMajorVersion ? 1 : -1; } else if (channelAMinorVersion != channelBMinorVersion) { return channelAMinorVersion > channelBMinorVersion ? 1 : -1; } return 0; } export function getMatchingVersionFromList(versionInfoList: VersionInfo[], versionSpec: string, includePreviewVersions: boolean = false): VersionInfo { let versionList: string[] = []; versionInfoList.forEach(versionInfo => { if (versionInfo && versionInfo.version) { versionList.push(versionInfo.version); } }); if (versionList.length > 0) { let matchedVersion = semver.maxSatisfying(versionList, versionSpec, { includePrerelease: includePreviewVersions }); if (matchedVersion) { return versionInfoList.find(versionInfo => { return versionInfo.version == matchedVersion }); } } return null; } export const Constants = { "sdk": "sdk", "runtime": "runtime", "relativeRuntimePath": path.join("shared", "Microsoft.NETCore.App"), "relativeSdkPath": "sdk", "relativeGlobalToolPath": path.join(".dotnet", "tools") }
versionCompareFunction
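compareChannelVersion above orders two "major.minor" channel strings numerically, majors first, and rejects anything that is not exactly two integer parts. An equivalent Python sketch of the rule:

def compare_channel_version(a: str, b: str) -> int:
    # Channels must look like "major.minor"; compare majors, then minors.
    a_major, a_minor = (int(p) for p in a.split("."))
    b_major, b_minor = (int(p) for p in b.split("."))
    if a_major != b_major:
        return 1 if a_major > b_major else -1
    if a_minor != b_minor:
        return 1 if a_minor > b_minor else -1
    return 0

assert compare_channel_version("3.1", "3.0") == 1
assert compare_channel_version("2.2", "3.0") == -1
assert compare_channel_version("3.0", "3.0") == 0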
push_row.rs
use syn::{parse_quote, Generics, Ident, ItemFn, Path}; pub struct Args<'a> { pub root: &'a Path, pub generics: &'a Generics, pub origin_struct_name: &'a Ident, } pub fn make(args: Args) -> ItemFn { let Args { root, generics, origin_struct_name, } = args; let (_, ty_generics, _) = generics.split_for_impl(); parse_quote! { /// Pushes a row to the end of the table pub fn push_row<__RowData: ::core::convert::Into<#origin_struct_name #ty_generics>>( &mut self,
) { self.insert_row(#root::Table::row_cnt(&self.0), data) } } }
data: __RowData,
main.go
package main import ( _ "github.com/dollarkillerx/govcl/pkgs/winappres" "github.com/dollarkillerx/govcl/vcl" ) func main()
{ vcl.Application.Initialize() vcl.Application.SetMainFormOnTaskBar(true) vcl.Application.CreateForm(&Form1) vcl.Application.Run() }
index.ts
import select from 'select-dom'; import onetime from 'onetime'; import elementReady from 'element-ready'; import compareVersions from 'tiny-version-compare'; import * as pageDetect from 'github-url-detection'; // This never changes, so it can be cached here export const getUsername = onetime(pageDetect.utils.getUsername); export const {getRepositoryInfo: getRepo, getCleanPathname} = pageDetect.utils; export const getConversationNumber = (): string | undefined => { if (pageDetect.isPR() || pageDetect.isIssue()) { return location.pathname.split('/')[4]; } return undefined; }; /** Tested on isRepoTree, isBlame, isSingleFile, isEditFile, isSingleCommit, isCommitList, isCompare. Subtly incompatible with isPR Example tag content on public repositories: https://github.com/sindresorhus/refined-github/commits/branch-or-commit/even/with/slashes.atom Example tag content on private repositories https://github.com/private/private/commits/master.atom?token=AEAXKWNRHXA2XJ2ZWCMGUUN44LM62 */ export const getCurrentBranch = (): string | undefined => { // .last needed for #2799 const feedLink = select.last('link[type="application/atom+xml"]'); // The feedLink is not available on `isIssue` #3641 if (!feedLink) { return; } return new URL(feedLink.href) .pathname .split('/') .slice(4) // Drops the initial /user/repo/route/ part .join('/') .replace(/\.atom$/, ''); }; export const isFirefox = navigator.userAgent.includes('Firefox/'); // The type requires at least one parameter https://stackoverflow.com/a/49910890 export const buildRepoURL = (...pathParts: Array<string | number> & {0: string}): string => { for (const part of pathParts) { // TODO: Can TypeScript take care of this? With https://devblogs.microsoft.com/typescript/announcing-typescript-4-1-beta/#template-literal-types if (typeof part === 'string' && /^\/|\/$/.test(part)) { throw new TypeError('The path parts shouldn’t start or end with a slash: ' + part); } } return [location.origin, getRepo()?.nameWithOwner, ...pathParts].join('/'); }; export const getPRHeadRepo = (): ReturnType<typeof getRepo> => { const headLink = select('.commit-ref.head-ref a'); return getRepo(headLink); }; export function getForkedRepo(): string | undefined { return select('meta[name="octolytics-dimension-repository_parent_nwo"]')?.content; } export const parseTag = (tag: string): {version: string; namespace: string} => { const [, namespace = '', version = ''] = /(?:(.*)@)?([^@]+)/.exec(tag) ?? []; return {namespace, version}; }; export function compareNames(username: string, realname: string): boolean { return username.replace(/-/g, '').toLowerCase() === realname.normalize('NFD').replace(/[\u0300-\u036F\W.]/g, '').toLowerCase(); } const validVersion = /^[vr]?\d+(?:\.\d+)+/; const isPrerelease = /^[vr]?\d+(?:\.\d+)+(-\d)/; export function getLatestVersionTag(tags: string[]): string { // Some tags aren't valid versions; comparison is meaningless. // Just use the latest tag returned by the API (reverse chronologically-sorted list) if (!tags.every(tag => validVersion.test(tag))) { return tags[0]; } // Exclude pre-releases let releases = tags.filter(tag => !isPrerelease.test(tag)); if (releases.length === 0) { // They were all pre-releases; undo. 
releases = tags; } let latestVersion = releases[0]; for (const release of releases) { if (compareVersions(latestVersion, release) < 0) { latestVersion = release; } } return latestVersion; } const escapeRegex = (string: string): string => string.replace(/[\\^$.*+?()[\]{}|]/g, '\\$&'); const prCommitPathnameRegex = /[/][^/]+[/][^/]+[/]pull[/](\d+)[/]commits[/]([\da-f]{7})[\da-f]{33}(?:#[\w-]+)?\b/; // eslint-disable-line unicorn/better-regex export const prCommitUrlRegex = new RegExp('\\b' + escapeRegex(location.origin) + prCommitPathnameRegex.source, 'gi'); const prComparePathnameRegex = /[/][^/]+[/][^/]+[/]compare[/](.+)(#diff-[\da-fR-]+)/; // eslint-disable-line unicorn/better-regex export const prCompareUrlRegex = new RegExp('\\b' + escapeRegex(location.origin) + prComparePathnameRegex.source, 'gi'); // To be used as replacer callback in string.replace() export function preventPrCommitLinkLoss(url: string, pr: string, commit: string, index: number, fullText: string): string { if (fullText[index + url.length] === ')') { return url; } return `[\`${commit}\` (#${pr})](${url})`; } // To be used as replacer callback in string.replace() for compare links export function preventPrCompareLinkLoss(url: string, compare: string, hash: string, index: number, fullText: string): string { if (fullText[index + url.length] === ')') { return url; } return `[\`${compare}\`${hash.slice(0, 16)}](${url})`; } // https://github.com/idimetrix/text-case/blob/master/packages/upper-case-first/src/index.ts export function up
nput: string): string { return input.charAt(0).toUpperCase() + input.slice(1).toLowerCase(); } /** Is tag or commit, with elementReady */ export async function isPermalink(): Promise<boolean> { if (/^[\da-f]{40}$/.test(getCurrentBranch()!)) { // It's a commit return true; } await elementReady('[data-hotkey="w"]'); return ( // Pre "Latest commit design updates" /Tag|Tree/.test(select('[data-hotkey="w"] i')?.textContent!) || // Text appears in the branch selector // "Latest commit design updates" select.exists('[data-hotkey="w"] .octicon-tag') // Tags have an icon ); } // Negative function so it can be used directly in `exclude` array export function isNotRefinedGitHubRepo(): boolean { return !location.pathname.startsWith('/sindresorhus/refined-github/'); }
perCaseFirst(i
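getLatestVersionTag above trusts the API's newest-first order when any tag is not version-shaped, filters out pre-releases unless every tag is one, then keeps the maximum by version comparison. A Python sketch of that selection, substituting a simplified numeric key for tiny-version-compare:

import re

VALID = re.compile(r"^[vr]?\d+(?:\.\d+)+")
PRERELEASE = re.compile(r"^[vr]?\d+(?:\.\d+)+-\d")

def latest_version_tag(tags):
    if not all(VALID.match(t) for t in tags):
        return tags[0]  # not all comparable; newest tag wins
    releases = [t for t in tags if not PRERELEASE.match(t)] or tags
    # Simplified stand-in for tiny-version-compare: dotted numeric parts only.
    return max(releases, key=lambda t: [int(p) for p in re.findall(r"\d+", t.split("-")[0])])

print(latest_version_tag(["v1.2.0", "v2.0.0-1", "v1.10.0"]))  # v1.10.0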
filterHandlers.ts
import { IFilter } from "@temp/components/Filter"; import { UseNavigatorResult } from "@temp/hooks/useNavigator"; import { ActiveTab, Pagination, Search, Sort } from "@temp/types"; import { GetFilterQueryParam, getFilterQueryParams } from "../filters"; type RequiredParams = ActiveTab & Search & Sort & Pagination; type CreateUrl = (params: RequiredParams) => string;
(query: string) => void ]; function createFilterHandlers< TFilterKeys extends string, TFilters extends object >(opts: { getFilterQueryParam: GetFilterQueryParam<TFilterKeys, TFilters>; navigate: UseNavigatorResult; createUrl: CreateUrl; params: RequiredParams; cleanupFn?: () => void; }): CreateFilterHandlers<TFilterKeys> { const { getFilterQueryParam, navigate, createUrl, params, cleanupFn } = opts; const changeFilters = (filter: IFilter<TFilterKeys>) => { if (!!cleanupFn) { cleanupFn(); } navigate( createUrl({ ...params, ...getFilterQueryParams(filter, getFilterQueryParam), activeTab: undefined }) ); }; const resetFilters = () => { if (!!cleanupFn) { cleanupFn(); } navigate( createUrl({ asc: params.asc, sort: params.sort }) ); }; const handleSearchChange = (query: string) => { if (!!cleanupFn) { cleanupFn(); } navigate( createUrl({ ...params, activeTab: undefined, query }) ); }; return [changeFilters, resetFilters, handleSearchChange]; } export default createFilterHandlers;
type CreateFilterHandlers<TFilterKeys extends string> = [ (filter: IFilter<TFilterKeys>) => void, () => void,
parse.py
import os import json import time import logging from connectors.mongodb.mongohandle import MongoHandle
from twarc import Twarc logging.basicConfig(level=logging.INFO) with open('./config/config.json') as data_file: config = json.load(data_file) logging.info('Finished parsing config.') handle = MongoHandle(config) logging.info('Initialized the Mongo connection.') t = Twarc(config['twitter']['consumer_key'], config['twitter']['consumer_secret'], config['twitter']['access_token'], config['twitter']['access_token_secret']) logging.info('Initialized Twitter connection.') for source_file in os.listdir('./' + config['source_folder']): logging.info('Preparing to hydrate: ' + source_file) tweet_ids = open('./' + config['source_folder'] + '/' + source_file) new_tweet_ids = [] logging.info('Parsing tweet ids.') start = time.time() for line in tweet_ids: line = line.strip() if (not handle.is_written(line)): new_tweet_ids.append(line) end = time.time() logging.info('Finished looking for new tweets in %.2f seconds.' % (end - start)) handle.write(t.hydrate(new_tweet_ids), source_file) tweet_ids.close() logging.info('Finished hydrating: ' + source_file) logging.info('Finished hydration task.') handle.clean()
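The script above assumes ./config/config.json provides Twitter credentials and a source folder name. A plausible shape, limited to the keys the script actually reads, with placeholder values (MongoHandle will additionally need its own connection settings, which are not visible in this file):

config = {
    "twitter": {
        "consumer_key": "<consumer-key>",
        "consumer_secret": "<consumer-secret>",
        "access_token": "<access-token>",
        "access_token_secret": "<access-token-secret>",
    },
    "source_folder": "source",
}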
thrilling.go
package thrilling

import (
	"fmt"

	"github.com/genshinsim/gcsim/pkg/core"
)

func init() {
	core.RegisterWeaponFunc("thrilling tales of dragon slayers", weapon)
	core.RegisterWeaponFunc("thrillingtalesofdragonslayers", weapon)
}

func weapon(char core.Character, c *core.Core, r int, param map[string]int) string {
	last := 0
	isActive := false
	c.Events.Subscribe(core.OnInitialize, func(args ...interface{}) bool {
		isActive = c.ActiveChar == char.CharIndex()
		return true
	}, fmt.Sprintf("thrilling-%v", char.Name()))

	m := make([]float64, core.EndStatType)
	m[core.ATKP] = .18 + float64(r)*0.06

	c.Events.Subscribe(core.OnCharacterSwap, func(args ...interface{}) bool {
		if !isActive && c.ActiveChar == char.CharIndex() {
			//swapped to current char
			isActive = true
			return false
		}
		//swap from current char to new char
		if isActive && c.ActiveChar != char.CharIndex() {
			isActive = false
			//do nothing if still on cooldown
			if last != 0 && c.F-last < 1200 {
				return false
			}
			//trigger buff if not on cd
			last = c.F
			expiry := c.F + 600
			active := c.Chars[c.ActiveChar]
			active.AddMod(core.CharStatMod{
				Key: "thrilling tales",
				Amount: func() ([]float64, bool) {
					return m, true
				},
				Expiry: expiry,
			})
			c.Log.NewEvent("ttds activated", core.LogWeaponEvent, active.CharIndex(), "expiry", expiry)
		}
		return false
	}, fmt.Sprintf("thrilling-%v", char.Name()))
	return "thrillingtalesofdragonslayers"
}
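The swap hook above is governed by two frame windows: a 600-frame (10 s at 60 fps) ATK% buff and a 1200-frame (20 s) internal cooldown measured from the last trigger. A small Python model of that timing (frame counts and the ATK% formula are taken from the Go code; the class itself is illustrative):

COOLDOWN = 1200  # frames
DURATION = 600   # frames

class TtdsTimer:
    def __init__(self, refine: int):
        self.atk_pct = 0.18 + refine * 0.06
        self.last = 0  # frame of the last trigger; 0 means never triggered

    def on_swap_away(self, frame: int):
        # Do nothing while still inside the internal cooldown.
        if self.last != 0 and frame - self.last < COOLDOWN:
            return None
        self.last = frame
        return (self.atk_pct, frame + DURATION)  # (buff amount, expiry frame)

w = TtdsTimer(refine=1)
print(w.on_swap_away(100))   # (~0.24, 700)
print(w.on_swap_away(800))   # None, still on cooldown
print(w.on_swap_away(1400))  # (~0.24, 2000)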
cluttered_mnist.py
""" Copyright (c) 2018-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from PIL import Image import numpy as np from .format_converter import BaseFormatConverter, ConverterReturn from ..config import PathField, StringField, BoolField from ..representation import ClassificationAnnotation class ClutteredMNISTConverter(BaseFormatConverter): __provider__ = 'cluttered_mnist' @classmethod def parameters(cls): params = super().parameters() params.update({ 'data_file': PathField(), 'split': StringField(optional=True, default='test', choices=['train', 'valid', 'test']), 'convert_images': BoolField(optional=True, default=True), 'images_dir': PathField(is_directory=True, optional=True) }) return params def configure(self): self.data_file = self.get_value_from_config('data_file') self.split = self.get_value_from_config('split') self.convert_images = self.get_value_from_config('convert_images') self.images_dir = self.get_value_from_config('images_dir') or self.data_file.parent / 'converted_images' if self.convert_images and not self.images_dir.exists(): self.images_dir.mkdir(parents=True) def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs): data = np.load(str(self.data_file)) x_values = data['x_{}'.format(self.split)] y_values = data['y_{}'.format(self.split)] annotations = [] for idx, y in enumerate(y_values): identifier = '{}_{}.png'.format(self.split, idx) y_label = np.argmax(y) if self.convert_images: x = x_values[idx].reshape((60, 60)) * 255 image = Image.fromarray(x) image = image.convert("L") image.save(str(self.images_dir / identifier)) annotations.append(ClassificationAnnotation(identifier, y_label))
return ConverterReturn(annotations, None, None)
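Each converted sample above is a flattened 60×60 float image scaled into 8-bit grayscale, with the label recovered by argmax over a one-hot vector. A standalone sketch of that per-sample conversion, using synthetic data in place of the real .npz:

import numpy as np
from PIL import Image

x = np.random.rand(3600).astype(np.float32)  # one flattened 60x60 sample in [0, 1)
y = np.eye(10)[7]                            # one-hot label for class 7

image = Image.fromarray(x.reshape((60, 60)) * 255).convert("L")
image.save("test_0.png")
print("label:", int(np.argmax(y)))  # label: 7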
startPyquil3243.py
# qubit number=4 # total number=42 import pyquil from pyquil.api import local_forest_runtime, QVMConnection from pyquil import Program, get_qc from pyquil.gates import * import numpy as np conn = QVMConnection() def make_circuit()-> Program: prog = Program() # circuit begin prog += H(3) # number=31 prog += CZ(0,3) # number=32 prog += H(3) # number=33 prog += H(3) # number=30 prog += X(3) # number=11 prog += H(3) # number=13 prog += CZ(0,3) # number=14 prog += H(1) # number=18 prog += CZ(3,1) # number=19 prog += Z(3) # number=25 prog += X(3) # number=35 prog += H(1) # number=20 prog += RX(-3.141592653589793,3) # number=26 prog += H(3) # number=15 prog += H(1) # number=2 prog += H(2) # number=3 prog += H(2) # number=17 prog += H(3) # number=4 prog += H(0) # number=5 prog += H(1) # number=6 prog += H(2) # number=7 prog += H(3) # number=8 prog += H(0) # number=9 prog += H(0) # number=27 prog += CZ(1,0) # number=28 prog += H(0) # number=29 prog += H(0) # number=39 prog += CZ(1,0) # number=40 prog += H(0) # number=41 prog += H(1) # number=36 prog += CZ(2,1) # number=37 prog += H(1) # number=38 prog += X(1) # number=23 prog += X(1) # number=24 # circuit end return prog def
(bitstrings) -> dict: d = {} for l in bitstrings: if d.get(l) is None: d[l] = 1 else: d[l] = d[l] + 1 return d if __name__ == '__main__': prog = make_circuit() qvm = get_qc('4q-qvm') results = qvm.run_and_measure(prog,1024) bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T bitstrings = [''.join(map(str, l)) for l in bitstrings] writefile = open("../data/startPyquil3243.csv","w") print(summrise_results(bitstrings),file=writefile) writefile.close()
summrise_results
parse_array.go
// Copyright 2016 The Cockroach Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. package parser import ( "bytes" "strings" "unicode" "unicode/utf8" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" ) var enclosingError = pgerror.NewErrorf(pgerror.CodeInvalidTextRepresentationError, "array must be enclosed in { and }") var extraTextError = pgerror.NewErrorf(pgerror.CodeInvalidTextRepresentationError, "extra text after closing right brace") var nestedArraysNotSupportedError = pgerror.NewErrorf(pgerror.CodeFeatureNotSupportedError, "nested arrays not supported") var malformedError = pgerror.NewErrorf(pgerror.CodeInvalidTextRepresentationError, "malformed array") var isQuoteChar = func(ch byte) bool { return ch == '"' } var isControlChar = func(ch byte) bool { return ch == '{' || ch == '}' || ch == ',' || ch == '"' } var isElementChar = func(r rune) bool { return r != '{' && r != '}' && r != ',' } // gobbleString advances the parser for the remainder of the current string // until it sees a non-escaped termination character, as specified by // isTerminatingChar, returning the resulting string, not including the // termination character. func (p *parseState) gobbleString(isTerminatingChar func(ch byte) bool) (out string, err error) { var result bytes.Buffer start := 0 i := 0 for i < len(p.s) && !isTerminatingChar(p.s[i]) { // In these strings, we just encode directly the character following a // '\', even if it would normally be an escape sequence. 
if i < len(p.s) && p.s[i] == '\\' { result.WriteString(p.s[start:i]) i++ if i < len(p.s) { result.WriteByte(p.s[i]) i++ } start = i } else { i++ } } if i >= len(p.s) { return "", malformedError } result.WriteString(p.s[start:i]) p.s = p.s[i:] return result.String(), nil } type parseState struct { s string evalCtx *EvalContext result *DArray t ColumnType } func (p *parseState) advance() { _, l := utf8.DecodeRuneInString(p.s) p.s = p.s[l:] } func (p *parseState) eatWhitespace() { for unicode.IsSpace(p.peek()) { p.advance() } } func (p *parseState) peek() rune { r, _ := utf8.DecodeRuneInString(p.s) return r } func (p *parseState) eof() bool { return len(p.s) == 0 } func (p *parseState) parseQuotedString() (string, error) { return p.gobbleString(isQuoteChar) } func (p *parseState) parseUnquotedString() (string, error) { out, err := p.gobbleString(isControlChar) if err != nil { return "", err } return strings.TrimSpace(out), nil } func (p *parseState) parseElement() error { var next string var err error r := p.peek() switch r { case '{': return nestedArraysNotSupportedError case '"': p.advance() next, err = p.parseQuotedString() if err != nil { return err } p.advance() default: if !isElementChar(r) { return malformedError } next, err = p.parseUnquotedString() if err != nil { return err } if strings.EqualFold(next, "null") { return p.result.Append(DNull) } } d, err := performCast(p.evalCtx, NewDString(next), p.t) if err != nil { return err } return p.result.Append(d) } // StringToColType returns a column type given a string representation of the // type. Used by dump. func StringToColType(s string) (ColumnType, error) { switch s { case "BOOL": return boolColTypeBool, nil case "INT": return intColTypeInt, nil case "FLOAT": return floatColTypeFloat, nil case "DECIMAL": return decimalColTypeDecimal, nil case "TIMESTAMP": return timestampColTypeTimestamp, nil case "TIMESTAMPTZ", "TIMESTAMP WITH TIME ZONE": return timestampTzColTypeTimestampWithTZ, nil case "INTERVAL": return intervalColTypeInterval, nil case "UUID": return uuidColTypeUUID, nil case "INET": return ipnetColTypeINet, nil case "DATE": return dateColTypeDate, nil case "STRING": return stringColTypeString, nil case "NAME": return nameColTypeName, nil case "BYTES": return bytesColTypeBytes, nil default: return nil, pgerror.NewErrorf(pgerror.CodeInternalError, "unexpected column type %s", s) } } // ParseDArrayFromString parses the string-form of constructing arrays, handling // cases such as `'{1,2,3}'::INT[]`. func
(evalCtx *EvalContext, s string, t ColumnType) (*DArray, error) { parser := parseState{ s: s, evalCtx: evalCtx, result: NewDArray(CastTargetToDatumType(t)), t: t, } parser.eatWhitespace() if parser.peek() != '{' { return nil, enclosingError } parser.advance() parser.eatWhitespace() if parser.peek() != '}' { if err := parser.parseElement(); err != nil { return nil, err } parser.eatWhitespace() for parser.peek() == ',' { parser.advance() parser.eatWhitespace() if err := parser.parseElement(); err != nil { return nil, err } } } parser.eatWhitespace() if parser.eof() { return nil, enclosingError } if parser.peek() != '}' { return nil, malformedError } parser.advance() parser.eatWhitespace() if !parser.eof() { return nil, extraTextError } return parser.result, nil }
ParseDArrayFromString
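gobbleString above scans until the first unescaped terminating character, and a backslash always passes the following byte through verbatim (there is no C-style escape table). A Python sketch of the same loop:

def gobble_string(s: str, is_terminator):
    # Returns (parsed, remainder); raises if no terminator is found,
    # matching the malformed-array error above.
    out, i = [], 0
    while i < len(s) and not is_terminator(s[i]):
        if s[i] == "\\":
            i += 1
            if i < len(s):
                out.append(s[i])
                i += 1
        else:
            out.append(s[i])
            i += 1
    if i >= len(s):
        raise ValueError("malformed array")
    return "".join(out), s[i:]

print(gobble_string('he said \\"hi\\"" rest', lambda ch: ch == '"'))
# ('he said "hi"', '" rest')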
form-control.spec.ts
import { ControlValidatorValidationType } from './control-validator-validation-type';
import { FormControl } from './form-control';

const ALWAYS_FALSE: string = 'Always false';

describe('FormControl', () => {
    let formControl: FormControl<any>;

    describe('given a FormControl with no validation', () => {
        beforeAll(() => {
            formControl = new FormControl<string>([], { initialValue: 'foo' });
        });

        it('should be empty of control and valid', () => {
            expect(formControl.valid).toBe(true);
            expect(formControl.hasError()).toBe(false);
            expect(formControl.waiting).toBe(false);
            expect(formControl.enabled).toBe(true);
            expect(formControl.readonly).toBe(false);
            expect(formControl.pristine).toBe(true);
            expect(formControl.touched).toBe(false);
            expect(formControl.value).toBe('foo');
        });

        describe('when edited', () => {
            beforeAll(() => {
                formControl.initEdition();
                formControl.value = 'test';
                formControl.endEdition();
            });

            it('should run validation and set flag', () => {
                expect(formControl.valid).toBe(true);
                expect(formControl.hasError()).toBe(false);
                expect(formControl.waiting).toBe(false);
                expect(formControl.pristine).toBe(false);
                expect(formControl.touched).toBe(true);
                expect(formControl.value).toBe('test');
            });

            it('should reset to initial value on reset', () => {
                formControl.reset();

                expect(formControl.valid).toBe(true);
                expect(formControl.hasError()).toBe(false);
                expect(formControl.waiting).toBe(false);
                expect(formControl.enabled).toBe(true);
                expect(formControl.readonly).toBe(false);
                expect(formControl.pristine).toBe(true);
                expect(formControl.touched).toBe(false);
                expect(formControl.value).toBe('foo');
            });
        });
    });

    describe('given a FormControl with an always false OnGoing validation', () => {
        beforeAll(() => {
            formControl = new FormControl<string>(
                [{
                    key: ALWAYS_FALSE,
                    validationFunction: (control: FormControl<string>) => {
                        return false;
                    },
                    error: { message: ALWAYS_FALSE },
                    validationType: ControlValidatorValidationType.OnGoing
                }]
            );
        });

        it('should be invalid and have no error', async () => {
            expect(formControl.valid).toBe(false);
            expect(formControl.hasError()).toBe(false);
            expect(formControl.errorMessage).toBe('');

            formControl.validate();

            expect(formControl.hasError()).toBe(true);
        });

        describe('when edited', () => {
            beforeAll(() => {
                formControl.initEdition();
                formControl.value = 'test';
                formControl.endEdition();
            });

            it('should be invalid and have an error', () => {
                expect(formControl.valid).toBe(false);
                expect(formControl.hasError()).toBe(true);
                expect(formControl.errorMessage).toBe(ALWAYS_FALSE);
            });

            it('should reset message to initial value on reset', () => {
                formControl.reset();

                expect(formControl.valid).toBe(false);
                expect(formControl.hasError()).toBe(false);
                expect(formControl.errorMessage).toBe('');
            });

            it('should be valid when readonly', () => {
                formControl.readonly = true;

                expect(formControl.valid).toBe(true);
                expect(formControl.hasError()).toBe(false);
                expect(formControl.errorMessage).toBe('');
            });

            it('should be valid when disabled', () => {
                formControl.enabled = false;

                expect(formControl.valid).toBe(true);
                expect(formControl.hasError()).toBe(false);
                expect(formControl.errorMessage).toBe('');
            });
        });
    });

    describe('Given value is a primitive', () => {
        beforeEach(() => {
            formControl = new FormControl<string>([], { initialValue: 'foo' });
        });

        describe('When value has changed', () => {
            beforeEach(() => {
                formControl.value = 'bar';
            });

            it('should have a different value than the initial value', () => {
                expect(formControl.value).toEqual('bar');
            });

            it('on reset, should reset to the correct initial value', () => {
formControl.reset();

                expect(formControl.value).toEqual('foo');
            });
        });
    });

    describe('Given value is an array', () => {
        let array: string[];

        beforeEach(() => {
            array = ['Alice'];
            formControl = new FormControl<string[]>([], { initialValue: array });
        });

        describe('When value has changed', () => {
            beforeEach(() => {
                array.push('Bob');
                array.splice(0, 1);
                formControl.value = array;
            });

            it('should have a different value than the initial value', () => {
                expect(formControl.value).toEqual(['Bob']);
            });

            it('on reset, should reset to the correct initial value', () => {
                formControl.reset();

                expect(formControl.value).toEqual(['Alice']);
            });
        });
    });

    describe('Given value is an object', () => {
        class
{
        foo: string;
        bar: string;

        constructor(foo: string, bar: string) {
            this.foo = foo;
            this.bar = bar;
        }
    }

    const vieilleClasse: MyClass = new MyClass('Alice', 'Bob');
    const nouvelleClasse: MyClass = new MyClass('Charlie', 'David');

    beforeEach(() => {
        formControl = new FormControl<MyClass>([], { initialValue: vieilleClasse });
    });

    describe('When value has changed', () => {
        beforeEach(() => {
            formControl.value = nouvelleClasse;
        });

        it('should have a different value than the initial value', () => {
            expect(formControl.value).toEqual(nouvelleClasse);
        });

        it('on reset, should reset to the correct initial value', () => {
            formControl.reset();

            expect(formControl.value).toEqual(vieilleClasse);
        });

        it('should return the object of the same class', () => {
            formControl.reset();

            expect(formControl.value instanceof MyClass).toBe(true);
        });
    });

    describe('When a property on the value has changed', () => {
        beforeEach(() => {
            formControl.value.bar = 'Eve';
        });

        it('should have a different value than the initial value', () => {
            expect(formControl.value).toEqual(new MyClass('Alice', 'Eve'));
        });

        it('on reset, should reset to the correct initial value', () => {
            formControl.reset();

            expect(formControl.value).toEqual(new MyClass('Alice', 'Bob'));
        });
    });
});
MyClass
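The object-valued tests above only pass if the control snapshots a deep copy of its initial value; otherwise mutating formControl.value.bar in place would corrupt the stored initial and reset could not restore 'Bob'. A Python sketch of that reset contract (the class is illustrative, not the library's implementation):

import copy

class SketchControl:
    def __init__(self, initial):
        self._initial = copy.deepcopy(initial)  # snapshot, immune to later mutation
        self.value = initial

    def reset(self):
        self.value = copy.deepcopy(self._initial)

c = SketchControl({"foo": "Alice", "bar": "Bob"})
c.value["bar"] = "Eve"
c.reset()
print(c.value)  # {'foo': 'Alice', 'bar': 'Bob'}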
database.config.ts
import mongoose from "mongoose"; import { EnvironmentConfig } from "./environment.config"; import * as logger from "../services/helper/logger"; export class DatabaseConfig { private static instance: DatabaseConfig; public static database: mongoose.Connection; public static instantiate(): DatabaseConfig { if (!DatabaseConfig.instance) { DatabaseConfig.instance = new DatabaseConfig(); } return DatabaseConfig.instance; } public static connect = () => { const uri = `mongodb://${EnvironmentConfig.DATABASE_USERNAME}:${EnvironmentConfig.DATABASE_PASSWORD}@${EnvironmentConfig.DATABASE_HOST}:${EnvironmentConfig.DATABASE_PORT}/${EnvironmentConfig.DATABASE_NAME}?authSource=admin`; if (DatabaseConfig.database) { return; } mongoose.connect(uri); DatabaseConfig.database = mongoose.connection; DatabaseConfig.database.once("open", async () => { logger.info("Connected to database");
}); }; public static disconnect = () => { if (!DatabaseConfig.database) { return; } mongoose.disconnect(); }; }
}); DatabaseConfig.database.on("error", () => { logger.info("Error connecting to database");
test_std.rs
#[cfg(test)] mod tests { use crate::util::alloc::*; use rkyv::{ archived_root, ser::{serializers::WriteSerializer, Serializer}, Archive, Deserialize, Serialize, }; use std::collections::HashMap; #[cfg(feature = "wasm")] use wasm_bindgen_test::*; #[test] #[cfg_attr(feature = "wasm", wasm_bindgen_test)] fn write_serializer() { #[derive(Archive, Serialize)] #[archive_attr(repr(C, packed))] struct Example { x: i32, } let mut buf = [0u8; 3]; let mut ser = WriteSerializer::new(&mut buf[..]); let foo = Example { x: 100 }; ser.serialize_value(&foo) .expect_err("serialized to an undersized buffer must fail"); } #[test] #[cfg_attr(feature = "wasm", wasm_bindgen_test)] fn archive_hash_map() { #[cfg(not(any(feature = "archive_le", feature = "archive_be")))] { test_archive(&HashMap::<i32, i32>::new()); let mut hash_map = HashMap::new(); hash_map.insert(1, 2); hash_map.insert(3, 4); hash_map.insert(5, 6); hash_map.insert(7, 8); test_archive(&hash_map); } let mut hash_map = HashMap::new(); hash_map.insert("hello".to_string(), "world".to_string()); hash_map.insert("foo".to_string(), "bar".to_string()); hash_map.insert("baz".to_string(), "bat".to_string()); let mut serializer = DefaultSerializer::default(); serializer.serialize_value(&hash_map).unwrap(); let buf = serializer.into_serializer().into_inner(); let archived_value = unsafe { archived_root::<HashMap<String, String>>(buf.as_ref()) }; assert_eq!(archived_value.len(), hash_map.len()); for (key, value) in hash_map.iter() { assert!(archived_value.contains_key(key.as_str())); assert_eq!(&archived_value[key.as_str()], value); } for (key, value) in archived_value.iter() { assert!(hash_map.contains_key(key.as_str())); assert_eq!(&hash_map[key.as_str()], value); } } #[test] #[cfg_attr(feature = "wasm", wasm_bindgen_test)] #[allow(deprecated)] fn archive_hash_map_hasher() { use std::collections::HashMap; test_archive(&HashMap::<i8, i32, ahash::RandomState>::default()); let mut hash_map: HashMap<i8, _, ahash::RandomState> = HashMap::default(); hash_map.insert(1, 2); hash_map.insert(3, 4); hash_map.insert(5, 6); hash_map.insert(7, 8); test_archive(&hash_map); let mut hash_map: HashMap<_, _, ahash::RandomState> = HashMap::default(); hash_map.insert("hello".to_string(), "world".to_string()); hash_map.insert("foo".to_string(), "bar".to_string()); hash_map.insert("baz".to_string(), "bat".to_string()); let mut serializer = DefaultSerializer::default(); serializer.serialize_value(&hash_map).unwrap(); let buf = serializer.into_serializer().into_inner(); let archived_value = unsafe { archived_root::<HashMap<String, String, ahash::RandomState>>(buf.as_ref()) }; assert_eq!(archived_value.len(), hash_map.len()); for (key, value) in hash_map.iter() { assert!(archived_value.contains_key(key.as_str())); assert_eq!(&archived_value[key.as_str()], value); } for (key, value) in archived_value.iter() { assert!(hash_map.contains_key(key.as_str())); assert_eq!(&hash_map[key.as_str()], value); } } #[test] #[cfg_attr(feature = "wasm", wasm_bindgen_test)] fn archive_net() { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[archive(compare(PartialEq))] #[archive_attr(derive(Debug))] struct TestNet { ipv4: Ipv4Addr, ipv6: Ipv6Addr, ip: IpAddr, sockv4: SocketAddrV4, sockv6: SocketAddrV6, sock: SocketAddr, } let value = TestNet { ipv4: Ipv4Addr::new(31, 41, 59, 26), ipv6: Ipv6Addr::new(31, 41, 59, 26, 53, 58, 97, 93), ip: IpAddr::V4(Ipv4Addr::new(31, 41, 59, 26)), sockv4: 
SocketAddrV4::new(Ipv4Addr::new(31, 41, 59, 26), 5358), sockv6: SocketAddrV6::new(Ipv6Addr::new(31, 31, 59, 26, 53, 58, 97, 93), 2384, 0, 0), sock: SocketAddr::V6(SocketAddrV6::new( Ipv6Addr::new(31, 31, 59, 26, 53, 58, 97, 93), 2384, 0, 0, )), };
#[test] #[cfg_attr(feature = "wasm", wasm_bindgen_test)] fn c_string() { use std::ffi::CString; let value = unsafe { CString::from_vec_unchecked("hello world".to_string().into_bytes()) }; test_archive(&value); } // TODO: figure out errors // #[test] // #[cfg_attr(feature = "wasm", wasm_bindgen_test)] // fn mutex() { // use rkyv::with::Lock; // use std::sync::Mutex; // #[derive(Archive, Serialize, Deserialize)] // struct Test { // #[with(Lock)] // value: Mutex<i32>, // } // let value = Test { // value: Mutex::new(10), // }; // let mut serializer = AlignedSerializer::new(AlignedVec::new()); // serializer.serialize_value(&value).unwrap(); // let result = serializer.into_inner(); // let archived = unsafe { archived_root::<Test>(result.as_slice()) }; // assert_eq!(*archived.value, 10); // let deserialized: Test = archived.deserialize(&mut Infallible).unwrap(); // assert_eq!(*deserialized.value.lock().unwrap(), 10); // } // #[test] // #[cfg_attr(feature = "wasm", wasm_bindgen_test)] // fn rwlock() { // use rkyv::with::Lock; // use std::sync::RwLock; // #[derive(Archive, Serialize, Deserialize)] // struct Test { // #[with(Lock)] // value: RwLock<i32>, // } // let value = Test { // value: RwLock::new(10), // }; // let mut serializer = AlignedSerializer::new(AlignedVec::new()); // serializer.serialize_value(&value).unwrap(); // let result = serializer.into_inner(); // let archived = unsafe { archived_root::<Test>(result.as_slice()) }; // assert_eq!(*archived.value, 10); // let deserialized: Test = archived.deserialize(&mut Infallible).unwrap(); // assert_eq!(*deserialized.value.read().unwrap(), 10); // } // #[test] // #[cfg_attr(feature = "wasm", wasm_bindgen_test)] // fn os_string() { // use rkyv::with::ToString; // use core::str::FromStr; // use std::ffi::OsString; // #[derive(Archive, Serialize, Deserialize)] // struct Test { // #[with(ToString)] // value: OsString, // } // let value = Test { // value: OsString::from_str("hello world").unwrap(), // }; // let mut serializer = AlignedSerializer::new(AlignedVec::new()); // serializer.serialize_value(&value).unwrap(); // let result = serializer.into_inner(); // let archived = unsafe { archived_root::<Test>(result.as_slice()) }; // assert_eq!(archived.value, "hello world"); // let deserialized: Test = archived.deserialize(&mut Infallible).unwrap(); // assert_eq!(deserialized.value, "hello world"); // } // #[test] // #[cfg_attr(feature = "wasm", wasm_bindgen_test)] // fn path_buf() { // use rkyv::with::ToString; // use core::str::FromStr; // use std::path::PathBuf; // #[derive(Archive, Serialize, Deserialize)] // struct Test { // #[with(ToString)] // value: PathBuf, // } // let value = Test { // value: PathBuf::from_str("hello world").unwrap(), // }; // let mut serializer = AlignedSerializer::new(AlignedVec::new()); // serializer.serialize_value(&value).unwrap(); // let result = serializer.into_inner(); // let archived = unsafe { archived_root::<Test>(result.as_slice()) }; // assert_eq!(archived.value, "hello world"); // let deserialized: Test = archived.deserialize(&mut Infallible).unwrap(); // assert_eq!(deserialized.value.to_str().unwrap(), "hello world"); // } #[test] #[cfg_attr(feature = "wasm", wasm_bindgen_test)] // Don't run these tests with non-native endianness because ArchivedHashMap won't have // PartialEq<HashMap> #[cfg(not(any(feature = "archive_le", feature = "archive_be")))] fn archive_zst_containers() { use std::collections::HashSet; #[derive(Archive, Deserialize, Serialize, Debug, PartialEq)] #[archive(compare(PartialEq))] 
#[archive_attr(derive(Debug))] struct MyZST; let mut value = HashMap::new(); value.insert(0, ()); value.insert(1, ()); test_archive(&value); let mut value = HashMap::new(); value.insert((), 10); test_archive(&value); let mut value = HashMap::new(); value.insert((), ()); test_archive(&value); let mut value = HashSet::new(); value.insert(()); test_archive(&value); } }
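// Hedged illustration (not part of the original file): a minimal, self-contained
// sketch of the serialize/reinterpret/deserialize round trip that `test_archive`
// exercises, written against the rkyv 0.7-era API that the commented-out tests
// above use (AlignedSerializer, archived_root, Infallible). The `Pair` type and
// the test name are hypothetical and exist only for this sketch.
#[cfg(test)]
mod round_trip_sketch {
    use rkyv::{
        archived_root,
        ser::{serializers::AlignedSerializer, Serializer},
        AlignedVec, Archive, Deserialize, Infallible, Serialize,
    };

    #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)]
    #[archive(compare(PartialEq))]
    #[archive_attr(derive(Debug))]
    struct Pair {
        key: u32,
        name: String,
    }

    #[test]
    fn pair_round_trip() {
        let value = Pair {
            key: 1,
            name: "hello world".to_string(),
        };
        // Serialize into an aligned byte buffer.
        let mut serializer = AlignedSerializer::new(AlignedVec::new());
        serializer.serialize_value(&value).unwrap();
        let bytes = serializer.into_inner();
        // Reinterpret the buffer as the archived type without copying
        // (unsafe: the bytes must be a valid archive of `Pair`).
        let archived = unsafe { archived_root::<Pair>(bytes.as_slice()) };
        assert_eq!(*archived, value); // via #[archive(compare(PartialEq))]
        // Deserialize back into the owned type.
        let deserialized: Pair = archived.deserialize(&mut Infallible).unwrap();
        assert_eq!(deserialized, value);
    }
}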
error.rs
use failure::Fail; use std::io; /// Error type for riam #[derive(Fail, Debug)] pub enum RiamError { /// IO error #[fail(display = "IO error: {}", _0)] Io(#[cause] io::Error), /// Serialization or deserialization error #[fail(display = "serde_json error: {}", _0)] Serde(#[cause] serde_json::Error), /// Invalid policy /// The policy is not well formed. #[fail(display = "Invalid policy")] InvalidPolicy, /// Nonexistent policy /// The policy does not exist. #[fail(display = "Unknown policy")] UnknownPolicy, } impl From<io::Error> for RiamError { fn from(err: io::Error) -> RiamError { RiamError::Io(err) } } impl From<serde_json::Error> for RiamError { fn from(err: serde_json::Error) -> RiamError
{ RiamError::Serde(err) } } /// Result type for riam pub type Result<T> = std::result::Result<T, RiamError>;
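// Hedged usage sketch (not part of the original file): the `From` impls above
// are exactly what lets `?` convert `io::Error` and `serde_json::Error` into
// `RiamError` automatically. `load_policy` and its path argument are
// hypothetical helpers introduced only for this illustration.
fn load_policy(path: &str) -> Result<serde_json::Value> {
    let raw = std::fs::read_to_string(path)?; // io::Error -> RiamError::Io
    let policy = serde_json::from_str(&raw)?; // serde_json::Error -> RiamError::Serde
    Ok(policy)
}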
consensus_pool.rs
use crate::backup::Backup; use crate::{ consensus_pool_cache::{ get_highest_catch_up_package, get_highest_finalized_block, update_summary_block, ConsensusCacheImpl, }, inmemory_pool::InMemoryPoolSection, metrics::{LABEL_POOL_TYPE, POOL_TYPE_UNVALIDATED, POOL_TYPE_VALIDATED}, }; use ic_config::artifact_pool::{ArtifactPoolConfig, PersistentPoolBackend}; use ic_consensus_message::ConsensusMessageHashable; use ic_interfaces::{ consensus_pool::{ ChangeAction, ChangeSet, ConsensusPool, ConsensusPoolCache, HeightIndexedPool, HeightRange, MutableConsensusPool, PoolSection, UnvalidatedConsensusArtifact, ValidatedConsensusArtifact, }, gossip_pool::{ConsensusGossipPool, GossipPool}, time_source::TimeSource, }; use ic_logger::ReplicaLogger; use ic_types::{ artifact::ConsensusMessageId, consensus::catchup::CUPWithOriginalProtobuf, consensus::*, Height, SubnetId, Time, }; use prometheus::{labels, opts, IntGauge}; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; // The maximum age backup artifacts can reach before purging. const BACKUP_RETENTION_TIME_SECS: Duration = Duration::from_secs(24 * 60 * 60); // Time interval between purges. const BACKUP_PURGING_INTERVAL_SEC: Duration = Duration::from_secs(60 * 60); #[derive(Debug, Clone)] pub enum PoolSectionOp<T> { Insert(T), Remove(ConsensusMessageId), PurgeBelow(Height), // Non-inclusive } #[derive(Clone, Debug, Default)] pub struct PoolSectionOps<T> { pub ops: Vec<PoolSectionOp<T>>, } impl<T> PoolSectionOps<T> { pub fn new() -> PoolSectionOps<T> { PoolSectionOps { ops: Vec::new() } } pub fn insert(&mut self, artifact: T) { self.ops.push(PoolSectionOp::Insert(artifact)); } pub fn remove(&mut self, msg_id: ConsensusMessageId) { self.ops.push(PoolSectionOp::Remove(msg_id)); } pub fn purge_below(&mut self, height: Height) { self.ops.push(PoolSectionOp::PurgeBelow(height)); } } pub trait InitializablePoolSection: MutablePoolSection<ValidatedConsensusArtifact> { fn insert_cup_with_proto(&self, cup_with_proto: CUPWithOriginalProtobuf); } pub trait MutablePoolSection<T>: PoolSection<T> { fn mutate(&mut self, ops: PoolSectionOps<T>); fn pool_section(&self) -> &dyn PoolSection<T>; } struct PerTypeMetrics<T> { max_height: prometheus::IntGauge, min_height: prometheus::IntGauge, phantom: PhantomData<T>, } const LABEL_TYPE: &str = "type"; const LABEL_STAT: &str = "stat"; impl<T> PerTypeMetrics<T> { fn new(registry: &ic_metrics::MetricsRegistry, pool_portion: &str, type_name: &str) -> Self { const NAME: &str = "artifact_pool_consensus_height_stat"; const HELP: &str = "The height of objects in a consensus pool, by pool type, object type and stat"; Self { max_height: registry.register( IntGauge::with_opts(opts!( NAME, HELP, labels! {LABEL_POOL_TYPE => pool_portion, LABEL_TYPE => type_name, LABEL_STAT => "max"} )) .unwrap(), ), min_height: registry.register( IntGauge::with_opts(opts!( NAME, HELP, labels! {LABEL_POOL_TYPE => pool_portion, LABEL_TYPE => type_name, LABEL_STAT => "min"} )) .unwrap(), ), phantom: PhantomData, } } fn
update_from_height_indexed_pool
(&self, index: &dyn HeightIndexedPool<T>) { let (min, max) = index .height_range() .map_or((-1, -1), |r| (r.min.get() as i64, r.max.get() as i64)); if min >= 0 { self.min_height.set(min); } if max >= 0 { self.max_height.set(max); } } } struct PoolMetrics { random_beacon: PerTypeMetrics<RandomBeacon>, random_tape: PerTypeMetrics<RandomTape>, finalization: PerTypeMetrics<Finalization>, notarization: PerTypeMetrics<Notarization>, catch_up_package: PerTypeMetrics<CatchUpPackage>, block_proposal: PerTypeMetrics<BlockProposal>, random_beacon_share: PerTypeMetrics<RandomBeaconShare>, random_tape_share: PerTypeMetrics<RandomTapeShare>, notarization_share: PerTypeMetrics<NotarizationShare>, finalization_share: PerTypeMetrics<FinalizationShare>, catch_up_package_share: PerTypeMetrics<CatchUpPackageShare>, total_size: prometheus::IntGauge, } impl PoolMetrics { fn new(registry: ic_metrics::MetricsRegistry, pool_portion: &str) -> Self { Self { random_beacon: PerTypeMetrics::new(&registry, pool_portion, "random_beacon"), random_tape: PerTypeMetrics::new(&registry, pool_portion, "random_tape"), finalization: PerTypeMetrics::new(&registry, pool_portion, "finalization"), notarization: PerTypeMetrics::new(&registry, pool_portion, "notarization"), catch_up_package: PerTypeMetrics::new(&registry, pool_portion, "catch_up_package"), block_proposal: PerTypeMetrics::new(&registry, pool_portion, "block_proposal"), random_beacon_share: PerTypeMetrics::new( &registry, pool_portion, "random_beacon_share", ), random_tape_share: PerTypeMetrics::new(&registry, pool_portion, "random_tape_share"), notarization_share: PerTypeMetrics::new(&registry, pool_portion, "notarization_share"), finalization_share: PerTypeMetrics::new(&registry, pool_portion, "finalization_share"), catch_up_package_share: PerTypeMetrics::new( &registry, pool_portion, "catch_up_package_share", ), total_size: registry.register( IntGauge::with_opts(opts!( "consensus_pool_size", "The total size of a consensus pool", labels! {LABEL_POOL_TYPE => pool_portion} )) .unwrap(), ), } } fn update<T>(&mut self, pool_section: &dyn PoolSection<T>) { self.random_beacon .update_from_height_indexed_pool(pool_section.random_beacon()); self.random_tape .update_from_height_indexed_pool(pool_section.random_tape()); self.finalization .update_from_height_indexed_pool(pool_section.finalization()); self.notarization .update_from_height_indexed_pool(pool_section.notarization()); self.catch_up_package .update_from_height_indexed_pool(pool_section.catch_up_package()); self.block_proposal .update_from_height_indexed_pool(pool_section.block_proposal()); self.random_beacon_share .update_from_height_indexed_pool(pool_section.random_beacon_share()); self.random_tape_share .update_from_height_indexed_pool(pool_section.random_tape_share()); self.notarization_share .update_from_height_indexed_pool(pool_section.notarization_share()); self.finalization_share .update_from_height_indexed_pool(pool_section.finalization_share()); self.catch_up_package_share .update_from_height_indexed_pool(pool_section.catch_up_package_share()); self.total_size.set(pool_section.size() as i64) } } pub struct ConsensusPoolImpl { validated: Box<dyn InitializablePoolSection + Send + Sync>, unvalidated: Box<dyn MutablePoolSection<UnvalidatedConsensusArtifact> + Send + Sync>, validated_metrics: PoolMetrics, unvalidated_metrics: PoolMetrics, cache: Arc<ConsensusCacheImpl>, backup: Option<Backup>, } // A temporary pool implementation used for genesis initialization. 
pub struct UncachedConsensusPoolImpl { pub validated: Box<dyn InitializablePoolSection + Send + Sync>, unvalidated: Box<dyn MutablePoolSection<UnvalidatedConsensusArtifact> + Send + Sync>, } impl UncachedConsensusPoolImpl { pub fn new(config: ArtifactPoolConfig, log: ReplicaLogger) -> UncachedConsensusPoolImpl { let validated = match config.persistent_pool_backend { PersistentPoolBackend::LMDB(lmdb_config) => Box::new( crate::lmdb_pool::PersistentHeightIndexedPool::new_consensus_pool( lmdb_config, config.persistent_pool_read_only, log.clone(), ), ) as Box<_>, PersistentPoolBackend::RocksDB(config) => Box::new( crate::rocksdb_pool::PersistentHeightIndexedPool::new_consensus_pool( config, log.clone(), ), ) as Box<_>, }; UncachedConsensusPoolImpl { validated, unvalidated: Box::new(InMemoryPoolSection::new(log)), } } } impl ConsensusPoolCache for UncachedConsensusPoolImpl { fn finalized_block(&self) -> Block { get_highest_finalized_block(self, &self.catch_up_package()) } fn consensus_time(&self) -> Option<Time> { let block = self.finalized_block(); if block.height() == Height::from(0) { None } else { Some(block.context.time) } } fn catch_up_package(&self) -> CatchUpPackage { get_highest_catch_up_package(self).cup } fn cup_with_protobuf(&self) -> CUPWithOriginalProtobuf { get_highest_catch_up_package(self) } fn summary_block(&self) -> Block { let finalized_block = get_highest_finalized_block(self, &self.catch_up_package()); let mut summary_block = self.catch_up_package().content.block.into_inner(); update_summary_block(self, &mut summary_block, &finalized_block); summary_block } } impl ConsensusPool for UncachedConsensusPoolImpl { fn validated(&self) -> &dyn PoolSection<ValidatedConsensusArtifact> { self.validated.pool_section() } fn unvalidated(&self) -> &dyn PoolSection<UnvalidatedConsensusArtifact> { self.unvalidated.pool_section() } fn as_cache(&self) -> &dyn ConsensusPoolCache { self } } impl ConsensusPoolImpl { /// Create a consensus pool from a given `config`, and initialize it with /// the given `catch_up_package`. If a catch-up package already exists in /// the validated pool, the one that is greater (with respect to /// height and registry version) will be used. pub fn new( subnet_id: SubnetId, catch_up_package: CUPWithOriginalProtobuf, config: ArtifactPoolConfig, registry: ic_metrics::MetricsRegistry, log: ReplicaLogger, ) -> ConsensusPoolImpl { let mut pool = UncachedConsensusPoolImpl::new(config.clone(), log.clone()); Self::init_genesis(catch_up_package, pool.validated.as_mut()); let mut pool = Self::from_uncached(pool, registry.clone()); // If the backup directory is set, instantiate the backup component // and create a subdirectory with the subnet id as directory name.
pool.backup = config.backup_spool_path.map(|path| { Backup::new( &pool, path.clone(), path.join(subnet_id.to_string()) .join(ic_types::ReplicaVersion::default().to_string()), BACKUP_RETENTION_TIME_SECS, BACKUP_PURGING_INTERVAL_SEC, registry, log, ) }); pool } fn init_genesis(cup: CUPWithOriginalProtobuf, pool_section: &mut dyn InitializablePoolSection) { let should_insert = match pool_section.catch_up_package().get_highest() { Ok(existing) => CatchUpPackageParam::from(&cup) > CatchUpPackageParam::from(&existing), Err(_) => true, }; if should_insert { let mut ops = PoolSectionOps::new(); ops.insert(ValidatedConsensusArtifact { msg: cup.cup.content.random_beacon.as_ref().clone().to_message(), timestamp: cup.cup.content.block.as_ref().context.time, }); pool_section.mutate(ops); pool_section.insert_cup_with_proto(cup); } } /// Can be used to instantiate an empty pool without a CUP. pub fn from_uncached( uncached: UncachedConsensusPoolImpl, registry: ic_metrics::MetricsRegistry, ) -> ConsensusPoolImpl { let cache = Arc::new(ConsensusCacheImpl::new(&uncached)); ConsensusPoolImpl { validated: uncached.validated, unvalidated: uncached.unvalidated, validated_metrics: PoolMetrics::new(registry.clone(), POOL_TYPE_VALIDATED), unvalidated_metrics: PoolMetrics::new(registry, POOL_TYPE_UNVALIDATED), cache, backup: None, } } pub fn new_from_cup_without_bytes( subnet_id: SubnetId, catch_up_package: CatchUpPackage, config: ArtifactPoolConfig, registry: ic_metrics::MetricsRegistry, log: ReplicaLogger, ) -> ConsensusPoolImpl { Self::new( subnet_id, CUPWithOriginalProtobuf::from_cup(catch_up_package), config, registry, log, ) } /// Get a copy of ConsensusPoolCache. pub fn get_cache(&self) -> Arc<dyn ConsensusPoolCache> { Arc::clone(&self.cache) as Arc<_> } fn apply_changes_validated(&mut self, ops: PoolSectionOps<ValidatedConsensusArtifact>) { if !ops.ops.is_empty() { self.validated.mutate(ops); self.validated_metrics.update(self.validated.pool_section()); } } fn apply_changes_unvalidated(&mut self, ops: PoolSectionOps<UnvalidatedConsensusArtifact>) { if !ops.ops.is_empty() { self.unvalidated.mutate(ops); self.unvalidated_metrics .update(self.unvalidated.pool_section()); } } } impl ConsensusPool for ConsensusPoolImpl { fn validated(&self) -> &dyn PoolSection<ValidatedConsensusArtifact> { self.validated.pool_section() } fn unvalidated(&self) -> &dyn PoolSection<UnvalidatedConsensusArtifact> { self.unvalidated.pool_section() } fn as_cache(&self) -> &dyn ConsensusPoolCache { self.cache.as_ref() } } impl MutableConsensusPool for ConsensusPoolImpl { fn insert(&mut self, unvalidated_artifact: UnvalidatedConsensusArtifact) { let mut ops = PoolSectionOps::new(); ops.insert(unvalidated_artifact); self.apply_changes_unvalidated(ops); } fn apply_changes(&mut self, time_source: &dyn TimeSource, change_set: ChangeSet) { let updates = self.cache.prepare(&change_set); let mut unvalidated_ops = PoolSectionOps::new(); let mut validated_ops = PoolSectionOps::new(); // DO NOT Add a default nop. Explicitly mention all cases. // This helps with keeping this readable and obvious what // change is causing tests to break. 
for change_action in change_set { match change_action { ChangeAction::AddToValidated(to_add) => { validated_ops.insert(ValidatedConsensusArtifact { msg: to_add, timestamp: time_source.get_relative_time(), }); } ChangeAction::MoveToValidated(to_move) => { let msg_id = to_move.get_id(); let timestamp = self.unvalidated.get_timestamp(&msg_id).unwrap_or_else(|| { panic!("Timestamp is not found for MoveToValidated: {:?}", to_move) }); unvalidated_ops.remove(msg_id); validated_ops.insert(ValidatedConsensusArtifact { msg: to_move, timestamp, }); } ChangeAction::RemoveFromValidated(to_remove) => { validated_ops.remove(to_remove.get_id()); } ChangeAction::RemoveFromUnvalidated(to_remove) => { unvalidated_ops.remove(to_remove.get_id()); } ChangeAction::PurgeValidatedBelow(height) => { validated_ops.purge_below(height); } ChangeAction::PurgeUnvalidatedBelow(height) => { unvalidated_ops.purge_below(height); } ChangeAction::HandleInvalid(to_remove, _) => { unvalidated_ops.remove(to_remove.get_id()); } } } let artifacts_for_backup = validated_ops .ops .iter() .filter_map(|op| match op { PoolSectionOp::Insert(artifact) => Some(artifact.msg.clone()), _ => None, }) .collect(); self.apply_changes_unvalidated(unvalidated_ops); self.apply_changes_validated(validated_ops); if let Some(backup) = &self.backup { backup.store(time_source, artifacts_for_backup); } if !updates.is_empty() { self.cache.update(self, updates); } } } impl GossipPool<ConsensusMessage, ChangeSet> for ConsensusPoolImpl { type MessageId = ConsensusMessageId; type Filter = Height; fn contains(&self, id: &ConsensusMessageId) -> bool { self.unvalidated.contains(id) || self.validated.contains(id) } fn get_validated_by_identifier(&self, id: &ConsensusMessageId) -> Option<ConsensusMessage> { self.validated.get(id) } // Get max height for each Validated pool and chain all the // get_by_height_range() iterators.
fn get_all_validated_by_filter( &self, filter: Self::Filter, ) -> Box<dyn Iterator<Item = ConsensusMessage>> { let max_catch_up_height = self .validated .catch_up_package() .height_range() .map(|x| x.max) .unwrap(); let min = max_catch_up_height.max(filter) + Height::from(1); let max_finalized_height = self .validated .finalization() .height_range() .map(|x| x.max) .unwrap_or(min); let min_finalized_height = min; let max_finalized_share_height = self .validated .finalization_share() .height_range() .map(|x| x.max) .unwrap_or(min); let min_finalized_share_height = max_finalized_height.increment(); let max_notarization_height = self .validated .notarization() .height_range() .map(|x| x.max) .unwrap_or(min); let min_notarization_height = min; let max_notarization_share_height = self .validated .notarization_share() .height_range() .map(|x| x.max) .unwrap_or(min); let min_notarization_share_height = max_notarization_height.increment(); let max_random_beacon_height = self .validated .random_beacon() .height_range() .map(|x| x.max) .unwrap_or(min); let min_random_beacon_height = min; let max_random_beacon_share_height = self .validated .random_beacon_share() .height_range() .map(|x| x.max) .unwrap_or(min); let min_random_beacon_share_height = max_random_beacon_height.increment(); let max_block_proposal_height = self .validated .block_proposal() .height_range() .map(|x| x.max) .unwrap_or(min); let min_block_proposal_height = min; // Because random tape & shares do not come in a consecutive sequence, we // compute a custom iterator through their height range to either return // a random tape if it is found, or the set of shares when the tape is // not found. let max_random_tape_height = self .validated .random_tape() .height_range() .map(|x| x.max) .unwrap_or(min); let min_random_tape_height = min; let max_random_tape_share_height = self .validated .random_tape_share() .height_range() .map(|x| x.max) .unwrap_or(min); // Compute a combined range let tape_range = min_random_tape_height.get() ..=max_random_tape_height .max(max_random_tape_share_height) .get(); let random_tapes = tape_range .clone() .map(|h| self.validated.random_tape().get_by_height(Height::from(h))) .collect::<Vec<_>>(); let random_tape_shares = tape_range .map(|h| { self.validated .random_tape_share() .get_by_height(Height::from(h)) }) .collect::<Vec<_>>(); let random_tape_iterator = random_tapes .into_iter() .zip(random_tape_shares.into_iter()) .flat_map(|(mut tape, shares)| { tape.next().map_or_else( || shares.map(|x| x.to_message()).collect::<Vec<_>>(), |x| vec![x.to_message()], ) }); Box::new( self.validated .catch_up_package() .get_by_height_range(HeightRange { min: max_catch_up_height.max(filter), max: max_catch_up_height, }) .map(|x| x.to_message()) .chain( self.validated .finalization() .get_by_height_range(HeightRange { min: min_finalized_height, max: max_finalized_height, }) .map(|x| x.to_message()), ) .chain( self.validated .finalization_share() .get_by_height_range(HeightRange { min: min_finalized_share_height, max: max_finalized_share_height, }) .map(|x| x.to_message()), ) .chain( self.validated .notarization() .get_by_height_range(HeightRange { min: min_notarization_height, max: max_notarization_height, }) .map(|x| x.to_message()), ) .chain( self.validated .notarization_share() .get_by_height_range(HeightRange { min: min_notarization_share_height, max: max_notarization_share_height, }) .map(|x| x.to_message()), ) .chain( self.validated .random_beacon() .get_by_height_range(HeightRange { min: min_random_beacon_height, 
max: max_random_beacon_height, }) .map(|x| x.to_message()), ) .chain( self.validated .random_beacon_share() .get_by_height_range(HeightRange { min: min_random_beacon_share_height, max: max_random_beacon_share_height, }) .map(|x| x.to_message()), ) .chain( self.validated .block_proposal() .get_by_height_range(HeightRange { min: min_block_proposal_height, max: max_block_proposal_height, }) .map(|x| x.to_message()), ) .chain(random_tape_iterator), ) } } impl ConsensusGossipPool for ConsensusPoolImpl {} #[cfg(test)] mod tests { use super::*; use ic_consensus_message::make_genesis; use ic_interfaces::artifact_pool::UnvalidatedArtifact; use ic_logger::replica_logger::no_op_logger; use ic_metrics::MetricsRegistry; use ic_protobuf::types::v1 as pb; use ic_test_utilities::{ consensus::fake::*, mock_time, types::ids::{node_test_id, subnet_test_id}, FastForwardTimeSource, }; use ic_types::{ batch::ValidationContext, consensus::{BlockProposal, RandomBeacon}, crypto::{CryptoHash, CryptoHashOf}, RegistryVersion, }; use prost::Message; use std::convert::TryFrom; use std::{fs, io::Read}; #[test] fn test_timestamp() { ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| { let time_source = FastForwardTimeSource::new(); let time_0 = time_source.get_relative_time(); let mut pool = ConsensusPoolImpl::new_from_cup_without_bytes( subnet_test_id(0), make_genesis(ic_types::consensus::dkg::Summary::fake()), pool_config, ic_metrics::MetricsRegistry::new(), no_op_logger(), ); let mut random_beacon = RandomBeacon::fake(RandomBeaconContent::new( Height::from(0), CryptoHashOf::from(CryptoHash(Vec::new())), )); let msg_0 = random_beacon.clone().to_message(); let msg_id_0 = random_beacon.get_id(); random_beacon.content.height = Height::from(1); let msg_1 = random_beacon.clone().to_message(); let msg_id_1 = random_beacon.get_id(); pool.insert(UnvalidatedArtifact { message: msg_0.clone(), peer_id: node_test_id(0), timestamp: time_source.get_relative_time(), }); let time_1 = time_0 + Duration::from_secs(100); time_source.set_time(time_1).unwrap(); pool.insert(UnvalidatedArtifact { message: msg_1.clone(), peer_id: node_test_id(1), timestamp: time_source.get_relative_time(), }); // Check timestamp is the insertion time. assert_eq!(pool.unvalidated().get_timestamp(&msg_id_0), Some(time_0)); assert_eq!(pool.unvalidated().get_timestamp(&msg_id_1), Some(time_1)); let mut changeset = ChangeSet::new(); changeset.push(ChangeAction::MoveToValidated(msg_0)); changeset.push(ChangeAction::RemoveFromUnvalidated(msg_1)); pool.apply_changes(time_source.as_ref(), changeset); // Check timestamp is carried over for msg_0. assert_eq!(pool.unvalidated().get_timestamp(&msg_id_0), None); assert_eq!(pool.validated().get_timestamp(&msg_id_0), Some(time_0)); // Check timestamp is removed for msg_1. assert_eq!(pool.unvalidated().get_timestamp(&msg_id_1), None); assert_eq!(pool.validated().get_timestamp(&msg_id_1), None); }) } #[test] // We create multiple artifacts for multiple heights, check that all of them are // written to the disk and can be restored. 
fn test_backup() { use crate::backup::bytes_to_hex_str; ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| { let time_source = FastForwardTimeSource::new(); let backup_dir = tempfile::Builder::new().tempdir().unwrap(); let subnet_id = subnet_test_id(0); let root_path = backup_dir .path() .join(subnet_id.to_string()) .join(ic_types::ReplicaVersion::default().to_string()); let mut pool = ConsensusPoolImpl::new_from_cup_without_bytes( subnet_id, make_genesis(ic_types::consensus::dkg::Summary::fake()), pool_config, ic_metrics::MetricsRegistry::new(), no_op_logger(), ); let purging_interval = Duration::from_millis(100); pool.backup = Some(Backup::new( &pool, backup_dir.path().into(), root_path.clone(), // We purge all artifacts older than 100 milliseconds. Duration::from_millis(100), // We purge every 100 milliseconds. purging_interval, MetricsRegistry::new(), no_op_logger(), )); // All tests in this group work on artifacts inside the same group, so we extend // the path with it. let path = root_path.join("0"); let random_beacon = RandomBeacon::fake(RandomBeaconContent::new( Height::from(1), CryptoHashOf::from(CryptoHash(Vec::new())), )); let random_tape = RandomTape::fake(RandomTapeContent::new(Height::from(2))); let notarization = Notarization::fake(NotarizationContent::new( Height::from(2), CryptoHashOf::from(CryptoHash(vec![1, 2, 3])), )); let finalization = Finalization::fake(FinalizationContent::new( Height::from(3), CryptoHashOf::from(CryptoHash(vec![1, 2, 3])), )); let proposal = BlockProposal::fake( Block::new( CryptoHashOf::from(CryptoHash(Vec::new())), Payload::new( ic_crypto::crypto_hash, ic_types::consensus::dkg::Summary::fake().into(), ), Height::from(4), Rank(456), ValidationContext { registry_version: RegistryVersion::from(99), certified_height: Height::from(42), time: mock_time(), }, ), node_test_id(333), ); let genesis_cup = make_genesis(ic_types::consensus::dkg::Summary::fake()); let mut cup = genesis_cup.clone(); cup.content.random_beacon = hashed::Hashed::new( ic_crypto::crypto_hash, RandomBeacon::fake(RandomBeaconContent::new( Height::from(4), CryptoHashOf::from(CryptoHash(Vec::new())), )), ); let changeset = vec![ random_beacon.clone().to_message(), random_tape.clone().to_message(), finalization.clone().to_message(), notarization.clone().to_message(), proposal.clone().to_message(), cup.clone().to_message(), ] .into_iter() .map(ChangeAction::AddToValidated) .collect(); pool.apply_changes(time_source.as_ref(), changeset); // We let the pool apply an empty change set, so that it triggers the backup, // which will block on the previous backup execution, which is running // asynchronously. This way, we make sure the backup is written and the test // can continue.
pool.apply_changes(time_source.as_ref(), Vec::new()); // Check backup for height 0 assert!( path.join("0").join("catch_up_package.bin").exists(), "catch-up package at height 0 was backed up" ); assert!( path.join("0").join("random_beacon.bin").exists(), "random beacon at height 0 was backed up" ); assert_eq!( fs::read_dir(path.join("0")).unwrap().count(), 2, "two artifacts for height 0 were backed up" ); let mut file = fs::File::open(path.join("0").join("catch_up_package.bin")).unwrap(); let mut buffer = Vec::new(); file.read_to_end(&mut buffer).unwrap(); let restored = CatchUpPackage::try_from(&pb::CatchUpPackage::decode(buffer.as_slice()).unwrap()) .unwrap(); assert_eq!( genesis_cup, restored, "restored catch-up package is identical with the original one" ); // Check backup for height 1 assert!( path.join("1").join("random_beacon.bin").exists(), "random beacon at height 1 was backed up" ); assert_eq!( fs::read_dir(path.join("1")).unwrap().count(), 1, "only one artifact for height 1 was backed up" ); let mut file = fs::File::open(path.join("1").join("random_beacon.bin")).unwrap(); let mut buffer = Vec::new(); file.read_to_end(&mut buffer).unwrap(); let restored = RandomBeacon::try_from(pb::RandomBeacon::decode(buffer.as_slice()).unwrap()) .unwrap(); assert_eq!( random_beacon, restored, "restored random beacon is identical with the original one" ); let notarization_path = path.join("2").join(format!( "notarization_{}_{}.bin", bytes_to_hex_str(&notarization.content.block), bytes_to_hex_str(&ic_crypto::crypto_hash(&notarization)), )); assert!( path.join("2").join("random_tape.bin").exists(), "random tape at height 2 was backed up" ); assert!( notarization_path.exists(), "notarization at height 2 was backed up" ); assert_eq!( fs::read_dir(path.join("2")).unwrap().count(), 2, "only two artifacts for height 2 were backed up" ); let mut file = fs::File::open(path.join("2").join("random_tape.bin")).unwrap(); let mut buffer = Vec::new(); file.read_to_end(&mut buffer).unwrap(); let restored = RandomTape::try_from(pb::RandomTape::decode(buffer.as_slice()).unwrap()).unwrap(); assert_eq!( random_tape, restored, "restored random tape is identical with the original one" ); let mut file = fs::File::open(notarization_path).unwrap(); let mut buffer = Vec::new(); file.read_to_end(&mut buffer).unwrap(); let restored = Notarization::try_from(pb::Notarization::decode(buffer.as_slice()).unwrap()) .unwrap(); assert_eq!( notarization, restored, "restored notarization is identical with the original one" ); // Check backup for height 3 let finalization_path = path.join("3").join(format!( "finalization_{}_{}.bin", bytes_to_hex_str(&finalization.content.block), bytes_to_hex_str(&ic_crypto::crypto_hash(&finalization)), )); assert!( finalization_path.exists(), "finalization at height 3 was backed up", ); assert_eq!( fs::read_dir(path.join("3")).unwrap().count(), 1, "only one artifact for height 3 was backed up" ); let mut file = fs::File::open(path.join("3").join(finalization_path)).unwrap(); let mut buffer = Vec::new(); file.read_to_end(&mut buffer).unwrap(); let restored = Finalization::try_from(pb::Finalization::decode(buffer.as_slice()).unwrap()) .unwrap(); assert_eq!( finalization, restored, "restored finalization is identical with the original one" ); // Check backup for height 4 let proposal_path = path.join("4").join(format!( "block_proposal_{}_{}.bin", bytes_to_hex_str(&proposal.content.get_hash()), bytes_to_hex_str(&ic_crypto::crypto_hash(&proposal)), )); assert!( 
path.join("4").join("catch_up_package.bin").exists(), "catch-up package at height 4 was backed up" ); assert!( proposal_path.exists(), "block proposal at height 4 was backed up" ); assert_eq!( fs::read_dir(path.join("4")).unwrap().count(), 2, "two artifacts for height 4 were backed up" ); let mut file = fs::File::open(path.join("4").join("catch_up_package.bin")).unwrap(); let mut buffer = Vec::new(); file.read_to_end(&mut buffer).unwrap(); let restored = CatchUpPackage::try_from(&pb::CatchUpPackage::decode(buffer.as_slice()).unwrap()) .unwrap(); assert_eq!( cup, restored, "restored catch-up package is identical with the original one" ); let mut file = fs::File::open(proposal_path).unwrap(); let mut buffer = Vec::new(); file.read_to_end(&mut buffer).unwrap(); let restored = BlockProposal::try_from(pb::BlockProposal::decode(buffer.as_slice()).unwrap()) .unwrap(); assert_eq!( proposal, restored, "restored catch-up package is identical with the original one" ); // Now we fast-forward the time for purging being definitely overdue. // Before we purge, we sleep for one purging interval, making sure artifacts are // old enough. Then we sleep again so that the group folder is // removed as well. Note that we measure the age of artifacts using // the FS timestamp and cannot fast-forward it. for _ in 0..2 { let sleep_time = purging_interval; std::thread::sleep(sleep_time); time_source .set_time(time_source.get_relative_time() + sleep_time) .unwrap(); // This should cause purging. pool.apply_changes(time_source.as_ref(), Vec::new()); pool.apply_changes(time_source.as_ref(), Vec::new()); } // Make sure the subnet directory is empty, as we purged everything. assert_eq!(fs::read_dir(&path).unwrap().count(), 0); // We sleep for ont interval more and make sure we also delete the subnet // directory let sleep_time = 2 * purging_interval; std::thread::sleep(sleep_time); time_source .set_time(time_source.get_relative_time() + sleep_time) .unwrap(); pool.apply_changes(time_source.as_ref(), Vec::new()); pool.apply_changes(time_source.as_ref(), Vec::new()); assert!(!path.exists()); }) } #[test] fn test_backup_purging() { ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| { let time_source = FastForwardTimeSource::new(); let backup_dir = tempfile::Builder::new().tempdir().unwrap(); let subnet_id = subnet_test_id(0); let path = backup_dir.path().join(format!("{:?}", subnet_id)); let mut pool = ConsensusPoolImpl::new_from_cup_without_bytes( subnet_id, make_genesis(ic_types::consensus::dkg::Summary::fake()), pool_config, ic_metrics::MetricsRegistry::new(), no_op_logger(), ); let purging_interval = Duration::from_millis(1000); pool.backup = Some(Backup::new( &pool, backup_dir.path().into(), backup_dir.path().join(format!("{:?}", subnet_id)), // Artifact retention time Duration::from_millis(900), purging_interval, MetricsRegistry::new(), no_op_logger(), )); let random_beacon = RandomBeacon::fake(RandomBeaconContent::new( Height::from(1), CryptoHashOf::from(CryptoHash(Vec::new())), )); let random_tape = RandomTape::fake(RandomTapeContent::new(Height::from(2))); let notarization = Notarization::fake(NotarizationContent::new( Height::from(3), CryptoHashOf::from(CryptoHash(vec![1, 2, 3])), )); let proposal = BlockProposal::fake( Block::new( CryptoHashOf::from(CryptoHash(Vec::new())), Payload::new( ic_crypto::crypto_hash, ic_types::consensus::dkg::Summary::fake().into(), ), Height::from(4), Rank(456), ValidationContext { registry_version: RegistryVersion::from(99), certified_height: 
Height::from(42), time: mock_time(), }, ), node_test_id(333), ); let changeset = vec![random_beacon.to_message(), random_tape.to_message()] .into_iter() .map(ChangeAction::AddToValidated) .collect(); // Trigger purging timestamp to update. pool.apply_changes(time_source.as_ref(), Vec::new()); // sync pool.apply_changes(time_source.as_ref(), Vec::new()); // Apply changes pool.apply_changes(time_source.as_ref(), changeset); // sync pool.apply_changes(time_source.as_ref(), Vec::new()); let group_path = &path.join("0"); // We expect 3 folders for heights 0 to 2. assert_eq!(fs::read_dir(&group_path).unwrap().count(), 3); // Let's sleep so that the previous heights are close to being purged. let sleep_time = purging_interval / 10 * 8; std::thread::sleep(sleep_time); time_source .set_time(time_source.get_relative_time() + sleep_time) .unwrap(); // Now add new artifacts let changeset = vec![notarization.to_message(), proposal.to_message()] .into_iter() .map(ChangeAction::AddToValidated) .collect(); pool.apply_changes(time_source.as_ref(), changeset); // sync pool.apply_changes(time_source.as_ref(), Vec::new()); // We expect 5 folders for heights 0 to 4. assert_eq!(fs::read_dir(&group_path).unwrap().count(), 5); // We sleep just enough so that purging is overdue and the oldest artifacts are // approximately 1 purging interval old. let sleep_time = purging_interval / 10 * 3; std::thread::sleep(sleep_time); time_source .set_time(time_source.get_relative_time() + sleep_time) .unwrap(); // Trigger the purging. pool.apply_changes(time_source.as_ref(), Vec::new()); // sync pool.apply_changes(time_source.as_ref(), Vec::new()); // We expect only 2 folders to survive the purging: 3, 4 assert_eq!(fs::read_dir(&group_path).unwrap().count(), 2); assert!(group_path.join("3").exists()); assert!(group_path.join("4").exists()); let sleep_time = purging_interval + purging_interval / 10 * 3; std::thread::sleep(sleep_time); time_source .set_time(time_source.get_relative_time() + sleep_time) .unwrap(); // Trigger the purging. pool.apply_changes(time_source.as_ref(), Vec::new()); // sync pool.apply_changes(time_source.as_ref(), Vec::new()); // We deleted all artifacts, but the group folder was updated by this and needs // to age now. assert!(group_path.exists()); assert_eq!(fs::read_dir(&group_path).unwrap().count(), 0); let sleep_time = purging_interval + purging_interval / 10 * 3; std::thread::sleep(sleep_time); time_source .set_time(time_source.get_relative_time() + sleep_time) .unwrap(); // Trigger the purging. pool.apply_changes(time_source.as_ref(), Vec::new()); // sync pool.apply_changes(time_source.as_ref(), Vec::new()); // The group folder expired and was deleted. assert!(!group_path.exists()); assert_eq!(fs::read_dir(&path).unwrap().count(), 0); // We wait more and make sure the subnet folder is purged. let sleep_time = purging_interval + purging_interval / 10 * 3; std::thread::sleep(sleep_time); time_source .set_time(time_source.get_relative_time() + sleep_time) .unwrap(); // Trigger the purging. pool.apply_changes(time_source.as_ref(), Vec::new()); // sync pool.apply_changes(time_source.as_ref(), Vec::new()); // The subnet_id folder expired and was deleted. assert!(!path.exists()); assert_eq!(fs::read_dir(&backup_dir).unwrap().count(), 0); }) } }
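// Hedged illustration (not part of the original file): a self-contained sketch
// of the batching pattern `PoolSectionOps` implements above -- callers queue
// Insert/Remove/PurgeBelow operations and hand the whole batch to a pool
// section in a single `mutate` call, as `apply_changes` does. `Height`,
// `MsgId`, and the string artifact are stand-ins, not the real ic_types.
#[cfg(test)]
mod pool_section_ops_sketch {
    type Height = u64; // stand-in for ic_types::Height
    type MsgId = u64; // stand-in for ConsensusMessageId

    #[derive(Debug)]
    enum Op<T> {
        Insert(T),
        Remove(MsgId),
        PurgeBelow(Height), // non-inclusive, like PoolSectionOp::PurgeBelow
    }

    #[derive(Debug)]
    struct Ops<T> {
        ops: Vec<Op<T>>,
    }

    impl<T> Ops<T> {
        fn new() -> Self {
            Ops { ops: Vec::new() }
        }
        fn insert(&mut self, artifact: T) {
            self.ops.push(Op::Insert(artifact));
        }
        fn remove(&mut self, id: MsgId) {
            self.ops.push(Op::Remove(id));
        }
        fn purge_below(&mut self, height: Height) {
            self.ops.push(Op::PurgeBelow(height));
        }
    }

    #[test]
    fn one_batch_per_section() {
        // Mirrors how `apply_changes` builds separate batches for the
        // validated and unvalidated sections, then applies each at once.
        let mut validated_ops = Ops::new();
        validated_ops.insert("artifact at height 5");
        validated_ops.remove(3);
        validated_ops.purge_below(4);
        assert_eq!(validated_ops.ops.len(), 3);
    }
}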
noUnsafeAnyRule.js
"use strict"; /** * @license * Copyright 2017 Palantir Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ Object.defineProperty(exports, "__esModule", { value: true }); exports.Rule = void 0; var tslib_1 = require("tslib"); var tsutils_1 = require("tsutils"); var ts = require("typescript"); var Lint = require("../index"); var utils_1 = require("../utils"); var Rule = /** @class */ (function (_super) { tslib_1.__extends(Rule, _super); function Rule() { return _super !== null && _super.apply(this, arguments) || this; } Rule.prototype.applyWithProgram = function (sourceFile, program) { return this.applyWithWalker(new NoUnsafeAnyWalker(sourceFile, this.ruleName, program.getTypeChecker())); }; /* tslint:disable:object-literal-sort-keys */ Rule.metadata = { ruleName: "no-unsafe-any", description: Lint.Utils.dedent(templateObject_1 || (templateObject_1 = tslib_1.__makeTemplateObject(["\n Warns when using an expression of type 'any' in a dynamic way.\n Uses are only allowed if they would work for `{} | null | undefined`.\n Downcasting to unknown is always safe.\n Type casts and tests are allowed.\n Expressions that work on all values (such as `\"\" + x`) are allowed."], ["\n Warns when using an expression of type 'any' in a dynamic way.\n Uses are only allowed if they would work for \\`{} | null | undefined\\`.\n Downcasting to unknown is always safe.\n Type casts and tests are allowed.\n Expressions that work on all values (such as \\`\"\" + x\\`) are allowed."]))), optionsDescription: "Not configurable.", options: null, optionExamples: [true], rationale: Lint.Utils.dedent(templateObject_2 || (templateObject_2 = tslib_1.__makeTemplateObject(["\n If you're dealing with data of unknown or \"any\" types, you shouldn't be accessing members of it.\n Either add type annotations for properties that may exist or change the data type to the empty object type `{}`.\n\n Alternately, if you're creating storage or handling for consistent but unknown types, such as in data structures\n or serialization, use `<T>` template types for generic type handling.\n\n Also see the `no-any` rule.\n "], ["\n If you're dealing with data of unknown or \"any\" types, you shouldn't be accessing members of it.\n Either add type annotations for properties that may exist or change the data type to the empty object type \\`{}\\`.\n\n Alternately, if you're creating storage or handling for consistent but unknown types, such as in data structures\n or serialization, use \\`<T>\\` template types for generic type handling.\n\n Also see the \\`no-any\\` rule.\n "]))), type: "functionality", typescriptOnly: true, requiresTypeInfo: true, }; /* tslint:enable:object-literal-sort-keys */ Rule.FAILURE_STRING = "Unsafe use of expression of type 'any'."; return Rule; }(Lint.Rules.TypedRule)); exports.Rule = Rule; var NoUnsafeAnyWalker = /** @class */ (function (_super) { tslib_1.__extends(NoUnsafeAnyWalker, _super); function NoUnsafeAnyWalker(sourceFile, ruleName, checker) { var _this = _super.call(this, sourceFile, ruleName, undefined) || 
this; _this.checker = checker; /** Wraps `visitNode` with the correct `this` binding and discards the return value to prevent `forEachChild` from returning early */ _this.visitNodeCallback = function (node) { return void _this.visitNode(node); }; return _this; } NoUnsafeAnyWalker.prototype.walk = function (sourceFile) { if (sourceFile.isDeclarationFile) { return; // Not possible in a declaration file. } sourceFile.statements.forEach(this.visitNodeCallback); }; NoUnsafeAnyWalker.prototype.visitNode = function (node, anyOk) { switch (node.kind) { case ts.SyntaxKind.ParenthesizedExpression: // Don't warn on a parenthesized expression, warn on its contents. return this.visitNode(node.expression, anyOk); case ts.SyntaxKind.LabeledStatement: // Ignore label return this.visitNode(node.statement); // ignore labels case ts.SyntaxKind.BreakStatement: case ts.SyntaxKind.ContinueStatement: // Ignore types case ts.SyntaxKind.InterfaceDeclaration: case ts.SyntaxKind.TypeAliasDeclaration: case ts.SyntaxKind.TypeParameter: case ts.SyntaxKind.IndexSignature: // Ignore imports case ts.SyntaxKind.ImportEqualsDeclaration: case ts.SyntaxKind.ImportDeclaration: case ts.SyntaxKind.ExportDeclaration: case ts.SyntaxKind.ExportAssignment: return false; case ts.SyntaxKind.ThisKeyword: case ts.SyntaxKind.Identifier: return anyOk ? false : this.check(node); // Recurse through these, but ignore the immediate child because it is allowed to be 'any'. case ts.SyntaxKind.DeleteExpression: case ts.SyntaxKind.ExpressionStatement: case ts.SyntaxKind.TypeAssertionExpression: case ts.SyntaxKind.AsExpression: case ts.SyntaxKind.TemplateSpan: // Allow stringification (works on all values). Note: tagged templates handled differently. case ts.SyntaxKind.TypeOfExpression: case ts.SyntaxKind.VoidExpression: return this.visitNode(node.expression, true); case ts.SyntaxKind.ThrowStatement: { var expression = node.expression; return expression !== undefined ? this.visitNode(expression, true) : false; } case ts.SyntaxKind.PropertyAssignment: { var _a = node, name = _a.name, initializer = _a.initializer; this.visitNode(name, /*anyOk*/ true); if (tsutils_1.isReassignmentTarget(node.parent)) { return this.visitNode(initializer, true); } return this.checkContextualType(initializer, true); } case ts.SyntaxKind.ShorthandPropertyAssignment: { var _b = node, name = _b.name, objectAssignmentInitializer = _b.objectAssignmentInitializer; if (objectAssignmentInitializer !== undefined) { return this.checkContextualType(objectAssignmentInitializer); } return this.checkContextualType(name, true); } case ts.SyntaxKind.PropertyDeclaration: { var _c = node, name = _c.name, initializer = _c.initializer; this.visitNode(name, true); return (initializer !== undefined && this.visitNode(initializer, isPropertyAnyOrUnknown(node, this.checker))); } case ts.SyntaxKind.SpreadAssignment: return this.visitNode(node.expression, // allow any in object spread, but not in object rest !tsutils_1.isReassignmentTarget(node.parent)); case ts.SyntaxKind.ComputedPropertyName: return this.visitNode(node.expression, true); case ts.SyntaxKind.TaggedTemplateExpression: { var _d = node, tag = _d.tag, template = _d.template; if (template.kind === ts.SyntaxKind.TemplateExpression) { for (var _i = 0, _e = template.templateSpans; _i < _e.length; _i++) { var expression = _e[_i].expression; this.checkContextualType(expression); } } // Also check the template expression itself if (this.visitNode(tag)) { return true; } return anyOk ? 
false : this.check(node); } case ts.SyntaxKind.CallExpression: case ts.SyntaxKind.NewExpression: { var _f = node, expression = _f.expression, args = _f.arguments; if (args !== undefined) { for (var _g = 0, args_1 = args; _g < args_1.length; _g++) { var arg = args_1[_g]; this.checkContextualType(arg); } } if (this.visitNode(expression)) { return true; } // Also check the call expression itself return anyOk ? false : this.check(node); } case ts.SyntaxKind.PropertyAccessExpression: // Don't warn for right hand side; this is redundant if we warn for the access itself. if (this.visitNode(node.expression)) { return true; } return anyOk ? false : this.check(node); case ts.SyntaxKind.ElementAccessExpression: { var _h = node, expression = _h.expression, argumentExpression = _h.argumentExpression;
if (argumentExpression !== undefined) { this.visitNode(argumentExpression, true);
} if (this.visitNode(expression)) { return true; } return anyOk ? false : this.check(node); } case ts.SyntaxKind.ReturnStatement: { var expression = node.expression; return expression !== undefined && this.checkContextualType(expression, true); } case ts.SyntaxKind.SwitchStatement: { var _j = node, expression = _j.expression, clauses = _j.caseBlock.clauses; // Allow `switch (x) {}` where `x` is any this.visitNode(expression, /*anyOk*/ true); for (var _k = 0, clauses_1 = clauses; _k < clauses_1.length; _k++) { var clause = clauses_1[_k]; if (clause.kind === ts.SyntaxKind.CaseClause) { // Allow `case x:` where `x` is any this.visitNode(clause.expression, /*anyOk*/ true); } for (var _l = 0, _m = clause.statements; _l < _m.length; _l++) { var statement = _m[_l]; this.visitNode(statement); } } return false; } case ts.SyntaxKind.ModuleDeclaration: { // In `declare global { ... }`, don't mark `global` as unsafe any. var body = node.body; return body !== undefined && this.visitNode(body); } case ts.SyntaxKind.IfStatement: { var _o = node, expression = _o.expression, thenStatement = _o.thenStatement, elseStatement = _o.elseStatement; this.visitNode(expression, true); // allow truthyness check this.visitNode(thenStatement); return elseStatement !== undefined && this.visitNode(elseStatement); } case ts.SyntaxKind.PrefixUnaryExpression: { var _p = node, operator = _p.operator, operand = _p.operand; this.visitNode(operand, operator === ts.SyntaxKind.ExclamationToken); // allow falsyness check return false; } case ts.SyntaxKind.ForStatement: { var _q = node, initializer = _q.initializer, condition = _q.condition, incrementor = _q.incrementor, statement = _q.statement; if (initializer !== undefined) { this.visitNode(initializer, true); } if (condition !== undefined) { this.visitNode(condition, true); } // allow truthyness check if (incrementor !== undefined) { this.visitNode(incrementor, true); } return this.visitNode(statement); } case ts.SyntaxKind.DoStatement: case ts.SyntaxKind.WhileStatement: this.visitNode(node.expression, true); return this.visitNode(node.statement); case ts.SyntaxKind.ConditionalExpression: { var _r = node, condition = _r.condition, whenTrue = _r.whenTrue, whenFalse = _r.whenFalse; this.visitNode(condition, true); var left = this.visitNode(whenTrue, anyOk); return this.visitNode(whenFalse, anyOk) || left; } case ts.SyntaxKind.VariableDeclaration: case ts.SyntaxKind.Parameter: return this.checkVariableOrParameterDeclaration(node); case ts.SyntaxKind.BinaryExpression: return this.checkBinaryExpression(node, anyOk); case ts.SyntaxKind.AwaitExpression: this.visitNode(node.expression); return anyOk ? 
false : this.check(node); case ts.SyntaxKind.YieldExpression: return this.checkYieldExpression(node, anyOk); case ts.SyntaxKind.ClassExpression: case ts.SyntaxKind.ClassDeclaration: this.checkClassLikeDeclaration(node); return false; case ts.SyntaxKind.ArrayLiteralExpression: { for (var _s = 0, _t = node.elements; _s < _t.length; _s++) { var element = _t[_s]; this.checkContextualType(element, true); } return false; } case ts.SyntaxKind.JsxExpression: return (node.expression !== undefined && this.checkContextualType(node.expression)); } if (tsutils_1.isTypeNodeKind(node.kind) || tsutils_1.isTokenKind(node.kind)) { return false; } return ts.forEachChild(node, this.visitNodeCallback); }; NoUnsafeAnyWalker.prototype.check = function (node) { if (!isNodeAny(node, this.checker)) { return false; } this.addFailureAtNode(node, Rule.FAILURE_STRING); return true; }; NoUnsafeAnyWalker.prototype.checkContextualType = function (node, allowIfNoContextualType) { var type = this.checker.getContextualType(node); var anyOk = (type === undefined && allowIfNoContextualType) || isAny(type, true); return this.visitNode(node, anyOk); }; // Allow `const x = foo;` and `const x: any = foo`, but not `const x: Foo = foo;`. NoUnsafeAnyWalker.prototype.checkVariableOrParameterDeclaration = function (_a) { var name = _a.name, type = _a.type, initializer = _a.initializer; this.checkBindingName(name); // Always allow the LHS to be `any`. Just don't allow RHS to be `any` when LHS isn't `any` or `unknown`. var anyOk = (name.kind === ts.SyntaxKind.Identifier && (type === undefined || type.kind === ts.SyntaxKind.AnyKeyword || type.kind === ts.SyntaxKind.UnknownKeyword)) || (type !== undefined && type.kind === ts.SyntaxKind.AnyKeyword) || (type !== undefined && type.kind === ts.SyntaxKind.UnknownKeyword); return initializer !== undefined && this.visitNode(initializer, anyOk); }; NoUnsafeAnyWalker.prototype.checkBinaryExpression = function (node, anyOk) { var allowAnyLeft = false; var allowAnyRight = false; switch (node.operatorToken.kind) { case ts.SyntaxKind.ExclamationEqualsEqualsToken: case ts.SyntaxKind.ExclamationEqualsToken: case ts.SyntaxKind.EqualsEqualsEqualsToken: case ts.SyntaxKind.EqualsEqualsToken: case ts.SyntaxKind.CommaToken: // Allow `any, any` case ts.SyntaxKind.BarBarToken: // Allow `any || any` case ts.SyntaxKind.AmpersandAmpersandToken: // Allow `any && any` allowAnyLeft = allowAnyRight = true; break; case ts.SyntaxKind.InstanceOfKeyword: // Allow test allowAnyLeft = true; break; case ts.SyntaxKind.EqualsToken: // Allow assignment if the lhs is also *any*. allowAnyLeft = true; allowAnyRight = isNodeAny(node.left, this.checker, true); break; case ts.SyntaxKind.PlusToken: // Allow implicit stringification case ts.SyntaxKind.PlusEqualsToken: allowAnyLeft = allowAnyRight = isStringLike(node.left, this.checker) || (isStringLike(node.right, this.checker) && node.operatorToken.kind === ts.SyntaxKind.PlusToken); } this.visitNode(node.left, allowAnyLeft); this.visitNode(node.right, allowAnyRight); return anyOk ? 
false : this.check(node); }; NoUnsafeAnyWalker.prototype.checkYieldExpression = function (node, anyOk) { if (node.expression !== undefined) { this.checkContextualType(node.expression, true); } if (anyOk) { return false; } this.addFailureAtNode(node, Rule.FAILURE_STRING); return true; }; NoUnsafeAnyWalker.prototype.checkClassLikeDeclaration = function (node) { if (node.decorators !== undefined) { node.decorators.forEach(this.visitNodeCallback); } if (node.heritageClauses !== undefined) { node.heritageClauses.forEach(this.visitNodeCallback); } return node.members.forEach(this.visitNodeCallback); }; NoUnsafeAnyWalker.prototype.checkBindingName = function (node) { if (node.kind !== ts.SyntaxKind.Identifier) { if (isNodeAny(node, this.checker)) { this.addFailureAtNode(node, Rule.FAILURE_STRING); } for (var _i = 0, _a = node.elements; _i < _a.length; _i++) { var element = _a[_i]; if (element.kind !== ts.SyntaxKind.OmittedExpression) { if (element.propertyName !== undefined && element.propertyName.kind === ts.SyntaxKind.ComputedPropertyName) { this.visitNode(element.propertyName.expression); } this.checkBindingName(element.name); if (element.initializer !== undefined) { this.checkContextualType(element.initializer); } } } } }; return NoUnsafeAnyWalker; }(Lint.AbstractWalker)); /** Check if property has no type annotation in this class and the base class */ function isPropertyAnyOrUnknown(node, checker) { if (!isNodeAny(node.name, checker, true) || node.name.kind === ts.SyntaxKind.ComputedPropertyName) { return false; } for (var _i = 0, _a = checker.getBaseTypes(checker.getTypeAtLocation(node.parent)); _i < _a.length; _i++) { var base = _a[_i]; var prop = base.getProperty(node.name.text); if (prop !== undefined && prop.declarations !== undefined) { return isAny(checker.getTypeOfSymbolAtLocation(prop, prop.declarations[0]), true); } } return true; } /** * @param orUnknown If true, this function will also return true when the node is unknown. 
*/ function isNodeAny(node, checker, orUnknown) { if (orUnknown === void 0) { orUnknown = false; } var symbol = checker.getSymbolAtLocation(node); if (symbol !== undefined && tsutils_1.isSymbolFlagSet(symbol, ts.SymbolFlags.Alias)) { symbol = checker.getAliasedSymbol(symbol); } if (symbol !== undefined) { // NamespaceModule is a type-only namespace without runtime value, its type is 'any' when used as 'ns.Type' -> avoid error if (tsutils_1.isSymbolFlagSet(symbol, ts.SymbolFlags.NamespaceModule)) { return false; } if (tsutils_1.isSymbolFlagSet(symbol, ts.SymbolFlags.Type)) { return isAny(checker.getDeclaredTypeOfSymbol(symbol), orUnknown); } } // Lowercase JSX elements are assumed to be allowed by design if (isJsxNativeElement(node)) { return false; } return isAny(checker.getTypeAtLocation(node), orUnknown); } var jsxElementTypes = new Set([ ts.SyntaxKind.JsxClosingElement, ts.SyntaxKind.JsxOpeningElement, ts.SyntaxKind.JsxSelfClosingElement, ]); function isJsxNativeElement(node) { if (!tsutils_1.isIdentifier(node) || node.parent === undefined) { return false; } // TypeScript <=2.1 incorrectly parses JSX fragments if (node.text === "") { return true; } return jsxElementTypes.has(node.parent.kind) && utils_1.isLowerCase(node.text[0]); } function isStringLike(expr, checker) { return tsutils_1.isTypeFlagSet(checker.getTypeAtLocation(expr), ts.TypeFlags.StringLike); } function isAny(type, orUnknown) { if (orUnknown === void 0) { orUnknown = false; } return (type !== undefined && (tsutils_1.isTypeFlagSet(type, ts.TypeFlags.Any) || (orUnknown && tsutils_1.isTypeFlagSet(type, ts.TypeFlags.Unknown)))); } var templateObject_1, templateObject_2;
buffer_context.rs
//! Contains all of the OpenGL state types related to buffer objects. use std::mem; use std::ptr; use std::marker::PhantomData; use std::borrow::BorrowMut; use gl; use gl::types::*; use context::{ContextOf, BaseContext, AContext}; use buffer::{Buffer, BufferDataUsage, BufferBindingTarget}; use program::{ProgramAttrib}; use index_data::{IndexData, IndexDatum, IndexDatumType}; use types::{DataType, DrawingMode, GLObject, GLError}; use to_ref::{ToRef, ToMut}; unsafe fn _draw_elements(mode: DrawingMode, count: usize, index_type: IndexDatumType, indices: *const GLvoid) { let gl_index_type: GLenum = match index_type { IndexDatumType::UnsignedByte => gl::UNSIGNED_BYTE, IndexDatumType::UnsignedShort => gl::UNSIGNED_SHORT }; gl::DrawElements(mode.gl_enum(), count as GLsizei, gl_index_type, indices); dbg_gl_error! { GLError::InvalidEnum => "`mode` or `type` is not an accepted value", GLError::InvalidValue => "`count` is negative", GLError::InvalidFramebufferOperation => "The current framebuffer is not framebuffer-complete", _ => "Unknown error" } } fn _bind_buffer(target: BufferBindingTarget, buffer: &mut Buffer) { unsafe { gl::BindBuffer(target as GLuint, buffer.id()); dbg_gl_sanity_check! { GLError::InvalidEnum => "`target` is not an allowed value", _ => "Unknown error" } } } /// An extension trait that includes buffer-object-related OpenGL methods. pub trait ContextBufferExt: BaseContext { /// Create a new, empty OpenGL buffer object. /// /// # See also /// [`glGenBuffers`](http://docs.gl/es2/glGenBuffers) OpenGL docs /// /// [`gl.new_vertex_buffer`](../../vertex_buffer/trait.ContextVertexBufferExt.html#method.new_vertex_buffer): /// Create a new vertex buffer, which wraps a raw OpenGL buffer. /// /// [`gl.new_index_buffer`](../struct.ContextOf.html#method.new_index_buffer): /// Create a new index buffer, which wraps a raw OpenGL buffer. /// /// [`gl.buffer_bytes`](trait.ContextBufferExt.html#method.buffer_bytes): /// Send data to a buffer. fn gen_buffer(&self) -> Buffer { let mut id : GLuint = 0; unsafe { gl::GenBuffers(1, &mut id as *mut GLuint); } dbg_gl_sanity_check! { GLError::InvalidValue => "`n` is negative", _ => "Unknown error" } unsafe { Buffer::from_raw(id) } } /// Send data to a buffer object. Note that this will replace the buffer's /// current contents, if any. /// /// # See also /// [`glBufferData`](http://docs.gl/es2/glBufferData) OpenGL docs fn buffer_bytes<B>(&self, gl_buffer: &mut B, bytes: &[u8], usage: BufferDataUsage) where B: BufferBinding { let ptr = bytes.as_ptr(); let size = bytes.len() * mem::size_of::<u8>(); unsafe { gl::BufferData(gl_buffer.target().gl_enum(), size as GLsizeiptr, ptr as *const GLvoid, usage.gl_enum()); dbg_gl_error! { GLError::InvalidEnum => "Invalid `target` or `usage`", GLError::InvalidValue => "`size` is negative", GLError::InvalidOperation => "Object 0 is bound to buffer target", GLError::OutOfMemory => "Unable to create a large enough buffer", _ => "Unknown error" } } } /// Specify how an array of vertex data will be treated while rendering. /// Most uses of this function can be replaced by using a [`VertexBuffer`] /// (../../vertex_buffer/struct.VertexBuffer.html), which provides a nicer /// interface for setting up vertex attributes. /// /// # Panics /// This function will panic in debug mode if `components` is less than 1 or /// greater than 4. /// /// # Safety /// Using this function can cause an OpenGL draw call to read uninitialized /// memory from a buffer. 
/// /// # See also /// [`glVertexAttribPointer`](http://docs.gl/es2/glVertexAttribPointer) OpenGL docs unsafe fn vertex_attrib_pointer(&self, attrib: ProgramAttrib, components: i8, gl_type: DataType, normalized: bool, stride: usize, offset: usize) { debug_assert!(1 <= components && components <= 4); let gl_normalized = if normalized { gl::TRUE } else { gl::FALSE }; gl::VertexAttribPointer(attrib.gl_index, components as GLint, gl_type.gl_enum(), gl_normalized, stride as GLsizei, offset as *const GLvoid); dbg_gl_error! { GLError::InvalidEnum => "Illegal vertex attribute type", GLError::InvalidValue => "`stride` is negative, `size` is not in range, or `index` is >= GL_MAX_VERTEX_ATTRIBS", GLError::InvalidFramebufferOperation => "Currently bound framebuffer is not framebuffer complete", _ => "Unknown error" } } /// Use the vertex data from the provided array buffer binding to render /// primitives. /// /// - `_ab`: The binding for the array buffer to read vertices from. /// - `mode`: The type of primitives to draw. /// - `first`: The index of the first vertex to read. /// - `count`: The number of vertices to read. /// /// # Safety /// The vertex attributes need to be set up before calling this /// method by using the [`gl.vertex_attrib_pointer`] /// (trait.ContextBufferExt.html#method.vertex_attrib_pointer) method. /// /// # See also /// [`glDrawArrays`](http://docs.gl/es2/glDrawArrays) OpenGL docs unsafe fn draw_arrays_range(&self, _ab: &ArrayBufferBinding, mode: DrawingMode, first: u32, count: usize) { gl::DrawArrays(mode.gl_enum(), first as GLint, count as GLsizei); dbg_gl_sanity_check! { GLError::InvalidEnum => "`mode` is not an accepted value", GLError::InvalidValue => "`count` is negative", _ => "Unknown error" } } /// Draw primitives specified by the provided element array buffer, treated /// as indices of the vertices from the provided array buffer. /// /// - `_ab`: The binding for the array buffer that contains the vertex /// data. /// - `_eab`: The binding for the element array buffer that contains the /// index data. /// - `mode`: The type of primitives to draw. /// - `count`: The number of indices to read. /// - `index_type`: Specifies the data type of the index (whether it is /// a byte or short, signed or unsigned, etc). /// /// # See also /// [`glDrawElements`](http://docs.gl/es2/glDrawElements) OpenGL docs unsafe fn draw_n_elements_buffered(&self, _ab: &ArrayBufferBinding, _eab: &ElementArrayBufferBinding, mode: DrawingMode, count: usize, index_type: IndexDatumType) { _draw_elements(mode, count, index_type, ptr::null()); } /// Draw primitives specified by the provided index array, treated as /// indices of the vertices from the provided array buffer. /// /// - `_ab`: The binding for the array buffer that contains the vertex /// data. /// - `mode`: The type of primitives to draw. /// - `count`: The number of indices to read. /// - `indices`: The index array to use. /// /// # See also /// [`glDrawElements`](http://docs.gl/es2/glDrawElements) OpenGL docs unsafe fn draw_n_elements<I>(&self, _ab: &ArrayBufferBinding, mode: DrawingMode, count: usize, indices: &[I]) where I: IndexDatum, [I]: IndexData { debug_assert!(count <= indices.len()); let ptr = indices.index_bytes().as_ptr(); let index_type = I::index_datum_type(); _draw_elements(mode, count, index_type, mem::transmute(ptr)); } /// Draw primitives specified by the provided index array, treated as /// indices of the vertices from the provided array buffer.
/// /// - `_ab`: The binding for the array buffer that contains the vertex /// data. /// - `mode`: The type of primitives to draw. /// - `indices`: The index array to use. /// /// # See also /// [`glDrawElements`](http://docs.gl/es2/glDrawElements) OpenGL docs unsafe fn draw_elements<I>(&self, _ab: &ArrayBufferBinding, mode: DrawingMode, indices: &[I]) where I: IndexDatum, [I]: IndexData { self.draw_n_elements(_ab, mode, indices.len(), indices); } } impl<C: BaseContext> ContextBufferExt for C { } /// An OpenGL context that has a free `GL_ARRAY_BUFFER` binding. pub trait ArrayBufferContext: AContext { /// The type of binder this context contains. type Binder: BorrowMut<ArrayBufferBinder>; /// The OpenGL context that will be returned after binding the array buffer. type Rest: AContext; /// Split this context into a binder and the remaining context. fn split_array_buffer(self) -> (Self::Binder, Self::Rest); /// Bind a buffer to this context's array buffer, returning /// a new context and a binding. /// /// # See also /// [`glBindBuffer`](http://docs.gl/es2/glBindBuffer) OpenGL docs fn bind_array_buffer<'a>(self, buffer: &'a mut Buffer) -> (ArrayBufferBinding<'a>, Self::Rest) where Self: Sized { let (mut binder, rest) = self.split_array_buffer(); (binder.borrow_mut().bind(buffer), rest) } } /// An OpenGL context that has a free `GL_ELEMENT_ARRAY_BUFFER` binding. pub trait ElementArrayBufferContext: AContext { /// The type of binder this context contains. type Binder: BorrowMut<ElementArrayBufferBinder>; /// The OpenGL context that will be returned after binding the element /// array buffer. type Rest: AContext; /// Split this context into a binder and the remaining context. fn split_element_array_buffer(self) -> (Self::Binder, Self::Rest); /// Bind a buffer to this context's element array buffer, returning /// a new context and a binding. 
/// /// # See also /// [`glBindBuffer`](http://docs.gl/es2/glBindBuffer) OpenGL docs fn bind_element_array_buffer<'a>(self, buffer: &'a mut Buffer) -> (ElementArrayBufferBinding<'a>, Self::Rest) where Self: Sized { let (mut binder, rest) = self.split_element_array_buffer(); (binder.borrow_mut().bind(buffer), rest) } } impl<BA, BE, F, P, R, T> ArrayBufferContext for ContextOf<BufferBinderOf<BA, BE>, F, P, R, T> where BA: BorrowMut<ArrayBufferBinder> { type Binder = BA; type Rest = ContextOf<BufferBinderOf<(), BE>, F, P, R, T>; fn split_array_buffer(self) -> (Self::Binder, Self::Rest) { let (buffers, gl) = self.swap_buffers(()); let (binder, rest_buffers) = buffers.swap_array(()); let ((), gl) = gl.swap_buffers(rest_buffers); (binder, gl) } } impl<'a, BA, BE, F, P, R, T> ArrayBufferContext for &'a mut ContextOf<BufferBinderOf<BA, BE>, F, P, R, T> where BA: BorrowMut<ArrayBufferBinder> { type Binder = &'a mut ArrayBufferBinder; type Rest = ContextOf<BufferBinderOf<(), &'a mut BE>, &'a mut F, &'a mut P, &'a mut R, &'a mut T>; fn split_array_buffer(self) -> (Self::Binder, Self::Rest) { let gl = self.borrowed_mut(); let (buffers, gl) = gl.swap_buffers(()); let buffers = buffers.borrowed_mut(); let (binder, rest_buffers) = buffers.swap_array(()); let ((), gl) = gl.swap_buffers(rest_buffers); (binder, gl) } } impl<'a, BA, BE, F, P, R, T> ArrayBufferContext for &'a mut ContextOf<&'a mut BufferBinderOf<BA, BE>, F, P, R, T> where BA: BorrowMut<ArrayBufferBinder>, F: ToMut<'a>, P: ToMut<'a>, R: ToMut<'a>, T: ToMut<'a> { type Binder = &'a mut ArrayBufferBinder; type Rest = ContextOf<BufferBinderOf<(), &'a mut BE>, F::Mut, P::Mut, R::Mut, T::Mut>; fn split_array_buffer(self) -> (Self::Binder, Self::Rest) { let gl = self.to_mut(); let (buffers, gl): (&mut BufferBinderOf<_, _>, _) = gl.swap_buffers(()); let buffers = buffers.borrowed_mut(); let (binder, rest_buffers) = buffers.swap_array(()); let ((), gl) = gl.swap_buffers(rest_buffers); (binder, gl) } } impl<BA, BE, F, P, R, T> ElementArrayBufferContext for ContextOf<BufferBinderOf<BA, BE>, F, P, R, T> where BE: BorrowMut<ElementArrayBufferBinder> { type Binder = BE; type Rest = ContextOf<BufferBinderOf<BA, ()>, F, P, R, T>; fn split_element_array_buffer(self) -> (Self::Binder, Self::Rest) { let (buffers, gl) = self.swap_buffers(()); let (binder, rest_buffers) = buffers.swap_element_array(()); let ((), gl) = gl.swap_buffers(rest_buffers); (binder, gl) } } impl<'a, BA, BE, F, P, R, T> ElementArrayBufferContext for &'a mut ContextOf<BufferBinderOf<BA, BE>, F, P, R, T> where BE: BorrowMut<ElementArrayBufferBinder> { type Binder = &'a mut ElementArrayBufferBinder; type Rest = ContextOf<BufferBinderOf<&'a mut BA, ()>, &'a mut F, &'a mut P, &'a mut R, &'a mut T>; fn split_element_array_buffer(self) -> (Self::Binder, Self::Rest) { let gl = self.borrowed_mut(); let (buffers, gl) = gl.swap_buffers(()); let buffers = buffers.borrowed_mut(); let (binder, rest_buffers) = buffers.swap_element_array(()); let ((), gl) = gl.swap_buffers(rest_buffers); (binder, gl) } } impl<'a, BA, BE, F, P, R, T> ElementArrayBufferContext for &'a mut ContextOf<&'a mut BufferBinderOf<BA, BE>, F, P, R, T> where BE: BorrowMut<ElementArrayBufferBinder>, F: ToMut<'a>, P: ToMut<'a>, R: ToMut<'a>, T: ToMut<'a> { type Binder = &'a mut ElementArrayBufferBinder; type Rest = ContextOf<BufferBinderOf<&'a mut BA, ()>, F::Mut, P::Mut, R::Mut, T::Mut>; fn split_element_array_buffer(self) -> (Self::Binder, Self::Rest) { let gl = self.to_mut(); let (buffers, gl): (&mut BufferBinderOf<_, _>, _) = 
gl.swap_buffers(()); let buffers = buffers.borrowed_mut(); let (binder, rest_buffers) = buffers.swap_element_array(()); let ((), gl) = gl.swap_buffers(rest_buffers); (binder, gl) } } /// An OpenGL context that has all free buffer bindings. This trait implies /// both [`ArrayBufferContext`](trait.ArrayBufferContext.html) and /// [`ElementArrayBufferContext`](trait.ElementArrayBufferContext.html). pub trait BufferContext: ArrayBufferContext + ElementArrayBufferContext { } impl<BA, BE, F, P, R, T> BufferContext for ContextOf<BufferBinderOf<BA, BE>, F, P, R, T> where BA: BorrowMut<ArrayBufferBinder>, BE: BorrowMut<ElementArrayBufferBinder> { } impl<'a, BA, BE, F, P, R, T> BufferContext for &'a mut ContextOf<BufferBinderOf<BA, BE>, F, P, R, T> where BA: BorrowMut<ArrayBufferBinder>, BE: BorrowMut<ElementArrayBufferBinder>, F: ToMut<'a>, P: ToMut<'a>, R: ToMut<'a>, T: ToMut<'a> { } impl<'a, BA, BE, F, P, R, T> BufferContext for &'a mut ContextOf<&'a mut BufferBinderOf<BA, BE>, F, P, R, T> where BA: BorrowMut<ArrayBufferBinder>, BE: BorrowMut<ElementArrayBufferBinder>, F: ToMut<'a>, P: ToMut<'a>, R: ToMut<'a>, T: ToMut<'a> { } /// A buffer that has been bound to an OpenGL buffer binding point. pub trait BufferBinding { /// Returns the OpenGL binding target that this buffer binding /// references. fn target(&self) -> BufferBindingTarget; } /// Represents a buffer that has been bound to the `GL_ARRAY_BUFFER` /// binding target. pub struct ArrayBufferBinding<'a> { _phantom_ref: PhantomData<&'a mut Buffer>, _phantom_ptr: PhantomData<*mut ()> } impl<'a> BufferBinding for ArrayBufferBinding<'a> { fn target(&self) -> BufferBindingTarget { BufferBindingTarget::ArrayBuffer } } /// Represents a buffer that has been bound to the `GL_ELEMENT_ARRAY_BUFFER` /// binding target. pub struct ElementArrayBufferBinding<'a> { _phantom_ref: PhantomData<&'a mut Buffer>, _phantom_ptr: PhantomData<*mut ()> } impl<'a> BufferBinding for ElementArrayBufferBinding<'a> { fn target(&self) -> BufferBindingTarget { BufferBindingTarget::ElementArrayBuffer } } /// This type holds all of the OpenGL-state-related buffer objects. See the /// [`ContextOf`](../struct.ContextOf.html) docs for more details. pub struct BufferBinderOf<A, E> { array: A, element_array: E, _phantom: PhantomData<*mut ()> } /// A part of the OpenGL context that has all free buffer bindings. pub type BufferBinder = BufferBinderOf<ArrayBufferBinder, ElementArrayBufferBinder>; impl<A, E> BufferBinderOf<A, E> { /// Get the current buffer-object binders. /// /// # Safety /// The same rules apply to this method as the /// [`ContextOf::current_context()` method] /// (../struct.ContextOf.html#method.current_context). pub unsafe fn current() -> BufferBinder { BufferBinderOf { array: ArrayBufferBinder::current(), element_array: ElementArrayBufferBinder::current(), _phantom: PhantomData } } fn borrowed_mut<'a, BA, BE>(&'a mut self) -> BufferBinderOf<&'a mut BA, &'a mut BE> where A: BorrowMut<BA>, E: BorrowMut<BE> { BufferBinderOf { array: self.array.borrow_mut(), element_array: self.element_array.borrow_mut(), _phantom: PhantomData } } /// Replace the array-buffer-related context with a new value, returning /// the old value and a new buffer context. pub fn swap_array<NA>(self, new_array: NA) -> (A, BufferBinderOf<NA, E>) { ( self.array, BufferBinderOf { array: new_array, element_array: self.element_array, _phantom: PhantomData
} /// Replace the element-array-buffer-related context with a new value, /// returning the old value and a new buffer context. pub fn swap_element_array<NE>(self, new_element_array: NE) -> (E, BufferBinderOf<A, NE>) { ( self.element_array, BufferBinderOf { array: self.array, element_array: new_element_array, _phantom: PhantomData } ) } } impl<'a, A, E> ToRef<'a> for BufferBinderOf<A, E> where A: 'a + ToRef<'a>, E: 'a + ToRef<'a> { type Ref = BufferBinderOf<A::Ref, E::Ref>; fn to_ref(&'a self) -> Self::Ref { BufferBinderOf { array: self.array.to_ref(), element_array: self.element_array.to_ref(), _phantom: PhantomData } } } impl<'a, A, E> ToMut<'a> for BufferBinderOf<A, E> where A: 'a + ToMut<'a>, E: 'a + ToMut<'a> { type Mut = BufferBinderOf<A::Mut, E::Mut>; fn to_mut(&'a mut self) -> Self::Mut { BufferBinderOf { array: self.array.to_mut(), element_array: self.element_array.to_mut(), _phantom: PhantomData } } } /// The OpenGL state representing the `GL_ARRAY_BUFFER` target. pub struct ArrayBufferBinder { _phantom: PhantomData<*mut ()> } impl ArrayBufferBinder { /// Get the current `GL_ARRAY_BUFFER` binder. /// /// # Safety /// The same rules apply to this method as the /// [`ContextOf::current_context()` method] /// (../struct.ContextOf.html#method.current_context). pub unsafe fn current() -> Self { ArrayBufferBinder { _phantom: PhantomData } } /// Bind a buffer to the `GL_ARRAY_BUFFER` target, returning a binding. pub fn bind<'a>(&mut self, buffer: &'a mut Buffer) -> ArrayBufferBinding<'a> { let binding = ArrayBufferBinding { _phantom_ref: PhantomData, _phantom_ptr: PhantomData }; _bind_buffer(binding.target(), buffer); binding } } /// The OpenGL state representing the `GL_ELEMENT_ARRAY_BUFFER` target. pub struct ElementArrayBufferBinder { _phantom: PhantomData<*mut ()> } impl ElementArrayBufferBinder { /// Get the current `GL_ELEMENT_ARRAY_BUFFER` binder. /// /// # Safety /// The same rules apply to this method as the /// [`ContextOf::current_context()` method] /// (../struct.ContextOf.html#method.current_context). pub unsafe fn current() -> Self { ElementArrayBufferBinder { _phantom: PhantomData } } /// Bind a buffer to the `GL_ELEMENT_ARRAY_BUFFER` target, returning /// a binding. pub fn bind<'a>(&mut self, buffer: &'a mut Buffer) -> ElementArrayBufferBinding<'a> { let binding = ElementArrayBufferBinding { _phantom_ref: PhantomData, _phantom_ptr: PhantomData }; _bind_buffer(binding.target(), buffer); binding } }
} )
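// --- Usage sketch (editor's addition, not part of the original source) ---
// A minimal sketch of how the pieces above compose: split a free
// GL_ARRAY_BUFFER binding off the context, upload vertex data, describe the
// attribute layout, and draw. It is built only from the signatures in this
// file. `gl` and `position_attrib` stand in for values created elsewhere,
// and the variant names `StaticDraw`, `Float`, and `Triangles` are assumed
// to mirror the raw GL enums; they may differ in the actual crate.
//
//     let mut buffer = gl.gen_buffer();
//     let (mut binding, gl) = gl.bind_array_buffer(&mut buffer);
//
//     // Three 2D vertices of a triangle, reinterpreted as raw bytes.
//     let vertices: [f32; 6] = [0.0, 0.5, -0.5, -0.5, 0.5, -0.5];
//     let bytes: &[u8] = unsafe {
//         std::slice::from_raw_parts(vertices.as_ptr() as *const u8,
//                                    std::mem::size_of_val(&vertices))
//     };
//     gl.buffer_bytes(&mut binding, bytes, BufferDataUsage::StaticDraw);
//
//     unsafe {
//         // Two floats per vertex, tightly packed, starting at offset 0.
//         gl.vertex_attrib_pointer(position_attrib, 2, DataType::Float,
//                                  false, 0, 0);
//         gl.draw_arrays_range(&binding, DrawingMode::Triangles, 0, 3);
//     }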
permutation.py
#!/usr/bin/env python3
#
# Wrapper functions for the permutation feature importance.
#

##################################################### SOURCE START #####################################################

import numpy as np
import matplotlib.pyplot as mpl
import sklearn.inspection

### Calculate permutation importance, and set the feature importance
### (mean of the permutation importance over all trials) as model.feature_importances_.
def permutation_feature_importance(model, Xs, ys, **kwargs):

    ### Calculate permutation importance.
    permutation_importance = sklearn.inspection.permutation_importance(model, Xs, ys, **kwargs)

    ### Calculate the average permutation importance for each feature and set the average values
    ### as model.feature_importances_ to provide an interface compatible with scikit-learn.
    setattr(model, "feature_importances_", permutation_importance.importances_mean)

    return permutation_importance.importances

### Visualize permutation importance as a box diagram.
### The input arguments are:
###   - permutation_importances: np.array with shape (num_features, num_repeats),
###   - feature_names: list with length num_features,
###   - show: True or False.
def permutation_plot(permutation_importances, feature_names, show = True):

    ### Sort features by their average permutation importance.
##################################################### SOURCE FINISH #################################################### # Author: Tetsuya Ishikawa <[email protected]> # vim: expandtab tabstop=4 shiftwidth=4 fdm=marker
    sorted_idx = np.mean(permutation_importances, axis = 1).argsort()
    importances = permutation_importances[sorted_idx].T

    ### Convert to np.array so that a plain list of names can be fancy-indexed.
    label_names = np.array(feature_names)[sorted_idx]

    ### Plot box diagram.
    mpl.boxplot(importances, labels = label_names, vert = False)
    mpl.xlabel("Permutation feature importances (impact on model output)")
    mpl.grid()

    if show:
        mpl.show()
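### --- Usage sketch (editor's addition, not part of the original module) ---
### A minimal, self-contained example of the two helpers above. The dataset
### and model choices are illustrative assumptions only.
if __name__ == "__main__":
    import sklearn.datasets
    import sklearn.ensemble

    data  = sklearn.datasets.load_breast_cancer()
    model = sklearn.ensemble.RandomForestClassifier(random_state = 0).fit(data.data, data.target)

    ### Returned array has shape (num_features, num_repeats), ready for permutation_plot.
    importances = permutation_feature_importance(model, data.data, data.target,
                                                 n_repeats = 10, random_state = 0)
    permutation_plot(importances, data.feature_names, show = True)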