Dataset columns (all string-valued), with observed string lengths:

  file_name   3 to 137 characters
  prefix      0 to 918k characters
  suffix      0 to 962k characters
  middle      0 to 812k characters
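These four columns follow the usual fill-in-the-middle layout: each row splits one source file into a prefix, a held-out middle, and a suffix, so concatenating prefix + middle + suffix recovers the original file text. The sketch below illustrates that reassembly step; the reassemble helper and the abbreviated example row are hypothetical illustrations, only the column names come from the header above.

# Minimal sketch, assuming the usual fill-in-the-middle row layout described above.
# `reassemble` and the abbreviated example row are hypothetical; only the column
# names (file_name, prefix, suffix, middle) come from the dataset header.
def reassemble(row: dict) -> str:
    """Concatenate prefix + middle + suffix back into the original file text."""
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "file_name": "store.go",
    "prefix": "package simplexds\n\nfunc ",
    "middle": "randMaxPriorityOrZero",
    "suffix": "(max int) int { return 0 }\n",
}
print(reassemble(example_row))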
store.go
package simplexds import ( "math/rand" "sync" core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/wozz/simplexds/mesh" ) func
(max int) int { if rand.Intn(10) == 0 { return 0 } return rand.Intn(max) } // emulate nodes moving between priorities randomly func setRandomPrioritiesForNodesPerCluster(nodes []*node) (newNodeList []*node) { nodesPerCluster := make(map[string][]*node) for _, n := range nodes { nodesPerCluster[n.Cluster()] = append(nodesPerCluster[n.Cluster()], n) } for _, clusterNodes := range nodesPerCluster { // randomly set all nodes to priority zero to emulate panic mode where localities are unknown // or too many local nodes are unhealthy maxPriority := randMaxPriorityOrZero(len(clusterNodes)) nextPriority := 0 for _, nodeInCluster := range clusterNodes { if nextPriority > maxPriority { if maxPriority == 0 { nodeInCluster.priority = 0 } else { nodeInCluster.priority = rand.Intn(maxPriority) } } else { nodeInCluster.priority = nextPriority nextPriority++ } } newNodeList = append(newNodeList, clusterNodes...) } return } type node struct { nodeInfo *core.Node ipAddress string priority int streamID int64 } func (n *node) ID() string { return n.nodeInfo.GetId() } func (n *node) IP() string { return n.ipAddress } func (n *node) Cluster() string { return n.nodeInfo.GetCluster() } func (n *node) NodeType() string { return n.nodeInfo.GetMetadata().GetFields()["node_type"].GetStringValue() } func (n *node) PreferredPort() int { return int(n.nodeInfo.GetMetadata().GetFields()["preferred_port"].GetNumberValue()) } func (n *node) Priority() int { return n.priority } type store struct { mu sync.Mutex nodeMap map[int64]*node } func (s *store) shufflePriorities() { s.mu.Lock() defer s.mu.Unlock() var nodeList []*node for _, n := range s.nodeMap { nodeList = append(nodeList, n) } newNodeList := setRandomPrioritiesForNodesPerCluster(nodeList) for _, n := range newNodeList { s.nodeMap[n.streamID] = n } } func (s *store) allNodes() (nodeInfos []mesh.NodeInfo) { s.mu.Lock() defer s.mu.Unlock() for _, node := range s.nodeMap { nodeInfos = append(nodeInfos, node) } return } func (s *store) updateNode(id int64, nodeInfo *core.Node) (updated bool) { s.mu.Lock() defer s.mu.Unlock() node, ok := s.nodeMap[id] if !ok { return } if node.nodeInfo == nil { updated = true } node.nodeInfo = nodeInfo return } func (s *store) getGWNodeIDs() (ids []string) { s.mu.Lock() defer s.mu.Unlock() for _, node := range s.nodeMap { if node == nil { continue } if node.NodeType() == "gateway" { ids = append(ids, node.ID()) } } return } func (s *store) getNode(id int64) *node { s.mu.Lock() defer s.mu.Unlock() return s.nodeMap[id] } func (s *store) addNode(id int64, ip string) { s.mu.Lock() defer s.mu.Unlock() s.nodeMap[id] = &node{ ipAddress: ip, streamID: id, } } func (s *store) removeNode(id int64) { s.mu.Lock() defer s.mu.Unlock() delete(s.nodeMap, id) }
randMaxPriorityOrZero
behavioural_planner.py
#!/usr/bin/env python3 import numpy as np import math from obstacle_detection import check_for_obs # State machine states FOLLOW_LANE = 0 DECELERATE_TO_STOP = 1 STAY_STOPPED = 2 # Stop speed threshold STOP_THRESHOLD = 0.02 # Number of cycles before moving from stop sign. SEMAPHORE = 3 STOP_COUNTS = 10 class BehaviouralPlanner: def __init__(self, lookahead, lead_vehicle_lookahead, traffic_light_state): self._lookahead = lookahead self._follow_lead_vehicle_lookahead = lead_vehicle_lookahead self._state = FOLLOW_LANE self._follow_lead_vehicle = False self._obstacle_on_lane = False self._goal_state = [0.0, 0.0, 0.0] self._goal_index = 0 self._stop_count = 0 self._lookahead_collision_index = 0 self.traffic_light_state = traffic_light_state def set_lookahead(self, lookahead): self._lookahead = lookahead # Handles state transitions and computes the goal state. def transition_state(self, waypoints, ego_state, closed_loop_speed, camera_data, potential_obs): """Handles state transitions and computes the goal state. args: waypoints: current waypoints to track (global frame). length and speed in m and m/s. (includes speed to track at each x,y location.) format: [[x0, y0, v0], [x1, y1, v1], ... [xn, yn, vn]] example: waypoints[2][1]: returns the 3rd waypoint's y position waypoints[5]: returns [x5, y5, v5] (6th waypoint) ego_state: ego state vector for the vehicle. (global frame) format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed] ego_x and ego_y : position (m) ego_yaw : top-down orientation [-pi to pi] ego_open_loop_speed : open loop speed (m/s) closed_loop_speed: current (closed-loop) speed for vehicle (m/s) variables to set: self._goal_index: Goal index for the vehicle to reach i.e. waypoints[self._goal_index] gives the goal waypoint self._goal_state: Goal state for the vehicle to reach (global frame) format: [x_goal, y_goal, v_goal] self._state: The current state of the vehicle. available states: FOLLOW_LANE : Follow the global waypoints (lane). DECELERATE_TO_STOP : Decelerate to stop. STAY_STOPPED : Stay stopped. self._stop_count: Counter used to count the number of cycles which the vehicle was in the STAY_STOPPED state so far. useful_constants: STOP_THRESHOLD : Stop speed threshold (m). The vehicle should fully stop when its speed falls within this threshold. STOP_COUNTS : Number of cycles (simulation iterations) before moving from stop sign. """ # In this state, continue tracking the lane by finding the # goal index in the waypoint list that is within the lookahead # distance. Then, check to see if the waypoint path intersects # with any stop lines. If it does, then ensure that the goal # state enforces the car to be stopped before the stop line. # You should use the get_closest_index(), get_goal_index(), and # check_for_stop_signs() helper functions. # Make sure that get_closest_index() and get_goal_index() functions are # complete, and examine the check_for_stop_signs() function to # understand it. if self._state == FOLLOW_LANE: print("FOLLOW_LANE") # First, find the closest index to the ego vehicle. 
update_waypoints(self,waypoints,ego_state) tl_state = self.traffic_light_state.detect_traffic_light(camera_data) #TODO add trafficlight detection and collision_prediction # to change from state "FOLLOW_LANE" to state "DECELERATE_TO_STOP" if check_for_obs(potential_obs, ego_state,is_collision = False) or tl_state == 1: self._goal_state[2] = 0 self._state = DECELERATE_TO_STOP elif self._state == DECELERATE_TO_STOP: print("DECELERATE TO STOP...") update_waypoints(self,waypoints,ego_state) tl_state = self.traffic_light_state.detect_traffic_light(camera_data) if abs(closed_loop_speed) <= STOP_THRESHOLD: self._goal_state[2]=0 self._state = STAY_STOPPED self._stop_count = 0 elif tl_state != 1 and not check_for_obs(potential_obs, ego_state,is_collision = False): self._state = FOLLOW_LANE # In this state, check to see if we have stayed stopped for at # least STOP_COUNTS number of cycles. If so, we can now leave # the stop sign and transition to the next state. elif self._state == STAY_STOPPED: print("STAY_STOPPED") #TODO here make sure if no restriction change to state "FOLLOW_LANE" # We have stayed stopped for the required number of cycles. # Allow the ego vehicle to leave the stop sign. Once it has # passed the stop sign, return to lane following. # You should use the get_closest_index(), get_goal_index(), and # check_for_stop_signs() helper functions. print("Waiting: " + str(self._stop_count)) tl_state = self.traffic_light_state.detect_traffic_light(camera_data) if self._stop_count == STOP_COUNTS: update_waypoints(self, waypoints, ego_state) if tl_state != 1 and not check_for_obs(potential_obs, ego_state,is_collision = False): self._state = FOLLOW_LANE self._stop_count = 0 elif tl_state != 1 and not check_for_obs(potential_obs, ego_state,is_collision = False): self._stop_count += 1 else: raise ValueError('Invalid state value.') # Gets the goal index in the list of waypoints, based on the lookahead and # the current ego state. In particular, find the earliest waypoint that has accumulated # arc length (including closest_len) that is greater than or equal to self._lookahead. def get_goal_index(self, waypoints, ego_state, closest_len, closest_index): """Gets the goal index for the vehicle. Set to be the earliest waypoint that has accumulated arc length accumulated arc length (including closest_len) that is greater than or equal to self._lookahead. args: waypoints: current waypoints to track. (global frame) length and speed in m and m/s. (includes speed to track at each x,y location.) format: [[x0, y0, v0], [x1, y1, v1], ... [xn, yn, vn]] example: waypoints[2][1]: returns the 3rd waypoint's y position waypoints[5]: returns [x5, y5, v5] (6th waypoint) ego_state: ego state vector for the vehicle. (global frame) format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed] ego_x and ego_y : position (m) ego_yaw : top-down orientation [-pi to pi] ego_open_loop_speed : open loop speed (m/s) closest_len: length (m) to the closest waypoint from the vehicle. closest_index: index of the waypoint which is closest to the vehicle. i.e. waypoints[closest_index] gives the waypoint closest to the vehicle. returns: wp_index: Goal index for the vehicle to reach i.e. waypoints[wp_index] gives the goal waypoint """ # Find the farthest point along the path that is within the # lookahead distance of the ego vehicle. # Take the distance from the ego vehicle to the closest waypoint into # consideration. 
arc_length = closest_len wp_index = closest_index # In this case, reaching the closest waypoint is already far enough for # the planner. No need to check additional waypoints. if arc_length > self._lookahead: return wp_index # We are already at the end of the path. if wp_index == len(waypoints) - 1: return wp_index # Otherwise, find our next waypoint. while wp_index < len(waypoints) - 1: arc_length += np.sqrt((waypoints[wp_index][0] - waypoints[wp_index+1][0])**2 + (waypoints[wp_index][1] - waypoints[wp_index+1][1])**2) if arc_length > self._lookahead: break wp_index += 1 return wp_index % len(waypoints) # Checks to see if we need to modify our velocity profile to accomodate the # lead vehicle. def check_for_lead_vehicle(self, ego_state, lead_car_position): """Checks for lead vehicle within the proximity of the ego car, such that the ego car should begin to follow the lead vehicle. args: ego_state: ego state vector for the vehicle. (global frame) format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed] ego_x and ego_y : position (m) ego_yaw : top-down orientation [-pi to pi] ego_open_loop_speed : open loop speed (m/s) lead_car_position: The [x, y] position of the lead vehicle. Lengths are in meters, and it is in the global frame. sets: self._follow_lead_vehicle: Boolean flag on whether the ego vehicle should follow (true) the lead car or not (false). """ # Check lead car position delta vector relative to heading, as well as # distance, to determine if car should be followed. # Check to see if lead vehicle is within range, and is ahead of us. if not self._follow_lead_vehicle: # Compute the angle between the normalized vector between the lead vehicle # and ego vehicle position with the ego vehicle's heading vector. lead_car_delta_vector = [lead_car_position[0] - ego_state[0], lead_car_position[1] - ego_state[1]] lead_car_distance = np.linalg.norm(lead_car_delta_vector) # In this case, the car is too far away. if lead_car_distance > self._follow_lead_vehicle_lookahead: return lead_car_delta_vector = np.divide(lead_car_delta_vector, lead_car_distance) ego_heading_vector = [math.cos(ego_state[2]), math.sin(ego_state[2])] # Check to see if the relative angle between the lead vehicle and the ego # vehicle lies within +/- 45 degrees of the ego vehicle's heading. if np.dot(lead_car_delta_vector, ego_heading_vector) < (1 / math.sqrt(2)): return self._follow_lead_vehicle = True else: lead_car_delta_vector = [lead_car_position[0] - ego_state[0], lead_car_position[1] - ego_state[1]] lead_car_distance = np.linalg.norm(lead_car_delta_vector) # Add a 15m buffer to prevent oscillations for the distance check. if lead_car_distance < self._follow_lead_vehicle_lookahead + 15: return # Check to see if the lead vehicle is still within the ego vehicle's # frame of view. lead_car_delta_vector = np.divide(lead_car_delta_vector, lead_car_distance) ego_heading_vector = [math.cos(ego_state[2]), math.sin(ego_state[2])] if np.dot(lead_car_delta_vector, ego_heading_vector) > (1 / math.sqrt(2)): return self._follow_lead_vehicle = False # Compute the waypoint index that is closest to the ego vehicle, and return # it as well as the distance from the ego vehicle to that waypoint. def get_closest_index(waypoints, ego_state): """Gets closest index a given list of waypoints to the vehicle position. args: waypoints: current waypoints to track. (global frame) length and speed in m and m/s. (includes speed to track at each x,y location.) format: [[x0, y0, v0], [x1, y1, v1], ... 
[xn, yn, vn]] example: waypoints[2][1]: returns the 3rd waypoint's y position waypoints[5]: returns [x5, y5, v5] (6th waypoint) ego_state: ego state vector for the vehicle. (global frame) format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed] ego_x and ego_y : position (m) ego_yaw : top-down orientation [-pi to pi] ego_open_loop_speed : open loop speed (m/s) returns: [closest_len, closest_index]: closest_len: length (m) to the closest waypoint from the vehicle. closest_index: index of the waypoint which is closest to the vehicle. i.e. waypoints[closest_index] gives the waypoint closest to the vehicle. """ closest_len = float('Inf') closest_index = 0 for i in range(len(waypoints)): temp = (waypoints[i][0] - ego_state[0])**2 + (waypoints[i][1] - ego_state[1])**2 if temp < closest_len: closest_len = temp closest_index = i closest_len = np.sqrt(closest_len) return closest_len, closest_index # Checks if p2 lies on segment p1-p3, if p1, p2, p3 are collinear. def pointOnSegment(p1, p2, p3): if (p2[0] <= max(p1[0], p3[0]) and (p2[0] >= min(p1[0], p3[0])) and \ (p2[1] <= max(p1[1], p3[1])) and (p2[1] >= min(p1[1], p3[1]))): return True else: return False def update_waypoints(self, waypoints, ego_state): #First, find the closest index to the ego vehicle closest_len, closest_index = get_closest_index(waypoints, ego_state) # Next, find the goal index that lies within the lookahed distance # along the waypoints goal_index = self.get_goal_index(waypoints, ego_state, closest_len, closest_index) while waypoints[goal_index][2] <= 0.1: goal_index += 1
self._goal_index = goal_index self._goal_state = waypoints[goal_index]
send_commands_netmiko.py
from netmiko import ConnectHandler import yaml from pprint import pprint def send_show_command(device, show_command): with ConnectHandler(**device) as ssh: ssh.enable() result = ssh.send_command(show_command) return result def send_config_commands(device, config_commands): with ConnectHandler(**device) as ssh: ssh.enable() result = ssh.send_config_set(config_commands) return result def send_commands(device, config=None, show=None):
if __name__ == "__main__": commands = ["logging 10.255.255.1", "logging buffered 20010", "no logging console"] show_command = "sh ip int br" with open("devices.yaml") as f: dev_list = yaml.safe_load(f) send_commands(dev_list, config=commands) send_commands(dev_list, show=show_command)
if show: return send_show_command(device_list, show) elif config: return send_config_commands(device_list, config)
pygame_ess.py
# This file is part of Cryptography GUI, licensed under the MIT License. # Copyright (c) 2020 Benedict Woo Jun Kai # See LICENSE.md for more details. ###################################### # Import and initialize the librarys # ###################################### import logging import pygame import glob import os import traceback import textwrap from config import config ################## # Initialization # ################## logging.info('Loading pygame essentials classes...') pygame.init() screen = pygame.display.set_mode(config.screen_res()) ######################## # Essentials functions # ######################## class pygame_ess: '''Essentials classes, functions and variables for pygame''' ############################# # Shared / common variables # ############################# alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # Common defined colours class colour: '''Common colour types in RGB tuple form''' black = (0, 0, 0) white = (255, 255, 255) red = (255, 0, 0) gray = (43, 43, 43) whiteish = (213, 213, 213) ################# # Create events # ################# class create: '''Creating for pygame shapes and surfaces''' def surface(size:tuple = (1024, 768), background_fill:tuple = None, is_alpha:bool = False): # -> pygame.surface.Surface: '''Create a new surface''' # Create window based on if is alpha if is_alpha: window = pygame.surface.Surface(size, pygame.SRCALPHA) else: window = pygame.surface.Surface(size) # set background color if background_fill != None: window.fill(background_fill) return window ############### # Load events # ############### class load: '''Loading of images and objects to surfaces''' def images(image_page:list, file_type:str = '.png', is_alpha:bool = False) -> dict: '''Load all images in a given directory to pygame surface''' # Define variables images = dict() image_dir = 'images/{}/'.format('/'.join(image_page)) # If in code directory and not root, go back a step if os.path.basename(os.getcwd()) == 'code': image_dir = '../' + image_dir # Get all image file from givent directory image_dir_list = glob.glob(image_dir+"*"+file_type) # Warn on empty dir if len(image_dir_list) == 0: logging.error('No image found in {}'.format(image_dir)) # Load them into pygame for image in image_dir_list: image_name = image.split('/')[-1].split('\\')[-1].split('.')[0] images[image_name] = pygame.image.load(image).convert_alpha() if is_alpha else pygame.image.load(image).convert() images[image_name] = pygame.transform.smoothscale(images[image_name], (int(images[image_name].get_width()*config.scale_w()), int(images[image_name].get_height()*config.scale_w()))) return images def text(surface, object) -> pygame.surface.Surface: '''Load text of object, includes things liek text warping, align and multi line support''' # Grap text_data text_data = object.meta # Warp text if specified if text_data.warp_text != None: warpped_text = textwrap.wrap(text_data.text, width=text_data.warp_text) for line in range(len(warpped_text)): if text_data.align == 'left': warpped_text[line] = '{1:<{0}}'.format(text_data.warp_text, warpped_text[line]).rstrip() elif text_data.align == 'center': warpped_text[line] = '{1:^{0}}'.format(text_data.warp_text, warpped_text[line]).rstrip() elif text_data.align == 'right': warpped_text[line] = '{1:>{0}}'.format(text_data.warp_text, warpped_text[line]).rstrip() else: logging.error('Invalid alignment type {}'.format(text_data.align)) # No text wrapping defined else: warpped_text = [text_data.text] # Generate surface for text text_surface = 
pygame_ess.create.surface(object.frame.box_size(), is_alpha=True) # Render multi line text h = 0 for line in warpped_text: line_text = pygame.font.Font(text_data.font_type, text_data.font_size) rendered_text = line_text.render(line, True, text_data.colour) text_surface.blit(rendered_text, (0, h)) h += line_text.size(line)[1] # Load to surface surface.blit(text_surface, (object.frame.box_coord())) return text_surface def object(surface, object, state:str = '', load_text:bool = True) -> None: '''Load an object to a pygame surface''' surface.blit(object.images[object.type+state], (object.frame.image_coord())) # Load text of object is a textfield if object.type == 'textfield': pygame_ess.load.text(surface, object) def objects(surface, objects:dict, names:list) -> None: '''Load mutliple objects to a pygame surface''' # Loop through object specified and load them for name in names: # Try to load object specified try: pygame_ess.load.object(surface, objects[name]) # Error loading object except: logging.error('{} object not in objects dictionary.'.format(name)) def surface(surface, window) -> None: '''Load a surface onto another pygame surface''' surface.blit(window.surface, window.frame.box_coord()) def screen(surface, objects:dict) -> None: '''Load all objects given to screen''' # Load objects to window for object in objects.values(): # Load image of item pygame_ess.load.object(surface, object) ################## # Display events # ################## class display: '''Display text, objects and surfaces to screen''' def object(window, object, state:str = '', direct_to_screen:bool = False) -> None: '''Display an object to screen''' if direct_to_screen: screen.blit(object.images[object.type+state], (object.frame.image_coord())) pygame_ess.update() else: pygame_ess.load.object(window.surface, object, state) pygame_ess.display.screen(window) def objects(window, objects:dict, names:list, direct_to_screen:bool = False) -> None: '''Display mutliple objects to screen''' # Draw direct to screen if direct_to_screen: # Loop through object specified and load them for name in names: # Try to load object specified try: screen.blit(objects[name].images[objects[name].type], (objects[name].frame.image_coord())) # Error loading object except: logging.error('[{}] {} object not in objects dictionary.'.format(window.name, name)) pygame_ess.update() # Load objects to surface, then display to screen else: pygame_ess.load.objects(window.surface, objects, names) pygame_ess.display.screen(window) def surface(window, window_to_merge): '''Display a surface given to screen''' pygame_ess.load.surface(window.surface, window_to_merge) pygame_ess.display.screen(window) def screen(window, update_all:bool = False, objects:dict = None, animate:bool = False) -> None: '''Display all objects given to screen''' # Update all objects of the surface if update_all: pygame_ess.load.screen(window.surface, objects)
if animate: for i in range(768, -1, -48): window.frame.bx = i screen.blit(window.surface, (window.frame.bx, window.frame.by)) pygame_ess.update() if pygame_ess.buffer(window): return 'quit' else: screen.blit(window.surface, (window.frame.bx, window.frame.by)) pygame_ess.update() ##################### # Interaction event # ##################### class event: '''Process actios by the user''' def selection(window, selection_objects:dict, direct_to_screen:bool = False) -> dict: '''Check for mouse hover and selections''' selection_result = {'object_name':'', 'object_type':'', 'action_result':''} for selection_object in selection_objects.values(): # Skip selection check if runclass is empty if selection_object.runclass != None: # Check if mouse in selection object box mouse_hover_over_object = False while selection_object.in_box(pygame.mouse.get_pos(), window.frame.box_coord()): # Change to hover type if selection_object.hover_action and not mouse_hover_over_object: # Draws hover to surface pygame_ess.display.object(window, selection_object, '_hover', direct_to_screen) mouse_hover_over_object = True logging.debug('[{}] Hovered on {} {}'.format(window.name, selection_object.name, selection_object.type)) # Run click event click_result = pygame_ess.event.click(window, selection_object, selection_objects) # If clicked on object if click_result != False: # Remove mouse hover if mouse_hover_over_object: pygame_ess.load.object(window.surface, selection_object, '', direct_to_screen) # Load back previous screen if click_result == True: pygame_ess.display.screen(window) logging.info('loaded {}.'.format(window.name)) # Stores click_result selection_result['object_name'] = selection_object.name selection_result['object_type'] = selection_object.type selection_result['action_result'] = click_result # Return data of click result logging.info('[{}] object_name:{}, object_type:{}, action_result:{}'.format(window.name, selection_result['object_name'], selection_result['object_type'], selection_result['action_result'])) return selection_result # Moved out of hitbox if mouse_hover_over_object: pygame_ess.display.object(window, selection_object, '', direct_to_screen) # No selections/clicks were made return selection_result def click(window, selection_object, selection_objects) -> any: '''Check if mouse click on objects, and run defined actions''' for event in pygame.event.get(): # Check for left click if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1: logging.info('[{}] Clicked on {} {}'.format(window.name, selection_object.name, selection_object.type)) # When there is no function to run if type(selection_object.runclass) == str: return selection_object.runclass # Load new screen try: # Use selection_object as parameter if selection_object.runclass_parameter == True: return selection_object.runclass(window, selection_object) # No parameter needed elif selection_object.runclass_parameter == False: return selection_object.runclass() # Use custom parameter else: return selection_object.runclass(selection_object.runclass_parameter) # When errors loading screen/runclass except: logging.error('error running {} runclass.'.format(selection_object.runclass)) traceback.print_exc() return True # When press closed windows if event.type == pygame.QUIT: return 'quit' # Check for other events pygame_ess.event.scroll(window, event) pygame_ess.event.keyboard(selection_objects, event) # User did not click return False def keyboard(selection_objects, event): pass def scroll(window, event) -> None: '''Scrolling of 
surface''' # Check if scrolling is needed if config.screen.height - window.frame.h < 0 and window.scroll: # Check of scroll action if event.type == pygame.MOUSEBUTTONDOWN: # Scroll up if event.button == 4: window.frame.by = min(window.frame.by + config.scroll_speed, 0) pygame_ess.display.screen(window) logging.debug('[{}] scrolled up {}'.format(window.name, window.frame.by)) # Scroll down elif event.button == 5: window.frame.by = max(window.frame.by - config.scroll_speed, min(config.screen.height - window.frame.h, 0)) pygame_ess.display.screen(window) logging.debug('[{}] scrolled down {}'.format(window.name, window.frame.by)) ######################## # Other core functions # ######################## def set_caption(caption:str = 'pygame time!'): '''Set window header title''' pygame.display.set_caption(caption) logging.debug('window captions set to {}'.format(caption)) def update(tick:int = config.ticks): '''Draw display changes to screen''' pygame.display.flip() pygame.display.update() pygame.time.Clock().tick_busy_loop(tick) def buffer(window) -> bool: '''Loop through pygame events and check of quit and scrolling''' for event in pygame.event.get(): if event.type == pygame.QUIT: return True pygame_ess.event.scroll(window, event) def quit(): '''Exit from program''' logging.info('Exiting program...') pygame.quit()
# Output window to screen
main.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // godoc2md converts godoc formatted package documentation into Markdown format. // // // Usage // // godoc2md $PACKAGE > $GOPATH/src/$PACKAGE/README.md package main import ( "bytes" "flag" "fmt" "go/build" "io/ioutil" "log" "os" "path" "path/filepath" "runtime" "strings" "text/template" "golang.org/x/tools/godoc" "golang.org/x/tools/godoc/vfs" ) var ( verbose = flag.Bool("v", false, "verbose mode") // file system roots // TODO(gri) consider the invariant that goroot always end in '/' goroot = flag.String("goroot", runtime.GOROOT(), "Go root directory") // layout control tabWidth = flag.Int("tabwidth", 4, "tab width") showTimestamps = flag.Bool("timestamps", false, "show timestamps with directory listings") altPkgTemplate = flag.String("template", "", "path to an alternate template file") showPlayground = flag.Bool("play", false, "enable playground in web interface") showExamples = flag.Bool("ex", false, "show examples in command line mode") declLinks = flag.Bool("links", true, "link identifiers to their declarations") // The hash format for Github is the default `#L%d`; but other source control platforms do not // use the same format. For example Bitbucket Enterprise uses `#%d`. This option provides the // user the option to switch the format as needed and still remain backwards compatible. srcLinkHashFormat = flag.String("hashformat", "#L%d", "source link URL hash format") srcLinkFormat = flag.String("srclink", "", "if set, format for entire source link") ) func usage() { fmt.Fprintf(os.Stderr, "usage: godoc2md package [name ...]\n") flag.PrintDefaults() os.Exit(2) } var ( pres *godoc.Presentation fs = vfs.NameSpace{} funcs = map[string]interface{}{ "comment_md": commentMdFunc, "base": path.Base, "md": mdFunc, "pre": preFunc, "kebab": kebabFunc, "bitscape": bitscapeFunc, //Escape [] for bitbucket confusion "trim_prefix": strings.TrimPrefix, } ) func commentMdFunc(comment string) string { var buf bytes.Buffer ToMD(&buf, comment) return buf.String() } func mdFunc(text string) string { text = strings.Replace(text, "*", "\\*", -1) text = strings.Replace(text, "_", "\\_", -1) return text } func preFunc(text string) string { return "``` go\n" + text + "\n```" } // Original Source https://github.com/golang/tools/blob/master/godoc/godoc.go#L562 func srcLinkFunc(s string) string { s = path.Clean("/" + s) if !strings.HasPrefix(s, "/src/") { s = "/src" + s } return s } // Removed code line that always substracted 10 from the value of `line`. // Made format for the source link hash configurable to support source control platforms other than Github. // Original Source https://github.com/golang/tools/blob/master/godoc/godoc.go#L540 func srcPosLinkFunc(s string, line, low, high int) string { if *srcLinkFormat != "" { return fmt.Sprintf(*srcLinkFormat, s, line, low, high)
s = srcLinkFunc(s) var buf bytes.Buffer template.HTMLEscape(&buf, []byte(s)) // selection ranges are of form "s=low:high" if low < high { fmt.Fprintf(&buf, "?s=%d:%d", low, high) // no need for URL escaping if line < 1 { line = 1 } } // line id's in html-printed source are of the // form "L%d" (on Github) where %d stands for the line number if line > 0 { fmt.Fprintf(&buf, *srcLinkHashFormat, line) // no need for URL escaping } return buf.String() } func readTemplate(name, data string) *template.Template { // be explicit with errors (for app engine use) t, err := template.New(name).Funcs(pres.FuncMap()).Funcs(funcs).Parse(data) if err != nil { log.Fatal("readTemplate: ", err) } return t } func kebabFunc(text string) string { s := strings.Replace(strings.ToLower(text), " ", "-", -1) s = strings.Replace(s, ".", "-", -1) s = strings.Replace(s, "\\*", "42", -1) return s } func bitscapeFunc(text string) string { s := strings.Replace(text, "[", "\\[", -1) s = strings.Replace(s, "]", "\\]", -1) return s } func main() { flag.Usage = usage flag.Parse() // Check usage if flag.NArg() == 0 { usage() } // use file system of underlying OS fs.Bind("/", vfs.OS(*goroot), "/", vfs.BindReplace) // Bind $GOPATH trees into Go root. for _, p := range filepath.SplitList(build.Default.GOPATH) { fs.Bind("/src/pkg", vfs.OS(p), "/src", vfs.BindAfter) } corpus := godoc.NewCorpus(fs) corpus.Verbose = *verbose pres = godoc.NewPresentation(corpus) pres.TabWidth = *tabWidth pres.ShowTimestamps = *showTimestamps pres.ShowPlayground = *showPlayground pres.ShowExamples = *showExamples pres.DeclLinks = *declLinks pres.SrcMode = false pres.HTMLMode = false pres.URLForSrcPos = srcPosLinkFunc pres.AllMode = true if *altPkgTemplate != "" { buf, err := ioutil.ReadFile(*altPkgTemplate) if err != nil { log.Fatal(err) } pres.PackageText = readTemplate("package.txt", string(buf)) } else { pres.PackageText = readTemplate("package.txt", pkgTemplate) } if err := godoc.CommandLine(os.Stdout, fs, pres, flag.Args()); err != nil { log.Print(err) } }
}
mod.rs
use analyze; use parse::tree::{Class, CompilationUnit, Method, VariableDeclarator}; use std::any::Any; use tokenize::span::Span; #[cfg(test)] pub mod test_common; pub mod block; pub mod compilation_unit; pub mod def; pub mod expr; pub mod import; pub mod statement; pub mod tpe; #[derive(Debug, PartialEq)] pub struct Usage<'def> { pub span: Span<'def>, pub def: Definition<'def>, } #[derive(Debug, PartialEq, Copy, Clone)] pub enum Definition<'a> { Package(*const analyze::definition::Package<'a>), Class(*const analyze::definition::Class<'a>), Method(*const analyze::definition::MethodDef<'a>), Field(*const analyze::definition::FieldDef<'a>), TypeParam(*const analyze::definition::TypeParam<'a>), Param(*const analyze::definition::Param<'a>), VariableDeclarator(*const VariableDeclarator<'a>), } impl<'a> Definition<'a> { pub fn ptr(&self) -> usize { match self { Definition::Package(p) => *p as usize, Definition::Class(c) => *c as usize, Definition::Method(m) => *m as usize, Definition::VariableDeclarator(v) => *v as usize, Definition::Field(f) => *f as usize, Definition::TypeParam(t) => *t as usize, Definition::Param(p) => *p as usize, } } pub fn span(&self) -> Option<&Span<'a>> { match self { Definition::Package(_) => None, Definition::Class(c) => unsafe { &**c }.span_opt.as_ref(), Definition::Method(m) => unsafe { &**m }.span_opt.as_ref(), Definition::Field(f) => unsafe { &**f }.span_opt.as_ref(), Definition::VariableDeclarator(v) => { let v = unsafe { &**v }; Some(&v.name) } Definition::TypeParam(t) => unsafe { &**t }.span_opt.as_ref(), Definition::Param(p) => Some(&unsafe { &**p }.name), } } } #[derive(Debug, PartialEq)]
pub usages: Vec<Usage<'def>>, } pub fn apply<'def, 'def_ref>(unit: &'def_ref CompilationUnit<'def>) -> Overlay<'def> { let mut overlay = Overlay { defs: vec![], usages: vec![], }; compilation_unit::apply(unit, &mut overlay); overlay }
pub struct Overlay<'def> { pub defs: Vec<Definition<'def>>,
normal-services.spec.tsx
import 'reflect-metadata'; import React from 'react'; import { cleanup, render } from '@testing-library/react'; import { Service, ServiceContainer, useService } from '../src'; @Service() class UserService {} @Service() class ProductService {} describe('normal services', () => { afterEach(() => { cleanup(); }); it('renders without crashing', () => { function
() { return ( <ServiceContainer services={[UserService, ProductService]}> hello </ServiceContainer> ); } const { getByText } = render(<App />); expect(getByText('hello')).toBeTruthy(); }); it('obtains the correct service', () => { function Injecter() { const service = useService(UserService); expect(service instanceof UserService).toBeTruthy(); return <div>hello</div>; } function App() { return ( <ServiceContainer services={[UserService, ProductService]}> <Injecter /> </ServiceContainer> ); } render(<App />); }); it('default behavior provides singleton service', () => { function Injecter() { const service1 = useService(UserService); const service2 = useService(UserService); expect(service1 === service2).toBeTruthy(); return <div>hello</div>; } function App() { return ( <ServiceContainer services={[UserService, ProductService]}> <Injecter /> </ServiceContainer> ); } render(<App />); }); it('useClass works', () => { function Injecter() { const service = useService<UserService>('token'); expect(service instanceof UserService).toBeTruthy(); return <div>hello</div>; } function App() { return ( <ServiceContainer services={[{ provide: 'token', useClass: UserService }]} > <Injecter /> </ServiceContainer> ); } render(<App />); }); it('useValue works', () => { function Injecter() { const service = useService<UserService>('token'); expect(service instanceof UserService).toBeTruthy(); return <div>hello</div>; } function App() { return ( <ServiceContainer services={[{ provide: 'token', useValue: new UserService() }]} > <Injecter /> </ServiceContainer> ); } render(<App />); }); it('useFactory works', () => { function Injecter() { const service = useService<UserService>('token'); expect(service instanceof UserService).toBeTruthy(); return <div>hello</div>; } function App() { return ( <ServiceContainer services={[{ provide: 'token', useFactory: () => new UserService() }]} > <Injecter /> </ServiceContainer> ); } render(<App />); }); });
App
appcreds.go
package nmsdk import ( "errors" "fmt" "os" "path" vault "github.com/hashicorp/vault/api" "github.com/nats-io/jwt/v2" "github.com/nats-io/nats.go" ) func makeAppCredsPath(mount, prefix, app string) string { return path.Join(mount, "/data/", prefix, "/apps/creds/", app) } type AppCredsRetriever struct { vaultMount string vaultPrefix string appName string vault *vault.Client } func NewAppCredsRetrieverFromEnv(appName string) (AppCredsRetriever, error) { prefix := os.Getenv("NATS_MANAGER_VAULT_PREFIX") mount := os.Getenv("NATS_MANAGER_VAULT_MOUNT") vc, err := vault.NewClient(vault.DefaultConfig()) if err != nil { return AppCredsRetriever{}, err } return AppCredsRetriever{ vaultMount: mount, vaultPrefix: prefix, appName: appName, vault: vc, }, nil } func NewAppCredsRetriever(appName, vaultMount, vaultPrefix string, vault *vault.Client) AppCredsRetriever { return AppCredsRetriever{ appName: appName, vaultMount: vaultMount, vaultPrefix: vaultPrefix, vault: vault, } } func wipeBytes(bs []byte) { for i := range bs { bs[i] = 'X' } } func (r AppCredsRetriever) NatsCredsCBs() (nats.UserJWTHandler, nats.SignatureHandler) { getCreds := func() ([]byte, error) { secret, err := r.vault.Logical().Read(makeAppCredsPath(r.vaultMount, r.vaultPrefix, r.appName)) if err != nil { return nil, fmt.Errorf("could not read app credentials from Vault: %w", err) } secretData, ok := secret.Data["data"].(map[string]interface{}) if !ok { return nil, fmt.Errorf("invalid secret data: %w", err) } creds, ok := secretData["creds"].(string) if !ok { return nil, errors.New("could not extract credentials from secret") } return []byte(creds), nil } jwtCB := func() (string, error) { creds, err := getCreds() if err != nil
defer wipeBytes(creds) return jwt.ParseDecoratedJWT(creds) } signCB := func(nonce []byte) ([]byte, error) { creds, err := getCreds() if err != nil { return nil, err } defer wipeBytes(creds) nkey, err := jwt.ParseDecoratedUserNKey(creds) if err != nil { return nil, err } defer nkey.Wipe() return nkey.Sign(nonce) } return jwtCB, signCB }
{ return "", err }
release.go
package cli import ( "errors" "fmt" "os" "path/filepath" "github.com/jimeh/build-emacs-for-macos/pkg/plan" "github.com/jimeh/build-emacs-for-macos/pkg/release" "github.com/jimeh/build-emacs-for-macos/pkg/repository" cli2 "github.com/urfave/cli/v2" ) type releaseOptions struct { Plan *plan.Plan Repository *repository.Repository Name string GithubToken string } func
() *cli2.Command { tokenDefaultText := "" if len(os.Getenv("GITHUB_TOKEN")) > 0 { tokenDefaultText = "***" } return &cli2.Command{ Name: "release", Usage: "manage GitHub releases", Flags: []cli2.Flag{ &cli2.StringFlag{ Name: "plan", Usage: "path to build plan YAML file produced by " + "emacs-builder plan", Aliases: []string{"p"}, EnvVars: []string{"EMACS_BUILDER_PLAN"}, TakesFile: true, }, &cli2.StringFlag{ Name: "repository", Aliases: []string{"repo", "r"}, Usage: "owner/name of GitHub repo to check for release, " + "ignored if a plan is provided", EnvVars: []string{"GITHUB_REPOSITORY"}, Value: "jimeh/emacs-builds", }, &cli2.StringFlag{ Name: "name", Aliases: []string{"n"}, Usage: "name of release to operate on, ignored if plan " + "is provided", }, &cli2.StringFlag{ Name: "github-token", Usage: "GitHub API Token", EnvVars: []string{"GITHUB_TOKEN"}, DefaultText: tokenDefaultText, }, }, Subcommands: []*cli2.Command{ releaseCheckCmd(), releasePublishCmd(), releaseBulkCmd(), }, } } func releaseActionWrapper( f func(*cli2.Context, *Options, *releaseOptions) error, ) func(*cli2.Context) error { return actionWrapper(func(c *cli2.Context, opts *Options) error { rOpts := &releaseOptions{ Name: c.String("name"), GithubToken: c.String("github-token"), } if r := c.String("repository"); r != "" { var err error rOpts.Repository, err = repository.NewGitHub(r) if err != nil { return err } } if f := c.String("plan"); f != "" { p, err := plan.Load(f) if err != nil { return err } rOpts.Plan = p } return f(c, opts, rOpts) }) } func releaseCheckCmd() *cli2.Command { return &cli2.Command{ Name: "check", Usage: "check if a GitHub release exists and has specified " + "asset files", ArgsUsage: "[<asset-file> ...]", Action: releaseActionWrapper(releaseCheckAction), } } func releaseCheckAction( c *cli2.Context, opts *Options, rOpts *releaseOptions, ) error { rlsOpts := &release.CheckOptions{ Repository: rOpts.Repository, ReleaseName: rOpts.Name, AssetFiles: c.Args().Slice(), GithubToken: rOpts.GithubToken, } if rOpts.Plan != nil && rOpts.Plan.Release != nil { rlsOpts.ReleaseName = rOpts.Plan.Release.Name } if rOpts.Plan != nil && rOpts.Plan.Output != nil { rlsOpts.AssetFiles = []string{rOpts.Plan.Output.DiskImage} } return release.Check(c.Context, rlsOpts) } func releasePublishCmd() *cli2.Command { return &cli2.Command{ Name: "publish", Usage: "publish a GitHub release with specified asset " + "files", ArgsUsage: "[<asset-file> ...]", Flags: []cli2.Flag{ &cli2.StringFlag{ Name: "sha", Aliases: []string{"s"}, Usage: "git SHA to create release on", EnvVars: []string{"GITHUB_SHA"}, }, &cli2.StringFlag{ Name: "type", Aliases: []string{"t"}, Usage: "release type, must be normal, prerelease, or draft", Value: "normal", }, &cli2.StringFlag{ Name: "title", Usage: "release title, will use release name if not " + "specified", Value: "", }, }, Action: releaseActionWrapper(releasePublishAction), } } func releasePublishAction( c *cli2.Context, opts *Options, rOpts *releaseOptions, ) error { rlsOpts := &release.PublishOptions{ Repository: rOpts.Repository, CommitRef: c.String("release-sha"), ReleaseName: rOpts.Name, ReleaseTitle: c.String("title"), AssetFiles: c.Args().Slice(), GithubToken: rOpts.GithubToken, } rlsType := c.String("type") switch rlsType { case "draft": rlsOpts.ReleaseType = release.Draft case "prerelease": rlsOpts.ReleaseType = release.Prerelease case "normal": rlsOpts.ReleaseType = release.Normal default: return fmt.Errorf("invalid --type \"%s\"", rlsType) } if rOpts.Plan != nil { if rOpts.Plan.Release != nil 
{ rlsOpts.ReleaseName = rOpts.Plan.Release.Name rlsOpts.ReleaseTitle = rOpts.Plan.Release.Title if rOpts.Plan.Release.Draft { rlsOpts.ReleaseType = release.Draft } else if rOpts.Plan.Release.Prerelease { rlsOpts.ReleaseType = release.Prerelease } } if rOpts.Plan.Output != nil { rlsOpts.AssetFiles = []string{ filepath.Join( rOpts.Plan.Output.Directory, rOpts.Plan.Output.DiskImage, ), } } } return release.Publish(c.Context, rlsOpts) } func releaseBulkCmd() *cli2.Command { return &cli2.Command{ Name: "bulk", Usage: "bulk modify GitHub releases", ArgsUsage: "", Flags: []cli2.Flag{ &cli2.StringFlag{ Name: "name", Usage: "regexp pattern matching release names to modify", }, &cli2.StringFlag{ Name: "prerelease", Usage: "change prerelease flag, must be \"true\" or " + "\"false\", otherwise prerelease value is not changed", }, &cli2.BoolFlag{ Name: "dry-run", Usage: "do not perform any changes", }, }, Action: releaseActionWrapper(releaseBulkAction), } } func releaseBulkAction( c *cli2.Context, opts *Options, rOpts *releaseOptions, ) error { bulkOpts := &release.BulkOptions{ Repository: rOpts.Repository, NamePattern: c.String("name"), DryRun: c.Bool("dry-run"), GithubToken: rOpts.GithubToken, } switch c.String("prerelease") { case "true": v := true bulkOpts.Prerelease = &v case "false": v := false bulkOpts.Prerelease = &v case "": default: return errors.New( "--prerelease by me \"true\" or \"false\" when specified", ) } return release.Bulk(c.Context, bulkOpts) }
releaseCmd
processor.ts
import type { Projection } from 'mongodb'; import type { TValue, MongoContext, Effect, EffectTypes } from '../types/event-sourcing'; export async function processor(context: MongoContext, effect: Effect<EffectTypes>) { const { session, collection, documentId } = context; const { type, key, value, sliceSize } = effect; // TODO void sliceSize; switch (type) { case 'set': { if (key.length === 0) { await collection.replaceOne( { _id: documentId }, { ...value, }, { session, upsert: true, } ); } else { await collection.updateOne( { _id: documentId }, { $set: { [key.join('.')]: value, }, }, { session, upsert: true } ); } break; } case 'remove': { if (key.length === 0) { await collection.deleteOne({ _id: documentId }, { session }); } else { await collection.updateOne( { _id: documentId }, { $unset: { [key.join('.')]: '', }, }, { session, upsert: true } ); } break; } case 'merge': { const $set: { [key: string]: TValue } = {}; for (const kv in value) { if (!Object.prototype.hasOwnProperty.call(value, kv)) { continue; } $set[[...key, kv].join('.')] = value[kv]; }
await collection.updateOne( { _id: documentId }, { $set, }, { session, upsert: true } ); break; } case 'setMaximum': { await collection.updateOne( { _id: documentId }, { $max: { [key.join('.')]: value, }, }, { session, upsert: true } ); break; } case 'setMinimum': { await collection.updateOne( { _id: documentId }, { $min: { [key.join('.')]: value, }, }, { session, upsert: true } ); break; } case 'increment': { await collection.updateOne( { _id: documentId }, { $inc: { [key.join('.')]: value, }, }, { session, upsert: true } ); break; } case 'decrement': { await collection.updateOne( { _id: documentId }, { $inc: { [key.join('.')]: -value, }, }, { session, upsert: true } ); break; } case 'multiply': { await collection.updateOne( { _id: documentId }, { $mul: { [key.join('.')]: value, }, }, { session, upsert: true } ); break; } case 'divide': { await collection.updateOne( { _id: documentId }, { $mul: { [key.join('.')]: 1 / value, }, }, { session, upsert: true } ); break; } case 'rename': { await collection.updateOne( { _id: documentId }, { $rename: { [key.join('.')]: [].concat(value).join('.'), }, }, { session, upsert: true } ); break; } case 'addToSet': { const items = [].concat(value); await collection.updateOne( { _id: documentId }, { $addToSet: { [key.join('.')]: { $each: items, }, }, }, { session, upsert: true } ); break; } case 'pushFront': { const items = [].concat(value); await collection.updateOne( { _id: documentId }, { $push: { [key.join('.')]: { $each: items, $position: 0, }, }, }, { session, upsert: true } ); break; } case 'popFront': { await collection.updateOne( { _id: documentId }, { $pop: { [key.join('.')]: -1, }, }, { session, upsert: true } ); break; } case 'pushBack': { const items = [].concat(value); await collection.updateOne( { _id: documentId }, { $push: { [key.join('.')]: { $each: items, }, }, }, { session, upsert: true } ); break; } case 'popBack': { await collection.updateOne( { _id: documentId }, { $pop: { [key.join('.')]: 1, }, }, { session, upsert: true } ); break; } case 'pullIN': { const items = [].concat(value); await collection.updateOne( { _id: documentId }, { $pull: { [key.join('.')]: { $in: items, }, }, }, { session, upsert: true } ); break; } case 'pullNIN': { const items = [].concat(value); await collection.updateOne( { _id: documentId }, { $pull: { [key.join('.')]: { $nin: items }, }, }, { session, upsert: true } ); break; } case 'pullEQ': { await collection.updateOne( { _id: documentId }, { $pull: { [key.join('.')]: { $in: [value] }, }, }, { session, upsert: true } ); break; } case 'pullGT': { if (Object(value) === value) { throw new TypeError(); } if (Array.isArray(value)) { throw new TypeError(); } await collection.updateOne( { _id: documentId }, { $pull: { [key.join('.')]: { $gt: value }, }, }, { session, upsert: true } ); break; } case 'pullGTE': { if (Object(value) === value) { throw new TypeError(); } await collection.updateOne( { _id: documentId }, { $pull: { [key.join('.')]: { $gte: value }, }, }, { session, upsert: true } ); break; } case 'pullLT': { if (Object(value) === value) { throw new TypeError(); } if (Array.isArray(value)) { throw new TypeError(); } await collection.updateOne( { _id: documentId }, { $pull: { [key.join('.')]: { $lt: value }, }, }, { session, upsert: true } ); break; } case 'pullLTE': { if (Object(value) === value) { throw new TypeError(); } if (Array.isArray(value)) { throw new TypeError(); } await collection.updateOne( { _id: documentId }, { $pull: { [key.join('.')]: { $lte: value }, }, }, { session, upsert: true } ); 
break; } case 'pullNE': { await collection.updateOne( { _id: documentId }, { $pull: { [key.join('.')]: { $ne: value }, }, }, { session, upsert: true } ); break; } case 'get': { const keyLength = key.length; const projection: Projection<any> = keyLength === 0 ? { _id: 0, _version: 0 } : { [key.join('.')]: 1, _id: 0, }; const result = await collection.findOne({ _id: documentId }, { projection, session }); let pointer = result; for (let index = 0; index < keyLength; index++) { if (pointer == null) { return undefined; } const pointerKey: any = key[index]; pointer = pointer[pointerKey]; } return pointer; } } }
growth.py
# -*- coding: utf-8 -*- # Copyright 2018 Novo Nordisk Foundation Center for Biosustainability, # Technical University of Denmark. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provide an interface for growth experiments.""" from __future__ import absolute_import import logging from pandas import DataFrame from memote.experimental.experiment import Experiment __all__ = ("GrowthExperiment",) LOGGER = logging.getLogger(__name__) class GrowthExperiment(Experiment):
"""Represent a growth experiment.""" SCHEMA = "growth.json" def __init__(self, **kwargs): """ Initialize a growth experiment. Parameters ---------- kwargs """ super(GrowthExperiment, self).__init__(**kwargs) def load(self, dtype_conversion=None): """ Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations. """ if dtype_conversion is None: dtype_conversion = {"growth": str} super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion) self.data["growth"] = self.data["growth"].isin(self.TRUTHY) def evaluate(self, model, threshold=0.1): """Evaluate in silico growth rates.""" with model: if self.medium is not None: self.medium.apply(model) if self.objective is not None: model.objective = self.objective model.add_cons_vars(self.constraints) threshold *= model.slim_optimize() growth = list() for row in self.data.itertuples(index=False): with model: exchange = model.reactions.get_by_id(row.exchange) if bool(exchange.reactants): exchange.lower_bound = -row.uptake else: exchange.upper_bound = row.uptake growth.append(model.slim_optimize() >= threshold) return DataFrame({ "exchange": self.data["exchange"], "growth": growth })
internal.rs
use super::CandidType; use num_enum::TryFromPrimitive; use std::cell::RefCell; use std::collections::HashMap; // This is a re-implementation of std::any::TypeId to get rid of 'static constraint. // The current TypeId doesn't consider lifetime while computing the hash, which is // totally fine for IDL type, as we don't care about lifetime at all. #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] pub struct TypeId { id: usize, } impl TypeId { pub fn of<T: ?Sized>() -> Self { TypeId { id: TypeId::of::<T> as usize, } } } #[derive(Debug, PartialEq, Hash, Eq, Clone)] pub enum Type { Null, Bool, Nat, Int, Nat8, Nat16, Nat32, Nat64, Int8, Int16, Int32, Int64, Float32, Float64, Text, Reserved, Empty, Knot(TypeId), // For recursive types from Rust Var(String), // For variables from Candid file Unknown, Opt(Box<Type>), Vec(Box<Type>), Record(Vec<Field>), Variant(Vec<Field>), Func(Function), Service(Vec<(String, Function)>), Principal, } #[derive(Debug, PartialEq, Hash, Eq, Clone)] pub struct Field { pub id: String, pub hash: u32, pub ty: Type, } #[derive(Debug, PartialEq, Hash, Eq, Clone)] pub struct Function { pub modes: Vec<crate::parser::types::FuncMode>, pub args: Vec<Type>, pub rets: Vec<Type>, } impl Function { pub fn is_query(&self) -> bool { self.modes.contains(&crate::parser::types::FuncMode::Query) } } #[derive(Debug, PartialEq, TryFromPrimitive)] #[repr(i64)] pub(crate) enum Opcode { Null = -1, Bool = -2, Nat = -3, Int = -4, Nat8 = -5, Nat16 = -6, Nat32 = -7, Nat64 = -8, Int8 = -9, Int16 = -10, Int32 = -11, Int64 = -12, Float32 = -13, Float64 = -14, Text = -15, Reserved = -16, Empty = -17, Opt = -18, Vec = -19, Record = -20, Variant = -21, Principal = -24, } pub fn is_primitive(t: &Type) -> bool { use self::Type::*; match t { Null | Bool | Nat | Int | Text => true, Nat8 | Nat16 | Nat32 | Nat64 => true, Int8 | Int16 | Int32 | Int64 => true, Float32 | Float64 => true, Reserved | Empty => true, Unknown => panic!("Unknown type"), Var(_) => panic!("Variable"), // Var may or may not be a primitive, so don't ask me Knot(_) => true, Opt(_) | Vec(_) | Record(_) | Variant(_) => false, Func(_) | Service(_) => false, Principal => true, } } pub fn unroll(t: &Type) -> Type { use self::Type::*; match t { Knot(id) => find_type(*id).unwrap(), Opt(ref t) => Opt(Box::new(unroll(t))), Vec(ref t) => Vec(Box::new(unroll(t))), Record(fs) => Record( fs.iter() .map(|Field { id, hash, ty }| Field { id: id.to_string(), hash: *hash, ty: unroll(ty), }) .collect(), ), Variant(fs) => Variant( fs.iter() .map(|Field { id, hash, ty }| Field { id: id.to_string(), hash: *hash, ty: unroll(ty), }) .collect(), ), _ => (*t).clone(), } } thread_local! { static ENV: RefCell<HashMap<TypeId, Type>> = RefCell::new(HashMap::new()); } pub(crate) fn find_type(id: TypeId) -> Option<Type> { ENV.with(|e| match e.borrow().get(&id) { None => None, Some(t) => Some((*t).clone()), }) } // only for debugging #[allow(dead_code)] pub(crate) fn show_env() { ENV.with(|e| println!("{:?}", e.borrow())); } pub(crate) fn env_add(id: TypeId, t: Type) { ENV.with(|e| drop(e.borrow_mut().insert(id, t))) } pub fn get_type<T>(_v: &T) -> Type where T: CandidType,
{ T::ty() }
get_resources.go
package get import ( "context" "fmt" "github.com/pkg/errors" "github.com/spf13/cobra" kumactl_cmd "github.com/kumahq/kuma/app/kumactl/pkg/cmd" "github.com/kumahq/kuma/app/kumactl/pkg/output" "github.com/kumahq/kuma/app/kumactl/pkg/output/printers" "github.com/kumahq/kuma/pkg/core/resources/model" rest_types "github.com/kumahq/kuma/pkg/core/resources/model/rest" core_store "github.com/kumahq/kuma/pkg/core/resources/store" ) func NewGetResourcesCmd(pctx *kumactl_cmd.RootContext, desc model.ResourceTypeDescriptor) *cobra.Command { cmd := &cobra.Command{ Use: desc.KumactlListArg, Short: fmt.Sprintf("Show %s", desc.Name), Long: fmt.Sprintf("Show %s entities.", desc.Name), Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, _ []string) error { rs, err := pctx.CurrentResourceStore() if err != nil { return err } resources := desc.NewList() currentMesh := pctx.CurrentMesh() resource := resources.NewItem() if resource.Descriptor().Scope == model.ScopeGlobal
if err := rs.List(context.Background(), resources, core_store.ListByMesh(currentMesh), core_store.ListByPage(pctx.ListContext.Args.Size, pctx.ListContext.Args.Offset)); err != nil { return errors.Wrapf(err, "failed to list "+string(desc.Name)) } switch format := output.Format(pctx.GetContext.Args.OutputFormat); format { case output.TableFormat: return ResolvePrinter(desc.Name, resource.Descriptor().Scope).Print(pctx.Now(), resources, cmd.OutOrStdout()) default: printer, err := printers.NewGenericPrinter(format) if err != nil { return err } return printer.Print(rest_types.From.ResourceList(resources), cmd.OutOrStdout()) } }, } return cmd }
{ currentMesh = "" }
update.rs
//! Update message payloads. //! //! See the [message module] documentation since this is a private module anyways. //! //! [message module]: crate::message use std::ops::Range; use anyhow::{anyhow, Context}; use crate::{ crypto::ByteObject, mask::object::{serialization::MaskObjectBuffer, MaskObject}, message::{ traits::{FromBytes, LengthValueBuffer, ToBytes}, utils::range, DecodeError, }, LocalSeedDict, ParticipantTaskSignature, }; const SUM_SIGNATURE_RANGE: Range<usize> = range(0, ParticipantTaskSignature::LENGTH); const UPDATE_SIGNATURE_RANGE: Range<usize> = range(SUM_SIGNATURE_RANGE.end, ParticipantTaskSignature::LENGTH); #[derive(Clone, Debug)] /// A wrapper around a buffer that contains an [`Update`] message. /// /// It provides getters and setters to access the different fields of the message safely. pub struct UpdateBuffer<T> { inner: T, } impl<T: AsRef<[u8]>> UpdateBuffer<T> { /// Performs bound checks for the various message fields on `bytes` and returns a new /// [`UpdateBuffer`]. /// /// # Errors /// Fails if the `bytes` are smaller than a minimal-sized update message buffer. pub fn new(bytes: T) -> Result<Self, DecodeError> { let buffer = Self { inner: bytes }; buffer .check_buffer_length() .context("invalid UpdateBuffer")?; Ok(buffer) } /// Returns an [`UpdateBuffer`] without performing any bound checks. /// /// This means accessing the various fields may panic if the data is invalid. pub fn new_unchecked(bytes: T) -> Self { Self { inner: bytes } } /// Performs bound checks to ensure the fields can be accessed without panicking. pub fn check_buffer_length(&self) -> Result<(), DecodeError> { let len = self.inner.as_ref().len(); // First, check the fixed size portion of the // header. UPDATE_SIGNATURE_RANGE is the last field if len < UPDATE_SIGNATURE_RANGE.end { return Err(anyhow!( "invalid buffer length: {} < {}", len, UPDATE_SIGNATURE_RANGE.end )); } // Check length of the masked object field MaskObjectBuffer::new(&self.inner.as_ref()[self.masked_model_offset()..]) .context("invalid masked object field")?; // Check the length of the local seed dictionary field let _ = LengthValueBuffer::new(&self.inner.as_ref()[self.local_seed_dict_offset()..]) .context("invalid local seed dictionary length")?; Ok(()) } /// Gets the offset of the masked model field. fn masked_model_offset(&self) -> usize { UPDATE_SIGNATURE_RANGE.end } /// Gets the offset of the local seed dictionary field. /// /// # Panics /// Computing the offset may panic if the buffer has not been checked before. fn local_seed_dict_offset(&self) -> usize { let masked_model = MaskObjectBuffer::new_unchecked(&self.inner.as_ref()[self.masked_model_offset()..]); self.masked_model_offset() + masked_model.len() } } impl<'a, T: AsRef<[u8]> + ?Sized> UpdateBuffer<&'a T> { /// Gets the sum signature field. /// /// # Panics /// Accessing the field may panic if the buffer has not been checked before. pub fn sum_signature(&self) -> &'a [u8] { &self.inner.as_ref()[SUM_SIGNATURE_RANGE] } /// Gets the update signature field. /// /// # Panics /// Accessing the field may panic if the buffer has not been checked before. pub fn update_signature(&self) -> &'a [u8] { &self.inner.as_ref()[UPDATE_SIGNATURE_RANGE] } /// Gets a slice that starts at the beginning of the masked model field. /// /// # Panics /// Accessing the field may panic if the buffer has not been checked before. pub fn masked_model(&self) -> &'a [u8] { let offset = self.masked_model_offset(); &self.inner.as_ref()[offset..] 
} /// Gets a slice that starts at the beginning og the local seed dictionary field. /// /// # Panics /// Accessing the field may panic if the buffer has not been checked before. pub fn local_seed_dict(&self) -> &'a [u8] { let offset = self.local_seed_dict_offset(); &self.inner.as_ref()[offset..] } } impl<T: AsRef<[u8]> + AsMut<[u8]>> UpdateBuffer<T> { /// Gets a mutable reference to the sum signature field. /// /// # Panics /// Accessing the field may panic if the buffer has not been checked before. pub fn sum_signature_mut(&mut self) -> &mut [u8] { &mut self.inner.as_mut()[SUM_SIGNATURE_RANGE] } /// Gets a mutable reference to the update signature field. /// /// # Panics /// Accessing the field may panic if the buffer has not been checked before. pub fn update_signature_mut(&mut self) -> &mut [u8] { &mut self.inner.as_mut()[UPDATE_SIGNATURE_RANGE] } /// Gets a mutable slice that starts at the beginning of the masked model field. /// /// # Panics /// Accessing the field may panic if the buffer has not been checked before. pub fn masked_model_mut(&mut self) -> &mut [u8] { let offset = self.masked_model_offset(); &mut self.inner.as_mut()[offset..] } /// Gets a mutable slice that starts at the beginning of the local seed dictionary field. /// /// # Panics /// Accessing the field may panic if the buffer has not been checked before. pub fn local_seed_dict_mut(&mut self) -> &mut [u8] { let offset = self.local_seed_dict_offset(); &mut self.inner.as_mut()[offset..] } } #[derive(Debug, Eq, PartialEq, Clone)] /// A high level representation of an update message. /// /// These messages are sent by update participants during the update phase. pub struct Update { /// The signature of the round seed and the word "sum". /// /// This is used to determine whether a participant is selected for the sum task. pub sum_signature: ParticipantTaskSignature, /// Signature of the round seed and the word "update". /// /// This is used to determine whether a participant is selected for the update task. pub update_signature: ParticipantTaskSignature, /// A model trained by an update participant. /// /// The model is masked with randomness derived from the participant seed. pub masked_model: MaskObject, /// A dictionary that contains the seed used to mask `masked_model`. /// /// The seed is encrypted with the ephemeral public key of each sum participant. pub local_seed_dict: LocalSeedDict, } impl ToBytes for Update { fn buffer_length(&self) -> usize { UPDATE_SIGNATURE_RANGE.end + self.masked_model.buffer_length() + self.local_seed_dict.buffer_length() } fn to_bytes<T: AsMut<[u8]> + AsRef<[u8]>>(&self, buffer: &mut T) { let mut writer = UpdateBuffer::new_unchecked(buffer.as_mut()); self.sum_signature.to_bytes(&mut writer.sum_signature_mut()); self.update_signature .to_bytes(&mut writer.update_signature_mut()); self.masked_model.to_bytes(&mut writer.masked_model_mut()); self.local_seed_dict .to_bytes(&mut writer.local_seed_dict_mut()); } } impl FromBytes for Update { fn from_byte_slice<T: AsRef<[u8]>>(buffer: &T) -> Result<Self, DecodeError>
fn from_byte_stream<I: Iterator<Item = u8> + ExactSizeIterator>( iter: &mut I, ) -> Result<Self, DecodeError> { Ok(Self { sum_signature: ParticipantTaskSignature::from_byte_stream(iter) .context("invalid sum signature")?, update_signature: ParticipantTaskSignature::from_byte_stream(iter) .context("invalid update signature")?, masked_model: MaskObject::from_byte_stream(iter).context("invalid masked model")?, local_seed_dict: LocalSeedDict::from_byte_stream(iter) .context("invalid local seed dictionary")?, }) } } #[cfg(test)] pub mod tests { use super::*; use crate::testutils::messages::update as helpers; #[test] fn buffer_read() { let bytes = helpers::payload().1; let buffer = UpdateBuffer::new(&bytes).unwrap(); assert_eq!( buffer.sum_signature(), helpers::sum_task_signature().1.as_slice() ); assert_eq!( buffer.update_signature(), helpers::update_task_signature().1.as_slice() ); let expected = helpers::mask_object().1; assert_eq!(&buffer.masked_model()[..expected.len()], &expected[..]); assert_eq!(buffer.local_seed_dict(), &helpers::local_seed_dict().1[..]); } #[test] fn decode_invalid_seed_dict() { let mut invalid = helpers::local_seed_dict().1; // This truncates the last entry of the seed dictionary invalid[3] = 0xe3; let mut bytes = vec![]; bytes.extend(helpers::sum_task_signature().1); bytes.extend(helpers::update_task_signature().1); bytes.extend(helpers::mask_object().1); bytes.extend(invalid); let e = Update::from_byte_slice(&bytes).unwrap_err(); let cause = e.source().unwrap().to_string(); assert_eq!( cause, "invalid local seed dictionary: trailing bytes".to_string() ); } #[test] fn decode() { let (update, bytes) = helpers::payload(); let parsed = Update::from_byte_slice(&bytes).unwrap(); assert_eq!(parsed, update); } #[test] fn stream_parse() { let (update, bytes) = helpers::payload(); let parsed = Update::from_byte_stream(&mut bytes.into_iter()).unwrap(); assert_eq!(parsed, update); } #[test] fn encode() { let (update, bytes) = helpers::payload(); assert_eq!(update.buffer_length(), bytes.len()); let mut buf = vec![0xff; update.buffer_length()]; update.to_bytes(&mut buf); // The order in which the hashmap is serialized is not // guaranteed, but we chose our key/values such that they are // sorted. // // First compute the offset at which the local seed dict value // starts: two signature (64 bytes), the masked model (32 // bytes), the length field (4 bytes), the masked scalar (10 bytes) let offset = 64 * 2 + 32 + 4 + 10; // Sort the end of the buffer (&mut buf[offset..]).sort_unstable(); assert_eq!(buf, bytes); } }
{ let reader = UpdateBuffer::new(buffer.as_ref())?; Ok(Self { sum_signature: ParticipantTaskSignature::from_byte_slice(&reader.sum_signature()) .context("invalid sum signature")?, update_signature: ParticipantTaskSignature::from_byte_slice(&reader.update_signature()) .context("invalid update signature")?, masked_model: MaskObject::from_byte_slice(&reader.masked_model()) .context("invalid masked model")?, local_seed_dict: LocalSeedDict::from_byte_slice(&reader.local_seed_dict()) .context("invalid local seed dictionary")?, }) }
event.rs
//! This file handles interrupts. It provides an interface allowing to register callbacks for
//! each interrupt. Each callback has a priority number, and callbacks are called in decreasing
//! order of priority.

use core::ffi::c_void;
use core::mem::MaybeUninit;
use crate::errno::Errno;
use crate::idt::pic;
use crate::idt;
use crate::process::Regs;
use crate::process::tss;
use crate::util::boxed::Box;
use crate::util::container::vec::Vec;
use crate::util::lock::mutex::*;

/// The list of interrupt error messages ordered by index of the corresponding interrupt vector.
#[cfg(config_general_arch = "x86")]
static ERROR_MESSAGES: &[&str] = &[
	"Divide-by-zero Error",
	"Debug",
	"Non-maskable Interrupt",
	"Breakpoint",
	"Overflow",
	"Bound Range Exceeded",
	"Invalid Opcode",
	"Device Not Available",
	"Double Fault",
	"Coprocessor Segment Overrun",
"Invalid TSS", "Segment Not Present", "Stack-Segment Fault", "General Protection Fault", "Page Fault", "Unknown", "x87 Floating-Point Exception", "Alignement Check", "Machine Check", "SIMD Floating-Point Exception", "Virtualization Exception", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Security Exception", "Unknown" ]; /// Returns the error message corresponding to the given interrupt vector index `i`. fn get_error_message(i: u32) -> &'static str { if (i as usize) < ERROR_MESSAGES.len() { ERROR_MESSAGES[i as usize] } else { "Unknown" } } /// The action to execute after the interrupt handler has returned. pub enum InterruptResultAction { /// Resumes execution of the code where it was interrupted. Resume, /// Goes back to the kernel loop, waiting for another interruption. Loop, /// Makes the kernel panic. Panic, } /// Enumeration telling which action will be executed after an interrupt handler. pub struct InterruptResult { /// Tells whether to skip execution of the next interrupt handlers (with lower priority). skip_next: bool, /// The action to execute after the handler. The last handler decides which action to execute /// unless the `skip_next` variable is set to `true`. action: InterruptResultAction, } impl InterruptResult { /// Creates a new instance. pub fn new(skip_next: bool, action: InterruptResultAction) -> Self { Self { skip_next, action, } } } /// Structure wrapping a callback to insert it into a linked list. struct CallbackWrapper { /// The priority associated with the callback. Higher value means higher priority priority: u32, /// The callback /// First argument: `id` is the id of the interrupt. /// Second argument: `code` is an optional code associated with the interrupt. If no code /// is given, the value is `0`. /// Third argument: `regs` the values of the registers when the interruption was triggered. /// Fourth argument: `ring` tells the ring at which the code was running. /// The return value tells which action to perform next. callback: Box<dyn FnMut(u32, u32, &Regs, u32) -> InterruptResult>, } /// Structure used to detect whenever the object owning the callback is destroyed, allowing to /// unregister it automatically. #[must_use] pub struct CallbackHook { /// The id of the interrupt the callback is bound to. id: usize, /// The priority of the callback. priority: u32, /// The pointer of the callback. ptr: *const c_void, } impl CallbackHook { /// Creates a new instance. fn new(id: usize, priority: u32, ptr: *const c_void) -> Self { Self { id, priority, ptr, } } } impl Drop for CallbackHook { fn drop(&mut self) { remove_callback(self.id, self.priority, self.ptr); } } /// List containing vectors that store callbacks for every interrupt watchdogs. static mut CALLBACKS: MaybeUninit<[Mutex<Vec<CallbackWrapper>>; idt::ENTRIES_COUNT as _]> = MaybeUninit::uninit(); /// Initializes the events handler. /// This function must be called only once when booting. pub fn init() { let callbacks = unsafe { // Safe because called only once CALLBACKS.assume_init_mut() }; for c in callbacks { *c.lock(false).get_mut() = Vec::new(); } } /// Registers the given callback and returns a reference to it. /// `id` is the id of the interrupt to watch. /// `priority` is the priority for the callback. Higher value means higher priority. /// `callback` is the callback to register. /// /// If the `id` is invalid or if an allocation fails, the function shall return an error. 
pub fn register_callback<T>(id: usize, priority: u32, callback: T) -> Result<CallbackHook, Errno> where T: 'static + FnMut(u32, u32, &Regs, u32) -> InterruptResult { debug_assert!(id < idt::ENTRIES_COUNT); idt::wrap_disable_interrupts(|| { let mut guard = unsafe { CALLBACKS.assume_init_mut() }[id].lock(false); let vec = &mut guard.get_mut(); let index = { let r = vec.binary_search_by(| x | { x.priority.cmp(&priority) }); if let Err(l) = r { l } else { r.unwrap() } }; let b = Box::new(callback)?; let ptr = b.as_ptr(); vec.insert(index, CallbackWrapper { priority, callback: b, })?; Ok(CallbackHook::new(id, priority, ptr as _)) }) } /// Removes the callback with id `id`, priority `priority` and pointer `ptr`. fn remove_callback(id: usize, priority: u32, ptr: *const c_void) { let mut guard = unsafe { CALLBACKS.assume_init_mut() }[id].lock(false); let vec = &mut guard.get_mut(); let res = vec.binary_search_by(| x | { x.priority.cmp(&priority) }); if let Ok(index) = res { let mut i = index; while i < vec.len() && vec[i].priority == priority { if vec[i].callback.as_ptr() as *const c_void == ptr { vec.remove(i); break; } i += 1; } } } /// Unlocks the callback vector with id `id`. This function is to be used in case of an event /// callback that never returns. /// It must be called from the same CPU core as the one that locked the mutex since unlocking /// changes the interrupt flag. /// This function is marked as unsafe since it may lead to concurrency issues if not used properly. #[no_mangle] pub unsafe extern "C" fn unlock_callbacks(id: usize) { CALLBACKS.assume_init_mut()[id as usize].unlock(); } /// This function is called whenever an interruption is triggered. /// `id` is the identifier of the interrupt type. This value is architecture-dependent. /// `code` is an optional code associated with the interrupt. If the interrupt type doesn't have a /// code, the value is `0`. /// `regs` is the state of the registers at the moment of the interrupt. /// `ring` tells the ring at which the code was running. #[no_mangle] pub extern "C" fn event_handler(id: u32, code: u32, ring: u32, regs: &Regs) { let action = { let mut guard = unsafe { &mut CALLBACKS.assume_init_mut()[id as usize] }.lock(false); let callbacks = guard.get_mut(); let mut last_action = { if (id as usize) < ERROR_MESSAGES.len() { InterruptResultAction::Panic } else { InterruptResultAction::Resume } }; for i in 0..callbacks.len() { let result = (callbacks[i].callback)(id, code, regs, ring); last_action = result.action; if result.skip_next { break; } } last_action }; match action { InterruptResultAction::Resume => {}, InterruptResultAction::Loop => { pic::end_of_interrupt(id as _); // FIXME: Use of loop action before TSS init shall result in undefined behaviour unsafe { crate::loop_reset(tss::get().esp0 as _); } }, InterruptResultAction::Panic => { crate::kernel_panic!(get_error_message(id), code); }, } }
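For readers of the interface above, a minimal usage sketch follows. It assumes the PIC has remapped IRQ 1 (the keyboard) to interrupt vector 0x21 and that this runs during kernel initialization in the same module; the vector number and the init_keyboard name are illustrative only, not part of this file. The returned CallbackHook must be kept alive by the caller, since dropping it unregisters the callback.

// Illustrative sketch only: vector 0x21 is an assumed PIC remapping for IRQ 1.
fn init_keyboard() -> Result<CallbackHook, Errno> {
	register_callback(0x21, 0, |_id: u32, _code: u32, _regs: &Regs, _ring: u32| {
		// Handle the keystroke here, then resume the interrupted code.
		InterruptResult::new(false, InterruptResultAction::Resume)
	})
	// The caller must store the returned hook; dropping it removes the callback.
}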
mod.rs
//! Dynamic template engine support for handlebars and tera. //! //! # Overview //! //! The general outline for using templates in Rocket is: //! //! 0. Enable the `rocket_contrib` feature corresponding to your templating //! engine(s) of choice: //! //! ```toml //! [dependencies.rocket_contrib] //! version = 0.4.0-rc.2 //! default-features = false //! features = ["handlebars_templates", "tera_templates"] //! ``` //! //! 1. Write your template files in Handlebars (extension: `.hbs`) or tera //! (extensions: `.tera`) in the templates directory (default: //! `{rocket_root}/templates`). //! //! 2. Attach the template fairing, [`Template::fairing()`]: //! //! ```rust //! # extern crate rocket; //! # extern crate rocket_contrib; //! use rocket_contrib::templates::Template; //! //! fn main() { //! rocket::ignite() //! .attach(Template::fairing()) //! // ... //! # ; //! } //! ``` //! //! 3. Return a [`Template`] using [`Template::render()`], supplying the name //! of the template file minus the last two extensions, from a handler. //! //! ```rust //! # #![feature(proc_macro_hygiene, decl_macro)] //! # #[macro_use] extern crate rocket; //! # #[macro_use] extern crate rocket_contrib; //! # fn context() { } //! use rocket_contrib::templates::Template; //! //! #[get("/")] //! fn index() -> Template { //! let context = context(); //! Template::render("template-name", &context) //! } //! ``` //! //! ## Discovery //! //! Template names passed in to [`Template::render()`] must correspond to a //! previously discovered template in the configured template directory. The //! template directory is configured via the `template_dir` configuration //! parameter and defaults to `templates/`. The path set in `template_dir` is //! relative to the Rocket configuration file. See the [configuration //! chapter](https://rocket.rs/v0.4/guide/configuration/#extras) of the guide //! for more information on configuration. //! //! The corresponding templating engine used for a given template is based on a //! template's extension. At present, this library supports the following //! engines and extensions: //! //! * **Tera**: `.tera` //! * **Handlebars**: `.hbs` //! //! Any file that ends with one of these extension will be discovered and //! rendered with the corresponding templating engine. The _name_ of the //! template will be the path to the template file relative to `template_dir` //! minus at most two extensions. The following table illustrates this mapping: //! //! | path | name | //! |-----------------------------------------------|-----------------------| //! | {template_dir}/index.html.hbs | index | //! | {template_dir}/index.tera | index | //! | {template_dir}/index.hbs | index | //! | {template_dir}/dir/index.hbs | dir/index | //! | {template_dir}/dir/index.html.tera | dir/index | //! | {template_dir}/index.template.html.hbs | index.template | //! | {template_dir}/subdir/index.template.html.hbs | subdir/index.template | //! //! The recommended naming scheme is to use two extensions: one for the file //! type, and one for the template extension. This means that template //! extensions should look like: `.html.hbs`, `.html.tera`, `.xml.hbs`, etc. //! //! ## Template Fairing //! //! Template discovery is actualized by the template fairing, which itself is //! created via [`Template::fairing()`] or [`Template::custom()`], the latter of //! which allows for customizations to the templating engine. In order for _any_ //! templates to be rendered, the template fairing _must_ be //! 
[attached](rocket::Rocket::attach()) to the running Rocket instance. Failure //! to do so will result in a run-time error. //! //! Templates are rendered with the `render` method. The method takes in the //! name of a template and a context to render the template with. The context //! can be any type that implements [`Serialize`] from [`serde`] and would //! serialize to an `Object` value. //! //! In debug mode (without the `--release` flag passed to `cargo`), templates //! will be automatically reloaded from disk if any changes have been made to //! the templates directory since the previous request. In release builds, //! template reloading is disabled to improve performance and cannot be enabled. //! //! [`Serialize`]: serde::Serialize //! [`Template`]: templates::Template //! [`Template::fairing()`]: templates::Template::fairing() //! [`Template::custom()`]: templates::Template::custom() //! [`Template::render()`]: templates::Template::render() extern crate serde; extern crate serde_json; extern crate glob; #[cfg(feature = "tera_templates")] pub extern crate tera; #[cfg(feature = "tera_templates")] mod tera_templates; #[cfg(feature = "handlebars_templates")] pub extern crate handlebars; #[cfg(feature = "handlebars_templates")] mod handlebars_templates; mod engine; mod fairing; mod context; mod metadata; pub use self::engine::Engines; pub use self::metadata::Metadata; crate use self::context::Context; crate use self::fairing::ContextManager; use self::engine::Engine; use self::fairing::TemplateFairing; use self::serde::Serialize; use self::serde_json::{Value, to_value}; use self::glob::glob; use std::borrow::Cow; use std::path::PathBuf; use rocket::{Rocket, State}; use rocket::request::Request; use rocket::fairing::Fairing; use rocket::response::{self, Content, Responder}; use rocket::http::{ContentType, Status}; const DEFAULT_TEMPLATE_DIR: &str = "templates"; /// Responder that renders a dynamic template. /// /// # Usage /// /// To use, add the `handlebars_templates` feature, the `tera_templates` /// feature, or both, to the `rocket_contrib` dependencies section of your /// `Cargo.toml`: /// /// ```toml /// [dependencies.rocket_contrib] /// version = "*" /// default-features = false /// features = ["handlebars_templates", "tera_templates"] /// ``` /// /// Then, ensure that the template [`Fairing`] is attached to your Rocket /// application: /// /// ```rust /// # extern crate rocket; /// # extern crate rocket_contrib; /// use rocket_contrib::templates::Template; /// /// fn main() { /// rocket::ignite() /// .attach(Template::fairing()) /// // ... /// # ; /// } /// ``` /// /// The `Template` type implements Rocket's [`Responder`] trait, so it can be /// returned from a request handler directly: /// /// ```rust /// # #![feature(proc_macro_hygiene, decl_macro)] /// # #[macro_use] extern crate rocket; /// # #[macro_use] extern crate rocket_contrib; /// # fn context() { } /// use rocket_contrib::templates::Template; /// /// #[get("/")] /// fn index() -> Template { /// let context = context(); /// Template::render("index", &context) /// } /// ``` /// /// # Helpers, Filters, and Customization /// /// You may use the [`Template::custom()`] method to construct a fairing with /// customized templating engines. Among other things, this method allows you to /// register template helpers and register templates from strings. 
#[derive(Debug)] pub struct Template { name: Cow<'static, str>, value: Option<Value> } #[derive(Debug)] crate struct TemplateInfo { /// The complete path, including `template_dir`, to this template. path: PathBuf, /// The extension for the engine of this template. extension: String, /// The extension before the engine extension in the template, if any. data_type: ContentType } impl Template { /// Returns a fairing that initializes and maintains templating state. /// /// This fairing, or the one returned by [`Template::custom()`], _must_ be /// attached to any `Rocket` instance that wishes to render templates. /// Failure to attach this fairing will result in a "Uninitialized template /// context: missing fairing." error message when a template is attempted to /// be rendered. /// /// If you wish to customize the internal templating engines, use /// [`Template::custom()`] instead. /// /// # Example /// /// To attach this fairing, simple call `attach` on the application's /// `Rocket` instance with `Template::fairing()`: /// /// ```rust /// extern crate rocket; /// extern crate rocket_contrib; /// /// use rocket_contrib::templates::Template; /// /// fn main() { /// rocket::ignite() /// // ... /// .attach(Template::fairing()) /// // ... /// # ; /// } /// ``` pub fn fairing() -> impl Fairing
/// Returns a fairing that initializes and maintains templating state. /// /// Unlike [`Template::fairing()`], this method allows you to configure /// templating engines via the parameter `f`. Note that only the enabled /// templating engines will be accessible from the `Engines` type. /// /// # Example /// /// ```rust /// extern crate rocket; /// extern crate rocket_contrib; /// /// use rocket_contrib::templates::Template; /// /// fn main() { /// rocket::ignite() /// // ... /// .attach(Template::custom(|engines| { /// // engines.handlebars.register_helper ... /// })) /// // ... /// # ; /// } /// ``` pub fn custom<F>(f: F) -> impl Fairing where F: Fn(&mut Engines) + Send + Sync + 'static { TemplateFairing { custom_callback: Box::new(f) } } /// Render the template named `name` with the context `context`. The /// `context` can be of any type that implements `Serialize`. This is /// typically a `HashMap` or a custom `struct`. /// /// # Example /// /// ```rust /// use std::collections::HashMap; /// use rocket_contrib::templates::Template; /// /// // Create a `context`. Here, just an empty `HashMap`. /// let mut context = HashMap::new(); /// /// # context.insert("test", "test"); /// # #[allow(unused_variables)] /// let template = Template::render("index", context); #[inline] pub fn render<S, C>(name: S, context: C) -> Template where S: Into<Cow<'static, str>>, C: Serialize { Template { name: name.into(), value: to_value(context).ok() } } /// Render the template named `name` with the context `context` into a /// `String`. This method should **not** be used in any running Rocket /// application. This method should only be used during testing to validate /// `Template` responses. For other uses, use [`render()`](#method.render) /// instead. /// /// The `context` can be of any type that implements `Serialize`. This is /// typically a `HashMap` or a custom `struct`. /// /// Returns `Some` if the template could be rendered. Otherwise, returns /// `None`. If rendering fails, error output is printed to the console. /// `None` is also returned if a `Template` fairing has not been attached. /// /// # Example /// /// ```rust /// # extern crate rocket; /// # extern crate rocket_contrib; /// use std::collections::HashMap; /// /// use rocket_contrib::templates::Template; /// use rocket::local::Client; /// /// fn main() { /// let rocket = rocket::ignite().attach(Template::fairing()); /// let client = Client::new(rocket).expect("valid rocket"); /// /// // Create a `context`. Here, just an empty `HashMap`. /// let mut context = HashMap::new(); /// /// # context.insert("test", "test"); /// # #[allow(unused_variables)] /// let template = Template::show(client.rocket(), "index", context); /// } /// ``` #[inline] pub fn show<S, C>(rocket: &Rocket, name: S, context: C) -> Option<String> where S: Into<Cow<'static, str>>, C: Serialize { let ctxt = rocket.state::<ContextManager>().map(ContextManager::context).or_else(|| { warn!("Uninitialized template context: missing fairing."); info!("To use templates, you must attach `Template::fairing()`."); info!("See the `Template` documentation for more information."); None })?; Template::render(name, context).finalize(&ctxt).ok().map(|v| v.0) } /// Actually render this template given a template context. This method is /// called by the `Template` `Responder` implementation as well as /// `Template::show()`. 
#[inline(always)] fn finalize(self, ctxt: &Context) -> Result<(String, ContentType), Status> { let name = &*self.name; let info = ctxt.templates.get(name).ok_or_else(|| { let ts: Vec<_> = ctxt.templates.keys().map(|s| s.as_str()).collect(); error_!("Template '{}' does not exist.", name); info_!("Known templates: {}", ts.join(",")); info_!("Searched in '{:?}'.", ctxt.root); Status::InternalServerError })?; let value = self.value.ok_or_else(|| { error_!("The provided template context failed to serialize."); Status::InternalServerError })?; let string = ctxt.engines.render(name, &info, value).ok_or_else(|| { error_!("Template '{}' failed to render.", name); Status::InternalServerError })?; Ok((string, info.data_type.clone())) } } /// Returns a response with the Content-Type derived from the template's /// extension and a fixed-size body containing the rendered template. If /// rendering fails, an `Err` of `Status::InternalServerError` is returned. impl Responder<'static> for Template { fn respond_to(self, req: &Request) -> response::Result<'static> { let ctxt = req.guard::<State<ContextManager>>().succeeded().ok_or_else(|| { error_!("Uninitialized template context: missing fairing."); info_!("To use templates, you must attach `Template::fairing()`."); info_!("See the `Template` documentation for more information."); Status::InternalServerError })?.inner().context(); let (render, content_type) = self.finalize(&ctxt)?; Content(content_type, render).respond_to(req) } }
{ Template::custom(|_| {}) }
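As a small illustration of the note above that a render context only needs to implement Serialize and serialize to an object, a handler can pass a plain struct instead of a HashMap. This sketch assumes the same crate setup as the documentation examples above (Rocket's routing macros and serde's derive feature); the TaskContext type and the "tasks" template name are made up for the example.

use rocket_contrib::templates::Template;
use serde::Serialize;

#[derive(Serialize)]
struct TaskContext {
    title: String,
    tasks: Vec<String>,
}

#[get("/tasks")]
fn tasks() -> Template {
    let context = TaskContext {
        title: "Open tasks".to_string(),
        tasks: vec!["write docs".to_string(), "fix tests".to_string()],
    };
    // Renders `{template_dir}/tasks.html.hbs` or `tasks.html.tera`, whichever was discovered.
    Template::render("tasks", context)
}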
fixtures.go
//go:build integration // +build integration package testutils import ( "context" "fmt" "os" "strconv" "time" "github.com/jackc/pgconn" "github.com/pkg/errors" "github.com/determined-ai/determined/master/internal/config" "github.com/determined-ai/determined/master/internal/elastic" "github.com/determined-ai/determined/master/pkg/model" "github.com/sirupsen/logrus" "github.com/ghodss/yaml" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "github.com/determined-ai/determined/master/internal" "github.com/determined-ai/determined/master/pkg/check" "github.com/determined-ai/determined/master/pkg/logger" "github.com/determined-ai/determined/master/version" "github.com/determined-ai/determined/proto/pkg/apiv1" ) const ( defaultUsername = "determined" defaultMasterConfig = ` checkpoint_storage: type: shared_fs host_path: /tmp resource_manager: type: agent db: user: postgres password: postgres name: determined migrations: file://../../../static/migrations root: ../../.. ` ) // ResolveElastic resolves a connection to an elasticsearch database. To debug tests that use this // (or otherwise run the tests outside of the Makefile), make sure to set DET_INTEGRATION_ES_HOST and // DET_INTEGRATION_ES_PORT. func ResolveElastic() (*elastic.Elastic, error) { es, err := elastic.Setup(*DefaultElasticConfig().ElasticLoggingConfig) if err != nil { return nil, fmt.Errorf("failed to connect to elasticsearch: %w", err) } return es, nil } // RunMaster runs a master in a goroutine and returns a reference to the master, // along with all the external context required to interact with the master, and // a function to close it. func RunMaster(ctx context.Context, c *config.Config) ( *internal.Master, *logger.LogBuffer, apiv1.DeterminedClient, context.Context, error, ) { if c == nil { dConf, err := DefaultMasterConfig() if err != nil { return nil, nil, nil, nil, err } c = dConf } logs := logger.NewLogBuffer(100) m := internal.New(version.Version, logs, c) logrus.AddHook(logs) go func() { err := m.Run(ctx) switch { case err == context.Canceled: fmt.Println("master stopped") case err != nil: fmt.Println("error running master: ", err) } }() cl, err := ConnectMaster(c) if err != nil { return nil, nil, nil, nil, err } creds, err := APICredentials(context.Background(), cl) if err != nil { return nil, nil, nil, nil, err } return m, logs, cl, creds, nil } // ConnectMaster blocks until a connection can be made to this master, assumed to be running // on localhost on the port indicated by the configuration. Returns an error if unable to connect // after 5 tries with 100ms delay between each. func ConnectMaster(c *config.Config) (apiv1.DeterminedClient, error) { var cl apiv1.DeterminedClient var clConn *grpc.ClientConn var err error for i := 0; i < 15; i++ { clConn, err = grpc.Dial(fmt.Sprintf("localhost:%d", c.Port), grpc.WithInsecure()) if err != nil { err = fmt.Errorf("failed to dial master: %w", err) continue } cl = apiv1.NewDeterminedClient(clConn) _, err = cl.Login(context.Background(), &apiv1.LoginRequest{Username: defaultUsername}) if err == nil { return cl, nil } time.Sleep(time.Second) } return nil, fmt.Errorf("failed to connect to master: %w", err) } // DefaultMasterConfig returns the default master configuration. 
func DefaultMasterConfig() (*config.Config, error) {
	c := config.DefaultConfig()
	if err := yaml.Unmarshal([]byte(defaultMasterConfig), c, yaml.DisallowUnknownFields); err != nil {
		return nil, err
	}

	pgCfg, err := pgconn.ParseConfig(os.Getenv("DET_INTEGRATION_POSTGRES_URL"))
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse database string")
	}

	c.DB.Host = pgCfg.Host
	c.DB.Port = strconv.Itoa(int(pgCfg.Port))
	c.DB.User = pgCfg.User
	c.DB.Password = pgCfg.Password
	c.DB.Name = pgCfg.Database

	if err := c.Resolve(); err != nil {
		return nil, err
	}

	if err := check.Validate(c); err != nil {
		return nil, err
	}

	return c, nil
}

// DefaultElasticConfig returns the default elastic logging config, reading the host and
// port from the DET_INTEGRATION_ES_* environment variables.
func DefaultElasticConfig() model.LoggingConfig {
	port, err := strconv.Atoi(os.Getenv("DET_INTEGRATION_ES_PORT"))
	if err != nil
	return model.LoggingConfig{
		ElasticLoggingConfig: &model.ElasticLoggingConfig{
			Host: os.Getenv("DET_INTEGRATION_ES_HOST"),
			Port: port,
		},
	}
}

// CurrentLogstashElasticIndex returns the current active trial log index.
func CurrentLogstashElasticIndex() string {
	return elastic.CurrentLogstashIndex()
}

// APICredentials takes a context and a connected apiv1.DeterminedClient and returns a context
// with credentials or an error if unable to login with defaults.
func APICredentials(ctx context.Context, cl apiv1.DeterminedClient) (context.Context, error) {
	resp, err := cl.Login(ctx, &apiv1.LoginRequest{Username: defaultUsername})
	if err != nil {
		return nil, fmt.Errorf("failed to login: %w", err)
	}
	return metadata.AppendToOutgoingContext(
		ctx, "x-user-token", fmt.Sprintf("Bearer %s", resp.Token)), nil
}
{ panic("elastic config had non-numeric port") }
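A rough sketch of how an integration test might combine the helpers above. TestMasterSmoke and the GetMaster call are assumptions for illustration only; any authenticated RPC exposed by apiv1 would serve the same purpose.

//go:build integration
// +build integration

package testutils

import (
	"context"
	"testing"

	"github.com/determined-ai/determined/proto/pkg/apiv1"
)

// TestMasterSmoke boots a master with the default config, then issues one
// authenticated RPC using the credentials context returned by RunMaster.
func TestMasterSmoke(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, _, cl, creds, err := RunMaster(ctx, nil)
	if err != nil {
		t.Fatalf("failed to run master: %v", err)
	}

	// GetMaster is assumed to exist in apiv1; it simply fetches master metadata.
	if _, err := cl.GetMaster(creds, &apiv1.GetMasterRequest{}); err != nil {
		t.Fatalf("failed to fetch master info: %v", err)
	}
}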
t315.py
def default_outside(x=[]): return x a = default_outside() a.append(1) print a b = default_outside() b.append(2)
print b
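For context on what this snippet demonstrates: the default list is created once, at function definition time, so both calls return and mutate the same object and the script prints [1] followed by [1, 2]. The conventional fix, shown here as a Python 3 aside rather than a change to the test itself, is a None sentinel:

def default_outside(x=None):
    # Create a fresh list on every call instead of sharing one default object.
    if x is None:
        x = []
    return x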
website_cloner.py
"""" Program name : Website cloner author : https://github.com/codeperfectplus How to use : Check README.md """ import os import sys import requests from bs4 import BeautifulSoup class CloneWebsite: def __init__(self, website_name): self.website_name = website_name
    def crawl_website(self):
        """ This function will crawl the website and return its content """
        content = requests.get(self.website_name)
        if content.status_code == 200:
            return content

    def create_folder(self):
        """ This function will create a folder for the website """
        folder_name = (self.website_name.split("/"))[2]
        try:
            os.makedirs(folder_name)
        except Exception as e:
            print(e)
        return folder_name

    def save_website(self):
        """ This function will save the website to its respective folder """
        folder_name = self.create_folder()
        content = self.crawl_website()
        if content is None:
            return
        with open(
            f"{folder_name}/index.html", "w", encoding="ascii", errors="ignore"
        ) as file:
            file.write(content.text)

    def save_image(self):
        """ This function will download every image referenced by the page """
        folder_name = self.create_folder()
        os.chdir(folder_name)

        data = requests.get(self.website_name).text
        soup = BeautifulSoup(data, "html.parser")
        for img in soup.find_all("img"):
            src = img["src"]
            print(src)
            image_name = src.split("/")[-1]
            path = src.split("/")[:-1]
            path = "/".join(path)
            try:
                os.makedirs(path)
            except Exception:
                print("File Exists")

            if "/" == src[:1]:
                print(src)
                src = self.website_name + src
            img_data = requests.get(src).content
            with open(f"{path}/{image_name}", "wb") as file:
                file.write(img_data)
        print("complete")


if __name__ == "__main__":
    website_name = sys.argv[1]
    clone = CloneWebsite(website_name)
    clone.save_website()
    clone.save_image()
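As an aside on the src[:1] == "/" handling above: resolving image URLs against the page URL is more robust with urllib.parse.urljoin, which covers absolute URLs, root-relative sources, and relative sources without a leading slash. A minimal sketch, not part of the original script:

from urllib.parse import urljoin

def resolve_image_url(page_url, src):
    """Return an absolute URL for an <img> src found on page_url."""
    return urljoin(page_url, src)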
.eslintrc.js
module.exports = { root: true, env: { node: true, jest: true, }, plugins: ['prettier', '@typescript-eslint/eslint-plugin'], extends: [ 'eslint:recommended', 'plugin:vue/base', 'plugin:vue/vue3-essential', 'plugin:vue/vue3-strongly-recommended', 'plugin:vue/vue3-recommended', 'plugin:@typescript-eslint/recommended', 'prettier', ], parser: '@typescript-eslint/parser', parserOptions: {
defineProps: 'readonly', defineEmits: 'readonly', }, rules: { semi: ['error', 'never'], 'prettier/prettier': 'error', '@typescript-eslint/no-unused-vars': [ 'error', { argsIgnorePattern: '^_', varsIgnorePattern: 'props' }, ], '@typescript-eslint/explicit-module-boundary-types': 'off', '@typescript-eslint/no-non-null-assertion': 'off', 'vue/attribute-hyphenation': 'off', 'vue/multi-word-component-names': 'off', 'no-console': process.env.NODE_ENV === 'production' ? 'warn' : 'off', 'no-debugger': process.env.NODE_ENV === 'production' ? 'warn' : 'off', }, overrides: [ { files: ['*.vue'], parser: 'vue-eslint-parser', parserOptions: { parser: '@typescript-eslint/parser', }, }, { files: ['**/*.test.{j,t}s'], env: { jest: true, }, }, ], }
ecmaVersion: 'latest', sourceType: 'module', }, globals: {
mod.rs
//! The libcore prelude //! //! This module is intended for users of libcore which do not link to libstd as //! well. This module is imported by default when `#![no_std]` is used in the //! same manner as the standard library's prelude. #![stable(feature = "core_prelude", since = "1.4.0")] pub mod v1; /// The 2015 version of the core prelude. /// /// See the [module-level documentation](self) for more. #[unstable(feature = "prelude_2015", issue = "85684")] pub mod rust_2015 { #[unstable(feature = "prelude_2015", issue = "85684")] #[doc(no_inline)] pub use super::v1::*; } /// The 2018 version of the core prelude. /// /// See the [module-level documentation](self) for more. #[unstable(feature = "prelude_2018", issue = "85684")] pub mod rust_2018 { #[unstable(feature = "prelude_2018", issue = "85684")] #[doc(no_inline)] pub use super::v1::*; } /// The 2021 version of the core prelude. /// /// See the [module-level documentation](self) for more. #[unstable(feature = "prelude_2021", issue = "85684")] pub mod rust_2021 { #[unstable(feature = "prelude_2021", issue = "85684")] #[doc(no_inline)] pub use super::v1::*; #[unstable(feature = "prelude_2021", issue = "85684")] #[doc(no_inline)]
pub use crate::convert::{TryFrom, TryInto}; }
pub use crate::iter::FromIterator; #[unstable(feature = "prelude_2021", issue = "85684")] #[doc(no_inline)]
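A small illustration of what the 2021 prelude additions above mean in practice: on edition 2021 (or with this prelude glob-imported), TryFrom, TryInto, and FromIterator are available without explicit imports. Example code, not part of this module:

// With edition 2021, no `use core::convert::TryFrom;` is needed for this to compile.
fn to_byte(n: u16) -> Option<u8> {
    u8::try_from(n).ok()
}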
notification.rs
use std::str::FromStr; use adw::prelude::*; use ashpd::{ desktop::notification::{Action, Button, Notification, NotificationProxy, Priority}, zbus, zvariant::Value, }; use glib::clone; use gtk::glib; use gtk::prelude::*; use gtk::subclass::prelude::*; use gtk_macros::spawn; mod imp { use adw::subclass::prelude::*; use gtk::CompositeTemplate; use super::*; #[derive(Debug, Default, CompositeTemplate)] #[template(resource = "/com/belmoussaoui/ashpd/demo/notification.ui")] pub struct
{ #[template_child] pub id_entry: TemplateChild<gtk::Entry>, #[template_child] pub title_entry: TemplateChild<gtk::Entry>, #[template_child] pub body_entry: TemplateChild<gtk::Entry>, #[template_child] pub priority_combo: TemplateChild<adw::ComboRow>, #[template_child] pub id_label: TemplateChild<gtk::Label>, #[template_child] pub action_name_label: TemplateChild<gtk::Label>, #[template_child] pub parameters_label: TemplateChild<gtk::Label>, #[template_child] pub response_group: TemplateChild<adw::PreferencesGroup>, } #[glib::object_subclass] impl ObjectSubclass for NotificationPage { const NAME: &'static str = "NotificationPage"; type Type = super::NotificationPage; type ParentType = adw::Bin; fn class_init(klass: &mut Self::Class) { Self::bind_template(klass); klass.install_action("notification.send", None, move |page, _action, _target| { page.send_notification().unwrap(); }); } fn instance_init(obj: &glib::subclass::InitializingObject<Self>) { obj.init_template(); } } impl ObjectImpl for NotificationPage { fn constructed(&self, _obj: &Self::Type) { let model = gtk::StringList::new(&[ &Priority::Low.to_string(), &Priority::Normal.to_string(), &Priority::High.to_string(), &Priority::Urgent.to_string(), ]); self.priority_combo.set_model(Some(&model)); self.priority_combo.set_selected(Priority::Normal as u32); } } impl WidgetImpl for NotificationPage {} impl BinImpl for NotificationPage {} } glib::wrapper! { pub struct NotificationPage(ObjectSubclass<imp::NotificationPage>) @extends gtk::Widget, adw::Bin; } impl NotificationPage { #[allow(clippy::new_without_default)] pub fn new() -> Self { glib::Object::new(&[]).expect("Failed to create a NotificationPage") } pub fn send_notification(&self) -> zbus::fdo::Result<()> { let self_ = imp::NotificationPage::from_instance(self); let notification_id = self_.id_entry.text(); let title = self_.title_entry.text(); let body = self_.body_entry.text(); let selected_item = self_ .priority_combo .selected_item() .unwrap() .downcast::<gtk::StringObject>() .unwrap() .string(); let priority = Priority::from_str(&selected_item).unwrap(); spawn!(clone!(@weak self as page => async move { let self_ = imp::NotificationPage::from_instance(&page); let action = notify( &notification_id, Notification::new(&title) .default_action("open") .default_action_target(Value::U32(100).into()) .body(&body) .priority(priority) .button(Button::new("Copy", "copy").target(Value::U32(32).into())) .button(Button::new("Delete", "delete").target(Value::U32(40).into())), ) .await .unwrap(); self_.response_group.show(); self_.id_label.set_text(action.id()); self_.action_name_label.set_text(action.name()); self_.parameters_label.set_text(&format!("{:#?}", action.parameter())); })); Ok(()) } } async fn notify(id: &str, notification: Notification) -> ashpd::Result<Action> { let cnx = zbus::azync::Connection::new_session().await?; let proxy = NotificationProxy::new(&cnx).await?; proxy.add_notification(&id, notification).await?; let action = proxy.receive_action_invoked().await?; Ok(action) }
NotificationPage
render.rs
use super::grammars::{ExternalToken, LexicalGrammar, SyntaxGrammar, VariableType}; use super::nfa::CharacterSet; use super::rules::{Alias, AliasMap, Symbol, SymbolType}; use super::tables::{ AdvanceAction, FieldLocation, GotoAction, LexState, LexTable, ParseAction, ParseTable, ParseTableEntry, }; use core::ops::Range; use std::cmp; use std::collections::{HashMap, HashSet}; use std::fmt::Write; use std::mem::swap; // Currently, the library supports a new ABI version that has not yet been // stabilized, and the parser generation does not use it by default. const STABLE_LANGUAGE_VERSION: usize = tree_sitter::LANGUAGE_VERSION - 1; const LARGE_CHARACTER_RANGE_COUNT: usize = 8; macro_rules! add { ($this: tt, $($arg: tt)*) => {{ $this.buffer.write_fmt(format_args!($($arg)*)).unwrap(); }} } macro_rules! add_whitespace { ($this: tt) => {{ for _ in 0..$this.indent_level { write!(&mut $this.buffer, " ").unwrap(); } }}; } macro_rules! add_line { ($this: tt, $($arg: tt)*) => { add_whitespace!($this); $this.buffer.write_fmt(format_args!($($arg)*)).unwrap(); $this.buffer += "\n"; } } macro_rules! indent { ($this: tt) => { $this.indent_level += 1; }; } macro_rules! dedent { ($this: tt) => { assert_ne!($this.indent_level, 0); $this.indent_level -= 1; }; } const SMALL_STATE_THRESHOLD: usize = 64; struct Generator { buffer: String, indent_level: usize, language_name: String, parse_table: ParseTable, main_lex_table: LexTable, keyword_lex_table: LexTable, large_state_count: usize, keyword_capture_token: Option<Symbol>, syntax_grammar: SyntaxGrammar, lexical_grammar: LexicalGrammar, simple_aliases: AliasMap, symbol_order: HashMap<Symbol, usize>, symbol_ids: HashMap<Symbol, String>, alias_ids: HashMap<Alias, String>, unique_aliases: Vec<Alias>, symbol_map: HashMap<Symbol, Symbol>, field_names: Vec<String>, next_abi: bool, } struct TransitionSummary { is_included: bool, ranges: Vec<Range<char>>, call_id: Option<usize>, } struct LargeCharacterSetInfo { ranges: Vec<Range<char>>, symbol: Symbol, index: usize, usage_count: usize, } impl Generator { fn generate(mut self) -> String { self.init(); self.add_includes(); self.add_pragmas(); self.add_stats(); self.add_symbol_enum(); self.add_symbol_names_list(); self.add_unique_symbol_map(); self.add_symbol_metadata_list(); if !self.field_names.is_empty() { self.add_field_name_enum(); self.add_field_name_names_list(); self.add_field_sequences(); } if !self.parse_table.production_infos.is_empty() { self.add_alias_sequences(); } if self.next_abi { self.add_non_terminal_alias_map(); } let mut main_lex_table = LexTable::default(); swap(&mut main_lex_table, &mut self.main_lex_table); self.add_lex_function("ts_lex", main_lex_table, true); if self.keyword_capture_token.is_some() { let mut keyword_lex_table = LexTable::default(); swap(&mut keyword_lex_table, &mut self.keyword_lex_table); self.add_lex_function("ts_lex_keywords", keyword_lex_table, false); } self.add_lex_modes_list(); if !self.syntax_grammar.external_tokens.is_empty() { self.add_external_token_enum(); self.add_external_scanner_symbol_map(); self.add_external_scanner_states_list(); } self.add_parse_table(); self.add_parser_export(); self.buffer } fn init(&mut self) { let mut symbol_identifiers = HashSet::new(); for i in 0..self.parse_table.symbols.len() { self.assign_symbol_id(self.parse_table.symbols[i], &mut symbol_identifiers); } let mut field_names = Vec::new(); for production_info in &self.parse_table.production_infos { for field_name in production_info.field_map.keys() { field_names.push(field_name); } for alias 
in &production_info.alias_sequence { if let Some(alias) = &alias { let alias_kind = alias.kind(); let matching_symbol = self.parse_table.symbols.iter().cloned().find(|symbol| { let (name, kind) = self.metadata_for_symbol(*symbol); name == alias.value && kind == alias_kind }); let alias_id = if let Some(symbol) = matching_symbol { self.symbol_ids[&symbol].clone() } else if alias.is_named { format!("alias_sym_{}", self.sanitize_identifier(&alias.value)) } else { format!("anon_alias_sym_{}", self.sanitize_identifier(&alias.value)) }; self.alias_ids.entry(alias.clone()).or_insert(alias_id); } } } self.unique_aliases = self .alias_ids .keys() .filter(|alias| { self.parse_table .symbols .iter() .cloned() .find(|symbol| { let (name, kind) = self.metadata_for_symbol(*symbol); name == alias.value && kind == alias.kind() }) .is_none() }) .cloned() .collect(); self.unique_aliases.sort_unstable(); self.symbol_map = self .parse_table .symbols .iter() .map(|symbol| { let mut mapping = symbol; // There can be multiple symbols in the grammar that have the same name and kind, // due to simple aliases. When that happens, ensure that they map to the same // public-facing symbol. If one of the symbols is not aliased, choose that one // to be the public-facing symbol. Otherwise, pick the symbol with the lowest // numeric value. if let Some(alias) = self.simple_aliases.get(symbol) { let kind = alias.kind(); for other_symbol in &self.parse_table.symbols { if let Some(other_alias) = self.simple_aliases.get(other_symbol) { if other_symbol < mapping && other_alias == alias { mapping = other_symbol; } } else if self.metadata_for_symbol(*other_symbol) == (&alias.value, kind) { mapping = other_symbol; break; } } } // Two anonymous tokens with different flags but the same string value // should be represented with the same symbol in the public API. Examples: // * "<" and token(prec(1, "<")) // * "(" and token.immediate("(") else if symbol.is_terminal() { let metadata = self.metadata_for_symbol(*symbol); for other_symbol in &self.parse_table.symbols { let other_metadata = self.metadata_for_symbol(*other_symbol); if other_metadata == metadata { mapping = other_symbol; break; } } } (*symbol, *mapping) }) .collect(); field_names.sort_unstable(); field_names.dedup(); self.field_names = field_names.into_iter().cloned().collect(); // If we are opting in to the new unstable language ABI, then use the concept of // "small parse states". Otherwise, use the same representation for all parse // states. let threshold = cmp::min(SMALL_STATE_THRESHOLD, self.parse_table.symbols.len() / 2); self.large_state_count = self .parse_table .states .iter() .enumerate() .take_while(|(i, s)| { *i <= 1 || s.terminal_entries.len() + s.nonterminal_entries.len() > threshold }) .count(); } fn add_includes(&mut self) { add_line!(self, "#include <tree_sitter/parser.h>"); add_line!(self, ""); } fn add_pragmas(&mut self) { add_line!(self, "#if defined(__GNUC__) || defined(__clang__)"); add_line!(self, "#pragma GCC diagnostic push"); add_line!( self, "#pragma GCC diagnostic ignored \"-Wmissing-field-initializers\"" ); add_line!(self, "#endif"); add_line!(self, ""); // Compiling large lexer functions can be very slow. Disabling optimizations // is not ideal, but only a very small fraction of overall parse time is // spent lexing, so the performance impact of this is negligible. 
if self.main_lex_table.states.len() > 300 { add_line!(self, "#ifdef _MSC_VER"); add_line!(self, "#pragma optimize(\"\", off)"); add_line!(self, "#elif defined(__clang__)"); add_line!(self, "#pragma clang optimize off"); add_line!(self, "#elif defined(__GNUC__)"); add_line!(self, "#pragma GCC optimize (\"O0\")"); add_line!(self, "#endif"); add_line!(self, ""); } } fn add_stats(&mut self) { let token_count = self .parse_table .symbols .iter() .filter(|symbol| { if symbol.is_terminal() || symbol.is_eof() { true } else if symbol.is_external() { self.syntax_grammar.external_tokens[symbol.index] .corresponding_internal_token .is_none() } else { false } }) .count(); if self.next_abi { add_line!( self, "#define LANGUAGE_VERSION {}", tree_sitter::LANGUAGE_VERSION ); } else { add_line!(self, "#define LANGUAGE_VERSION {}", STABLE_LANGUAGE_VERSION); } add_line!( self, "#define STATE_COUNT {}", self.parse_table.states.len() ); add_line!(self, "#define LARGE_STATE_COUNT {}", self.large_state_count); add_line!( self, "#define SYMBOL_COUNT {}", self.parse_table.symbols.len() ); add_line!(self, "#define ALIAS_COUNT {}", self.unique_aliases.len(),); add_line!(self, "#define TOKEN_COUNT {}", token_count); add_line!( self, "#define EXTERNAL_TOKEN_COUNT {}", self.syntax_grammar.external_tokens.len() ); add_line!(self, "#define FIELD_COUNT {}", self.field_names.len()); add_line!( self, "#define MAX_ALIAS_SEQUENCE_LENGTH {}", self.parse_table.max_aliased_production_length ); add_line!(self, ""); } fn add_symbol_enum(&mut self) { add_line!(self, "enum {{"); indent!(self); self.symbol_order.insert(Symbol::end(), 0); let mut i = 1; for symbol in self.parse_table.symbols.iter() { if *symbol != Symbol::end() { self.symbol_order.insert(*symbol, i); add_line!(self, "{} = {},", self.symbol_ids[&symbol], i); i += 1; } } for alias in &self.unique_aliases { add_line!(self, "{} = {},", self.alias_ids[&alias], i); i += 1; } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_symbol_names_list(&mut self) { add_line!(self, "static const char *ts_symbol_names[] = {{"); indent!(self); for symbol in self.parse_table.symbols.iter() { let name = self.sanitize_string( self.simple_aliases .get(symbol) .map(|alias| alias.value.as_str()) .unwrap_or(self.metadata_for_symbol(*symbol).0), ); add_line!(self, "[{}] = \"{}\",", self.symbol_ids[&symbol], name); } for alias in &self.unique_aliases { add_line!( self, "[{}] = \"{}\",", self.alias_ids[&alias], self.sanitize_string(&alias.value) ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_unique_symbol_map(&mut self) { add_line!(self, "static TSSymbol ts_symbol_map[] = {{"); indent!(self); for symbol in &self.parse_table.symbols { add_line!( self, "[{}] = {},", self.symbol_ids[symbol], self.symbol_ids[&self.symbol_map[symbol]], ); } for alias in &self.unique_aliases { add_line!( self, "[{}] = {},", self.alias_ids[&alias], self.alias_ids[&alias], ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_field_name_enum(&mut self) { add_line!(self, "enum {{"); indent!(self); for (i, field_name) in self.field_names.iter().enumerate() { add_line!(self, "{} = {},", self.field_id(field_name), i + 1); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_field_name_names_list(&mut self) { add_line!(self, "static const char *ts_field_names[] = {{"); indent!(self); add_line!(self, "[0] = NULL,"); for field_name in &self.field_names { add_line!( self, "[{}] = \"{}\",", self.field_id(field_name), field_name ); } dedent!(self); 
add_line!(self, "}};"); add_line!(self, ""); } fn add_symbol_metadata_list(&mut self) { add_line!( self, "static const TSSymbolMetadata ts_symbol_metadata[] = {{" ); indent!(self); for symbol in &self.parse_table.symbols { add_line!(self, "[{}] = {{", self.symbol_ids[&symbol]); indent!(self); if let Some(Alias { is_named, .. }) = self.simple_aliases.get(symbol) { add_line!(self, ".visible = true,"); add_line!(self, ".named = {},", is_named); } else { match self.metadata_for_symbol(*symbol).1 { VariableType::Named => { add_line!(self, ".visible = true,"); add_line!(self, ".named = true,"); } VariableType::Anonymous => { add_line!(self, ".visible = true,"); add_line!(self, ".named = false,"); } VariableType::Hidden => { add_line!(self, ".visible = false,"); add_line!(self, ".named = true,"); if self.syntax_grammar.supertype_symbols.contains(symbol) { add_line!(self, ".supertype = true,"); } } VariableType::Auxiliary => { add_line!(self, ".visible = false,"); add_line!(self, ".named = false,"); } } } dedent!(self); add_line!(self, "}},"); } for alias in &self.unique_aliases { add_line!(self, "[{}] = {{", self.alias_ids[&alias]); indent!(self); add_line!(self, ".visible = true,"); add_line!(self, ".named = {},", alias.is_named); dedent!(self); add_line!(self, "}},"); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_alias_sequences(&mut self) { add_line!( self, "static TSSymbol ts_alias_sequences[{}][MAX_ALIAS_SEQUENCE_LENGTH] = {{", self.parse_table.production_infos.len() ); indent!(self); for (i, production_info) in self.parse_table.production_infos.iter().enumerate() { if production_info.alias_sequence.is_empty() { // Work around MSVC's intolerance of empty array initializers by // explicitly zero-initializing the first element. if i == 0 { add_line!(self, "[0] = {{0}},"); } continue; } add_line!(self, "[{}] = {{", i); indent!(self); for (j, alias) in production_info.alias_sequence.iter().enumerate() { if let Some(alias) = alias { add_line!(self, "[{}] = {},", j, self.alias_ids[&alias]); } } dedent!(self); add_line!(self, "}},"); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_non_terminal_alias_map(&mut self) { let mut aliases_by_symbol = HashMap::new(); for variable in &self.syntax_grammar.variables { for production in &variable.productions { for step in &production.steps { if let Some(alias) = &step.alias { if step.symbol.is_non_terminal() && !self.simple_aliases.contains_key(&step.symbol) { if self.symbol_ids.contains_key(&step.symbol) { let alias_ids = aliases_by_symbol.entry(step.symbol).or_insert(Vec::new()); if let Err(i) = alias_ids.binary_search(&alias) { alias_ids.insert(i, alias); } } } } } } } let mut aliases_by_symbol = aliases_by_symbol.iter().collect::<Vec<_>>(); aliases_by_symbol.sort_unstable_by_key(|e| e.0); add_line!(self, "static uint16_t ts_non_terminal_alias_map[] = {{"); indent!(self); for (symbol, aliases) in aliases_by_symbol { let symbol_id = &self.symbol_ids[symbol]; let public_symbol_id = &self.symbol_ids[&self.symbol_map[&symbol]]; add_line!(self, "{}, {},", symbol_id, 1 + aliases.len()); indent!(self); add_line!(self, "{},", public_symbol_id); for alias in aliases { add_line!(self, "{},", &self.alias_ids[&alias]); } dedent!(self); } add_line!(self, "0,"); dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_field_sequences(&mut self)
fn add_lex_function( &mut self, name: &str, lex_table: LexTable, extract_helper_functions: bool, ) { let mut ruled_out_chars = HashSet::new(); let mut large_character_sets = Vec::<LargeCharacterSetInfo>::new(); // For each lex state, compute a summary of the code that needs to be // generated. let state_transition_summaries: Vec<Vec<TransitionSummary>> = lex_table .states .iter() .map(|state| { ruled_out_chars.clear(); // For each state transition, compute the set of character ranges // that need to be checked. state .advance_actions .iter() .map(|(chars, action)| { let (chars, is_included) = match chars { CharacterSet::Include(c) => (c, true), CharacterSet::Exclude(c) => (c, false), }; let mut call_id = None; let mut ranges = CharacterSet::ranges(chars, &ruled_out_chars).collect::<Vec<_>>(); if is_included { ruled_out_chars.extend(chars.iter().map(|c| *c as u32)); } else { ranges.insert(0, '\0'..'\0') } // Record any large character sets so that they can be extracted // into helper functions, reducing code duplication. if extract_helper_functions && ranges.len() > LARGE_CHARACTER_RANGE_COUNT { let char_set_symbol = self .symbol_for_advance_action(action, &lex_table) .expect("No symbol for lex state"); let mut count_for_symbol = 0; for (i, info) in large_character_sets.iter_mut().enumerate() { if info.ranges == ranges { call_id = Some(i); info.usage_count += 1; break; } if info.symbol == char_set_symbol { count_for_symbol += 1; } } if call_id.is_none() { call_id = Some(large_character_sets.len()); large_character_sets.push(LargeCharacterSetInfo { symbol: char_set_symbol, index: count_for_symbol + 1, ranges: ranges.clone(), usage_count: 1, }); } } TransitionSummary { is_included, ranges, call_id, } }) .collect() }) .collect(); // Generate a helper function for each large character set. 
let mut sorted_large_char_sets: Vec<_> = large_character_sets.iter().map(|e| e).collect(); sorted_large_char_sets.sort_unstable_by_key(|info| (info.symbol, info.index)); for info in sorted_large_char_sets { if info.usage_count > 1 { add_line!( self, "static inline bool {}_character_set_{}(int32_t lookahead) {{", self.symbol_ids[&info.symbol], info.index ); indent!(self); add_line!(self, "return"); indent!(self); add_whitespace!(self); self.add_character_range_conditions(&info.ranges, true, 0); add!(self, ";\n"); dedent!(self); dedent!(self); add_line!(self, "}}"); add_line!(self, ""); } } add_line!( self, "static bool {}(TSLexer *lexer, TSStateId state) {{", name ); indent!(self); add_line!(self, "START_LEXER();"); add_line!(self, "eof = lexer->eof(lexer);"); add_line!(self, "switch (state) {{"); indent!(self); for (i, state) in lex_table.states.into_iter().enumerate() { add_line!(self, "case {}:", i); indent!(self); self.add_lex_state(state, &state_transition_summaries[i], &large_character_sets); dedent!(self); } add_line!(self, "default:"); indent!(self); add_line!(self, "return false;"); dedent!(self); dedent!(self); add_line!(self, "}}"); dedent!(self); add_line!(self, "}}"); add_line!(self, ""); } fn symbol_for_advance_action( &self, action: &AdvanceAction, lex_table: &LexTable, ) -> Option<Symbol> { let mut state_ids = vec![action.state]; let mut i = 0; while i < state_ids.len() { let id = state_ids[i]; let state = &lex_table.states[id]; if let Some(accept) = state.accept_action { return Some(accept); } for (_, action) in &state.advance_actions { if !state_ids.contains(&action.state) { state_ids.push(action.state); } } i += 1; } return None; } fn add_lex_state( &mut self, state: LexState, transition_info: &Vec<TransitionSummary>, large_character_sets: &Vec<LargeCharacterSetInfo>, ) { if let Some(accept_action) = state.accept_action { add_line!(self, "ACCEPT_TOKEN({});", self.symbol_ids[&accept_action]); } if let Some(eof_action) = state.eof_action { add_line!(self, "if (eof) ADVANCE({});", eof_action.state); } for (i, (_, action)) in state.advance_actions.into_iter().enumerate() { let transition = &transition_info[i]; add_whitespace!(self); // If there is a helper function for this transition's character // set, then generate a call to that helper function. if let Some(call_id) = transition.call_id { let info = &large_character_sets[call_id]; if info.usage_count > 1 { add!(self, "if ("); if !transition.is_included { add!(self, "!"); } add!( self, "{}_character_set_{}(lookahead)) ", self.symbol_ids[&info.symbol], info.index ); self.add_advance_action(&action); add!(self, "\n"); continue; } } // Otherwise, generate code to compare the lookahead character // with all of the character ranges. 
if transition.ranges.len() > 0 { add!(self, "if ("); self.add_character_range_conditions(&transition.ranges, transition.is_included, 2); add!(self, ") "); } self.add_advance_action(&action); add!(self, "\n"); } add_line!(self, "END_STATE();"); } fn add_character_range_conditions( &mut self, ranges: &[Range<char>], is_included: bool, indent_count: usize, ) -> bool { let mut line_break = "\n".to_string(); for _ in 0..self.indent_level + indent_count { line_break.push_str(" "); } let mut did_add = false; for range in ranges { if is_included { if did_add { add!(self, " ||{}", line_break); } if range.end == range.start { add!(self, "lookahead == "); self.add_character(range.start); } else if range.end as u32 == range.start as u32 + 1 { add!(self, "lookahead == "); self.add_character(range.start); add!(self, " ||{}lookahead == ", line_break); self.add_character(range.end); } else { add!(self, "("); self.add_character(range.start); add!(self, " <= lookahead && lookahead <= "); self.add_character(range.end); add!(self, ")"); } } else { if did_add { add!(self, " &&{}", line_break); } if range.end == range.start { add!(self, "lookahead != "); self.add_character(range.start); } else if range.end as u32 == range.start as u32 + 1 { add!(self, "lookahead != "); self.add_character(range.start); add!(self, " &&{}lookahead != ", line_break); self.add_character(range.end); } else { add!(self, "(lookahead < "); self.add_character(range.start); add!(self, " || "); self.add_character(range.end); add!(self, " < lookahead)"); } } did_add = true; } did_add } fn add_advance_action(&mut self, action: &AdvanceAction) { if action.in_main_token { add!(self, "ADVANCE({});", action.state); } else { add!(self, "SKIP({})", action.state); } } fn add_lex_modes_list(&mut self) { add_line!(self, "static TSLexMode ts_lex_modes[STATE_COUNT] = {{"); indent!(self); for (i, state) in self.parse_table.states.iter().enumerate() { if state.is_non_terminal_extra && state.terminal_entries.len() == 1 && *state.terminal_entries.iter().next().unwrap().0 == Symbol::end() { add_line!(self, "[{}] = {{(TSStateId)(-1)}},", i,); } else if state.external_lex_state_id > 0 { add_line!( self, "[{}] = {{.lex_state = {}, .external_lex_state = {}}},", i, state.lex_state_id, state.external_lex_state_id ); } else { add_line!(self, "[{}] = {{.lex_state = {}}},", i, state.lex_state_id); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_external_token_enum(&mut self) { add_line!(self, "enum {{"); indent!(self); for i in 0..self.syntax_grammar.external_tokens.len() { add_line!( self, "{} = {},", self.external_token_id(&self.syntax_grammar.external_tokens[i]), i ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_external_scanner_symbol_map(&mut self) { add_line!( self, "static TSSymbol ts_external_scanner_symbol_map[EXTERNAL_TOKEN_COUNT] = {{" ); indent!(self); for i in 0..self.syntax_grammar.external_tokens.len() { let token = &self.syntax_grammar.external_tokens[i]; let id_token = token .corresponding_internal_token .unwrap_or(Symbol::external(i)); add_line!( self, "[{}] = {},", self.external_token_id(&token), self.symbol_ids[&id_token], ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_external_scanner_states_list(&mut self) { add_line!( self, "static bool ts_external_scanner_states[{}][EXTERNAL_TOKEN_COUNT] = {{", self.parse_table.external_lex_states.len(), ); indent!(self); for i in 0..self.parse_table.external_lex_states.len() { if !self.parse_table.external_lex_states[i].is_empty() 
{ add_line!(self, "[{}] = {{", i); indent!(self); for token in self.parse_table.external_lex_states[i].iter() { add_line!( self, "[{}] = true,", self.external_token_id(&self.syntax_grammar.external_tokens[token.index]) ); } dedent!(self); add_line!(self, "}},"); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_parse_table(&mut self) { let mut parse_table_entries = Vec::new(); let mut next_parse_action_list_index = 0; self.get_parse_action_list_id( &ParseTableEntry { actions: Vec::new(), reusable: false, }, &mut parse_table_entries, &mut next_parse_action_list_index, ); add_line!( self, "static uint16_t ts_parse_table[LARGE_STATE_COUNT][SYMBOL_COUNT] = {{", ); indent!(self); let mut terminal_entries = Vec::new(); let mut nonterminal_entries = Vec::new(); for (i, state) in self .parse_table .states .iter() .enumerate() .take(self.large_state_count) { add_line!(self, "[{}] = {{", i); indent!(self); // Ensure the entries are in a deterministic order, since they are // internally represented as a hash map. terminal_entries.clear(); nonterminal_entries.clear(); terminal_entries.extend(state.terminal_entries.iter()); nonterminal_entries.extend(state.nonterminal_entries.iter()); terminal_entries.sort_unstable_by_key(|e| self.symbol_order.get(e.0)); nonterminal_entries.sort_unstable_by_key(|k| k.0); for (symbol, action) in &nonterminal_entries { add_line!( self, "[{}] = STATE({}),", self.symbol_ids[symbol], match action { GotoAction::Goto(state) => *state, GotoAction::ShiftExtra => i, } ); } for (symbol, entry) in &terminal_entries { let entry_id = self.get_parse_action_list_id( entry, &mut parse_table_entries, &mut next_parse_action_list_index, ); add_line!( self, "[{}] = ACTIONS({}),", self.symbol_ids[symbol], entry_id ); } dedent!(self); add_line!(self, "}},"); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); if self.large_state_count < self.parse_table.states.len() { add_line!(self, "static uint16_t ts_small_parse_table[] = {{"); indent!(self); let mut index = 0; let mut small_state_indices = Vec::new(); let mut symbols_by_value: HashMap<(usize, SymbolType), Vec<Symbol>> = HashMap::new(); for state in self.parse_table.states.iter().skip(self.large_state_count) { small_state_indices.push(index); symbols_by_value.clear(); terminal_entries.clear(); terminal_entries.extend(state.terminal_entries.iter()); terminal_entries.sort_unstable_by_key(|e| self.symbol_order.get(e.0)); // In a given parse state, many lookahead symbols have the same actions. // So in the "small state" representation, group symbols by their action // in order to avoid repeating the action. 
for (symbol, entry) in &terminal_entries { let entry_id = self.get_parse_action_list_id( entry, &mut parse_table_entries, &mut next_parse_action_list_index, ); symbols_by_value .entry((entry_id, SymbolType::Terminal)) .or_default() .push(**symbol); } for (symbol, action) in &state.nonterminal_entries { let state_id = match action { GotoAction::Goto(i) => *i, GotoAction::ShiftExtra => { self.large_state_count + small_state_indices.len() - 1 } }; symbols_by_value .entry((state_id, SymbolType::NonTerminal)) .or_default() .push(*symbol); } let mut values_with_symbols = symbols_by_value.drain().collect::<Vec<_>>(); values_with_symbols.sort_unstable_by_key(|((value, kind), symbols)| { (symbols.len(), *kind, *value, symbols[0]) }); add_line!(self, "[{}] = {},", index, values_with_symbols.len()); indent!(self); for ((value, kind), symbols) in values_with_symbols.iter_mut() { if *kind == SymbolType::NonTerminal { add_line!(self, "STATE({}), {},", value, symbols.len()); } else { add_line!(self, "ACTIONS({}), {},", value, symbols.len()); } symbols.sort_unstable(); indent!(self); for symbol in symbols { add_line!(self, "{},", self.symbol_ids[symbol]); } dedent!(self); } dedent!(self); index += 1 + values_with_symbols .iter() .map(|(_, symbols)| 2 + symbols.len()) .sum::<usize>(); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); add_line!(self, "static uint32_t ts_small_parse_table_map[] = {{"); indent!(self); for i in self.large_state_count..self.parse_table.states.len() { add_line!( self, "[SMALL_STATE({})] = {},", i, small_state_indices[i - self.large_state_count] ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } self.add_parse_action_list(parse_table_entries); } fn add_parse_action_list(&mut self, parse_table_entries: Vec<(usize, ParseTableEntry)>) { add_line!(self, "static TSParseActionEntry ts_parse_actions[] = {{"); indent!(self); for (i, entry) in parse_table_entries { add!( self, " [{}] = {{.entry = {{.count = {}, .reusable = {}}}}},", i, entry.actions.len(), entry.reusable ); for action in entry.actions { add!(self, " "); match action { ParseAction::Accept => add!(self, " ACCEPT_INPUT()"), ParseAction::Recover => add!(self, "RECOVER()"), ParseAction::ShiftExtra => add!(self, "SHIFT_EXTRA()"), ParseAction::Shift { state, is_repetition, } => { if is_repetition { add!(self, "SHIFT_REPEAT({})", state); } else { add!(self, "SHIFT({})", state); } } ParseAction::Reduce { symbol, child_count, dynamic_precedence, production_id, .. 
} => { add!(self, "REDUCE({}, {}", self.symbol_ids[&symbol], child_count); if dynamic_precedence != 0 { add!(self, ", .dynamic_precedence = {}", dynamic_precedence); } if production_id != 0 { add!(self, ", .production_id = {}", production_id); } add!(self, ")"); } } add!(self, ",") } add!(self, "\n"); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_parser_export(&mut self) { let language_function_name = format!("tree_sitter_{}", self.language_name); let external_scanner_name = format!("{}_external_scanner", language_function_name); add_line!(self, "#ifdef __cplusplus"); add_line!(self, r#"extern "C" {{"#); add_line!(self, "#endif"); if !self.syntax_grammar.external_tokens.is_empty() { add_line!(self, "void *{}_create(void);", external_scanner_name); add_line!(self, "void {}_destroy(void *);", external_scanner_name); add_line!( self, "bool {}_scan(void *, TSLexer *, const bool *);", external_scanner_name ); add_line!( self, "unsigned {}_serialize(void *, char *);", external_scanner_name ); add_line!( self, "void {}_deserialize(void *, const char *, unsigned);", external_scanner_name ); add_line!(self, ""); } add_line!(self, "#ifdef _WIN32"); add_line!(self, "#define extern __declspec(dllexport)"); add_line!(self, "#endif"); add_line!(self, ""); add_line!( self, "extern const TSLanguage *{}(void) {{", language_function_name ); indent!(self); add_line!(self, "static TSLanguage language = {{"); indent!(self); add_line!(self, ".version = LANGUAGE_VERSION,"); add_line!(self, ".symbol_count = SYMBOL_COUNT,"); add_line!(self, ".alias_count = ALIAS_COUNT,"); add_line!(self, ".token_count = TOKEN_COUNT,"); add_line!(self, ".external_token_count = EXTERNAL_TOKEN_COUNT,"); add_line!(self, ".symbol_names = ts_symbol_names,"); add_line!(self, ".symbol_metadata = ts_symbol_metadata,"); add_line!(self, ".parse_table = (const uint16_t *)ts_parse_table,"); add_line!(self, ".parse_actions = ts_parse_actions,"); add_line!(self, ".lex_modes = ts_lex_modes,"); if !self.parse_table.production_infos.is_empty() { add_line!( self, ".alias_sequences = (const TSSymbol *)ts_alias_sequences," ); } add_line!( self, ".max_alias_sequence_length = MAX_ALIAS_SEQUENCE_LENGTH," ); add_line!(self, ".lex_fn = ts_lex,"); if let Some(keyword_capture_token) = self.keyword_capture_token { add_line!(self, ".keyword_lex_fn = ts_lex_keywords,"); add_line!( self, ".keyword_capture_token = {},", self.symbol_ids[&keyword_capture_token] ); } if !self.syntax_grammar.external_tokens.is_empty() { add_line!(self, ".external_scanner = {{"); indent!(self); add_line!(self, "(const bool *)ts_external_scanner_states,"); add_line!(self, "ts_external_scanner_symbol_map,"); add_line!(self, "{}_create,", external_scanner_name); add_line!(self, "{}_destroy,", external_scanner_name); add_line!(self, "{}_scan,", external_scanner_name); add_line!(self, "{}_serialize,", external_scanner_name); add_line!(self, "{}_deserialize,", external_scanner_name); dedent!(self); add_line!(self, "}},"); } add_line!(self, ".field_count = FIELD_COUNT,"); if !self.field_names.is_empty() { add_line!( self, ".field_map_slices = (const TSFieldMapSlice *)ts_field_map_slices," ); add_line!( self, ".field_map_entries = (const TSFieldMapEntry *)ts_field_map_entries," ); add_line!(self, ".field_names = ts_field_names,"); } add_line!(self, ".large_state_count = LARGE_STATE_COUNT,"); if self.large_state_count < self.parse_table.states.len() { add_line!( self, ".small_parse_table = (const uint16_t *)ts_small_parse_table," ); add_line!( self, 
".small_parse_table_map = (const uint32_t *)ts_small_parse_table_map," ); } add_line!(self, ".public_symbol_map = ts_symbol_map,"); if self.next_abi { add_line!(self, ".alias_map = ts_non_terminal_alias_map,"); add_line!(self, ".state_count = STATE_COUNT,"); } dedent!(self); add_line!(self, "}};"); add_line!(self, "return &language;"); dedent!(self); add_line!(self, "}}"); add_line!(self, "#ifdef __cplusplus"); add_line!(self, "}}"); add_line!(self, "#endif"); } fn get_parse_action_list_id( &self, entry: &ParseTableEntry, parse_table_entries: &mut Vec<(usize, ParseTableEntry)>, next_parse_action_list_index: &mut usize, ) -> usize { if let Some((index, _)) = parse_table_entries.iter().find(|(_, e)| *e == *entry) { return *index; } let result = *next_parse_action_list_index; parse_table_entries.push((result, entry.clone())); *next_parse_action_list_index += 1 + entry.actions.len(); result } fn get_field_map_id( &self, flat_field_map: &Vec<(String, FieldLocation)>, flat_field_maps: &mut Vec<(usize, Vec<(String, FieldLocation)>)>, next_flat_field_map_index: &mut usize, ) -> usize { if let Some((index, _)) = flat_field_maps.iter().find(|(_, e)| *e == *flat_field_map) { return *index; } let result = *next_flat_field_map_index; flat_field_maps.push((result, flat_field_map.clone())); *next_flat_field_map_index += flat_field_map.len(); result } fn external_token_id(&self, token: &ExternalToken) -> String { format!( "ts_external_token_{}", self.sanitize_identifier(&token.name) ) } fn assign_symbol_id(&mut self, symbol: Symbol, used_identifiers: &mut HashSet<String>) { let mut id; if symbol == Symbol::end() { id = "ts_builtin_sym_end".to_string(); } else { let (name, kind) = self.metadata_for_symbol(symbol); id = match kind { VariableType::Auxiliary => format!("aux_sym_{}", self.sanitize_identifier(name)), VariableType::Anonymous => format!("anon_sym_{}", self.sanitize_identifier(name)), VariableType::Hidden | VariableType::Named => { format!("sym_{}", self.sanitize_identifier(name)) } }; let mut suffix_number = 1; let mut suffix = String::new(); while used_identifiers.contains(&id) { id.drain(id.len() - suffix.len()..); suffix_number += 1; suffix = suffix_number.to_string(); id += &suffix; } } used_identifiers.insert(id.clone()); self.symbol_ids.insert(symbol, id); } fn field_id(&self, field_name: &String) -> String { format!("field_{}", field_name) } fn metadata_for_symbol(&self, symbol: Symbol) -> (&str, VariableType) { match symbol.kind { SymbolType::End => ("end", VariableType::Hidden), SymbolType::NonTerminal => { let variable = &self.syntax_grammar.variables[symbol.index]; (&variable.name, variable.kind) } SymbolType::Terminal => { let variable = &self.lexical_grammar.variables[symbol.index]; (&variable.name, variable.kind) } SymbolType::External => { let token = &self.syntax_grammar.external_tokens[symbol.index]; (&token.name, token.kind) } } } fn sanitize_identifier(&self, name: &str) -> String { let mut result = String::with_capacity(name.len()); for c in name.chars() { if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' { result.push(c); } else { let replacement = match c { '~' => "TILDE", '`' => "BQUOTE", '!' 
=> "BANG", '@' => "AT", '#' => "POUND", '$' => "DOLLAR", '%' => "PERCENT", '^' => "CARET", '&' => "AMP", '*' => "STAR", '(' => "LPAREN", ')' => "RPAREN", '-' => "DASH", '+' => "PLUS", '=' => "EQ", '{' => "LBRACE", '}' => "RBRACE", '[' => "LBRACK", ']' => "RBRACK", '\\' => "BSLASH", '|' => "PIPE", ':' => "COLON", ';' => "SEMI", '"' => "DQUOTE", '\'' => "SQUOTE", '<' => "LT", '>' => "GT", ',' => "COMMA", '.' => "DOT", '?' => "QMARK", '/' => "SLASH", '\n' => "LF", '\r' => "CR", '\t' => "TAB", _ => continue, }; if !result.is_empty() && !result.ends_with("_") { result.push('_'); } result += replacement; } } result } fn sanitize_string(&self, name: &str) -> String { let mut result = String::with_capacity(name.len()); for c in name.chars() { match c { '\"' => result += "\\\"", '?' => result += "\\?", '\\' => result += "\\\\", '\u{000c}' => result += "\\f", '\n' => result += "\\n", '\r' => result += "\\r", '\t' => result += "\\t", _ => result.push(c), } } result } fn add_character(&mut self, c: char) { match c { '\'' => add!(self, "'\\''"), '\\' => add!(self, "'\\\\'"), '\u{000c}' => add!(self, "'\\f'"), '\n' => add!(self, "'\\n'"), '\t' => add!(self, "'\\t'"), '\r' => add!(self, "'\\r'"), _ => { if c == ' ' || c.is_ascii_graphic() { add!(self, "'{}'", c) } else { add!(self, "{}", c as u32) } } } } } /// Returns a String of C code for the given components of a parser. /// /// # Arguments /// /// * `name` - A string slice containing the name of the language /// * `parse_table` - The generated parse table for the language /// * `main_lex_table` - The generated lexing table for the language /// * `keyword_lex_table` - The generated keyword lexing table for the language /// * `keyword_capture_token` - A symbol indicating which token is used /// for keyword capture, if any. /// * `syntax_grammar` - The syntax grammar extracted from the language's grammar /// * `lexical_grammar` - The lexical grammar extracted from the language's grammar /// * `simple_aliases` - A map describing the global rename rules that should apply. /// the keys are symbols that are *always* aliased in the same way, and the values /// are the aliases that are applied to those symbols. /// * `next_abi` - A boolean indicating whether to opt into the new, unstable parse /// table format. This is mainly used for testing, when developing Tree-sitter itself. pub(crate) fn render_c_code( name: &str, parse_table: ParseTable, main_lex_table: LexTable, keyword_lex_table: LexTable, keyword_capture_token: Option<Symbol>, syntax_grammar: SyntaxGrammar, lexical_grammar: LexicalGrammar, simple_aliases: AliasMap, next_abi: bool, ) -> String { Generator { buffer: String::new(), indent_level: 0, language_name: name.to_string(), large_state_count: 0, parse_table, main_lex_table, keyword_lex_table, keyword_capture_token, syntax_grammar, lexical_grammar, simple_aliases, symbol_ids: HashMap::new(), symbol_order: HashMap::new(), alias_ids: HashMap::new(), symbol_map: HashMap::new(), unique_aliases: Vec::new(), field_names: Vec::new(), next_abi, } .generate() }
{ let mut flat_field_maps = vec![]; let mut next_flat_field_map_index = 0; self.get_field_map_id( &Vec::new(), &mut flat_field_maps, &mut next_flat_field_map_index, ); let mut field_map_ids = Vec::new(); for production_info in &self.parse_table.production_infos { if !production_info.field_map.is_empty() { let mut flat_field_map = Vec::new(); for (field_name, locations) in &production_info.field_map { for location in locations { flat_field_map.push((field_name.clone(), *location)); } } field_map_ids.push(( self.get_field_map_id( &flat_field_map, &mut flat_field_maps, &mut next_flat_field_map_index, ), flat_field_map.len(), )); } else { field_map_ids.push((0, 0)); } } add_line!( self, "static const TSFieldMapSlice ts_field_map_slices[{}] = {{", self.parse_table.production_infos.len(), ); indent!(self); for (production_id, (row_id, length)) in field_map_ids.into_iter().enumerate() { if length > 0 { add_line!( self, "[{}] = {{.index = {}, .length = {}}},", production_id, row_id, length ); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); add_line!( self, "static const TSFieldMapEntry ts_field_map_entries[] = {{", ); indent!(self); for (row_index, field_pairs) in flat_field_maps.into_iter().skip(1) { add_line!(self, "[{}] =", row_index); indent!(self); for (field_name, location) in field_pairs { add_whitespace!(self); add!(self, "{{{}, {}", self.field_id(&field_name), location.index); if location.inherited { add!(self, ", .inherited = true"); } add!(self, "}},\n"); } dedent!(self); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); }
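The comments in the parse-table generator above describe the "small state" encoding: within one parse state many lookahead symbols share the same action, so symbols are grouped by their action and each action is written out only once. A minimal Python sketch of that grouping idea, using made-up symbol names and integer action ids rather than tree-sitter's actual table layout:

from collections import defaultdict

def encode_small_state(entries):
    # entries: mapping of lookahead symbol -> action id.
    # Returns a flat list in the spirit of the "small state" layout:
    # [group_count, action, symbol_count, sym..., action, symbol_count, sym...]
    groups = defaultdict(list)
    for symbol, action in entries.items():
        groups[action].append(symbol)
    encoded = [len(groups)]
    for action, symbols in sorted(groups.items()):
        encoded.extend([action, len(symbols)])
        encoded.extend(sorted(symbols))
    return encoded

# Three symbols share action 7, so action 7 is emitted only once.
print(encode_small_state({"sym_a": 7, "sym_b": 7, "sym_c": 7, "sym_d": 9}))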
logutils.go
package errtools import "github.com/go-errors/errors" func Wrap(e interface{}) error
{ if e == nil { return nil } return errors.Wrap(e, 1) }
chart_remove.go
/* Copyright The Helm Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package action import ( "io" "github.com/hinfinite/helm/internal/experimental/registry" ) // ChartRemove performs a chart remove operation. type ChartRemove struct { cfg *Configuration } // NewChartRemove creates a new ChartRemove object with the given configuration. func NewChartRemove(cfg *Configuration) *ChartRemove { return &ChartRemove{ cfg: cfg, } } // Run executes the chart remove operation func (a *ChartRemove) Run(out io.Writer, ref string) error { r, err := registry.ParseReference(ref) if err != nil
return a.cfg.RegistryClient.RemoveChart(r) }
{ return err }
version.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package create import ( "log" "os" "path/filepath" "regexp" "strings" "sigs.k8s.io/apiserver-builder-alpha/cmd/apiserver-boot/boot/util" "github.com/spf13/cobra" ) var versionName string var ignoreVersionExists bool = false var createVersionCmd = &cobra.Command{ Use: "version", Short: "Creates an API group and version", Long: `Creates an API group and version. Will not recreate group if already exists.`, Run: RunCreateVersion, } func AddCreateVersion(cmd *cobra.Command) { createVersionCmd.Flags().StringVar(&groupName, "group", "", "name of the API group to create") createVersionCmd.Flags().StringVar(&versionName, "version", "", "name of the API version to create") cmd.AddCommand(createVersionCmd) createVersionCmd.AddCommand(createResourceCmd) } func
(cmd *cobra.Command, args []string) { if _, err := os.Stat("pkg"); err != nil { log.Fatalf("could not find 'pkg' directory. must run apiserver-boot init before creating resources") } util.GetDomain() if len(groupName) == 0 { log.Fatalf("Must specify --group") } if len(versionName) == 0 { log.Fatalf("Must specify --version") } if strings.ToLower(groupName) != groupName { log.Fatalf("--group must be lowercase was (%s)", groupName) } versionMatch := regexp.MustCompile("^v\\d+(alpha\\d+|beta\\d+)*$") if !versionMatch.MatchString(versionName) { log.Fatalf( "--version has bad format. must match ^v\\d+(alpha\\d+|beta\\d+)*$. "+ "e.g. v1alpha1,v1beta1,v1 was(%s)", versionName) } cr := util.GetCopyright(copyright) ignoreGroupExists = true createGroup(cr) createVersion(cr) } func createVersion(boilerplate string) { dir, err := os.Getwd() if err != nil { log.Fatalf("%v\n", err) os.Exit(-1) } path := filepath.Join(dir, "pkg", "apis", groupName, versionName, "doc.go") created := util.WriteIfNotFound(path, "version-template", versionTemplate, versionTemplateArgs{ boilerplate, util.Domain, groupName, versionName, util.Repo, }) if !created && !ignoreVersionExists { log.Fatalf("API group version %s/%s already exists.", groupName, versionName) } } type versionTemplateArgs struct { BoilerPlate string Domain string Group string Version string Repo string } var versionTemplate = ` {{.BoilerPlate}} // Api versions allow the api contract for a resource to be changed while keeping // backward compatibility by support multiple concurrent versions // of the same resource // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen={{.Repo}}/pkg/apis/{{.Group}} // +k8s:defaulter-gen=TypeMeta // +groupName={{.Group}}.{{.Domain}} package {{.Version}} // import "{{.Repo}}/pkg/apis/{{.Group}}/{{.Version}}" `
RunCreateVersion
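The --version validation above is driven entirely by one regular expression. A standalone Python sketch (not part of apiserver-boot) showing which version names that pattern accepts and rejects:

import re

# Same pattern the command applies to --version values.
VERSION_RE = re.compile(r"^v\d+(alpha\d+|beta\d+)*$")

for name in ["v1", "v1alpha1", "v1beta1", "v2beta3", "V1", "v1alpha", "1.0"]:
    print(name, bool(VERSION_RE.match(name)))  # only the first four are accepted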
Header.js
import React from 'react'; import { mount } from 'enzyme'; import renderer from 'react-test-renderer'; import Header from '../../components/Header'; import 'jest-styled-components'; describe('<Header />', () => { let nagIndex, nagNew; beforeEach(() => { nagIndex = jest.fn(); nagNew = jest.fn(); }); it('should match snapshot', () => { const tree = renderer .create(<Header nagIndex={nagIndex} nagNew={nagNew} />) .toJSON(); expect(tree).toMatchSnapshot(); }); it('should have correct elements and children', () => { const header = mount(<Header nagIndex={nagIndex} nagNew={nagNew} />); expect(header.find('header').length).toBe(1); expect(header.find('a[href="https://www.gauti.info/nag-me"]').length).toBe(
}); it('should call nagIndex', () => { const header = mount(<Header nagIndex={nagIndex} nagNew={nagNew} />); header.find('button[title="Nags Index"]').simulate('click'); expect(nagIndex.mock.calls.length).toBe(1); }); it('should call nagNew', () => { const header = mount(<Header nagIndex={nagIndex} nagNew={nagNew} />); header.find('button[title="Create a nag"]').simulate('click'); expect(nagNew.mock.calls.length).toBe(1); }); });
1 ); expect(header.find('button[title="Nags Index"]').length).toBe(1); expect(header.find('button[title="Create a nag"]').length).toBe(1);
directives.js
(function () { 'use strict'; var app = angular.module('myApp.directives', []); // Alternative of ng-include .. app.directive('myInclude', function () { return { replace: true, restrict: 'A', templateUrl: function (iElement, iAttrs) { if (!iAttrs.myInclude) throw new Error("my-include: template url must be provided");
return iAttrs.myInclude; } }; }); })();
ty_expect.rs
#![allow(unused)] use super::*; /// Constraints imposed on the type of an expression. #[derive(Clone)] pub(crate) enum TyExpect { /// Unimplemented part Todo, Unknown, BoolOrInt, Number, NumberOrPtr, IsizeOrUsize, Exact(KTy2), } impl TyExpect { pub(crate) fn unit() -> TyExpect { TyExpect::Exact(KTy2::Un
pect { TyExpect::Exact(KTy2::BOOL) } pub(crate) fn from(ty: &KTy2) -> Self { if ty.is_unresolved() { Self::Todo } else { TyExpect::Exact(ty.clone()) } } pub(crate) fn as_number(&self) -> Option<KNumberTy> { match self { TyExpect::Exact(KTy2::Number(it)) => Some(*it), _ => None, } } pub(crate) fn try_unwrap_ptr(self, ty_env: &KTyEnv) -> Self { match self { TyExpect::NumberOrPtr => TyExpect::Unknown, TyExpect::Exact(ty) => match ty.as_ptr(ty_env) { Some((_, ty)) => { // FIXME: does TyExpect need to own the type? // TyExpect::Exact(&ty), TyExpect::Todo } None => TyExpect::Unknown, }, _ => TyExpect::Todo, } } pub(crate) fn meet(self, other: Self) -> Self { // TODO: implement this properly match self { TyExpect::Todo => other, _ => self, } } pub(crate) fn display(&self, ty_env: &KTyEnv, mod_outline: &KModOutline) -> String { match self { TyExpect::Todo => "TODO".into(), TyExpect::Unknown => "unknown".into(), TyExpect::BoolOrInt => "(bool | iNN | uNN | cNN)".into(), TyExpect::Number => "(iNN | uNN | fNN | cNN)".into(), TyExpect::NumberOrPtr => "(iNN | uNN | fNN | cNN | *unknown | *mut unknown)".into(), TyExpect::IsizeOrUsize => "(isize | usize)".into(), TyExpect::Exact(ty) => ty.display(ty_env, mod_outline), } } }
it) } pub(crate) fn bool() -> TyEx
flow_schema_status.rs
// Generated from definition io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus /// FlowSchemaStatus represents the current state of a FlowSchema. #[derive(Clone, Debug, Default, PartialEq)] pub struct FlowSchemaStatus { /// `conditions` is a list of the current states of FlowSchema. pub conditions: Option<Vec<crate::api::flowcontrol::v1alpha1::FlowSchemaCondition>>, } impl<'de> crate::serde::Deserialize<'de> for FlowSchemaStatus { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_conditions, Other, } impl<'de> crate::serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error { Ok(match v { "conditions" => Field::Key_conditions, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = FlowSchemaStatus; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("FlowSchemaStatus") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> { let mut value_conditions: Option<Vec<crate::api::flowcontrol::v1alpha1::FlowSchemaCondition>> = None; while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? { match key { Field::Key_conditions => value_conditions = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(FlowSchemaStatus { conditions: value_conditions, }) } } deserializer.deserialize_struct( "FlowSchemaStatus", &[ "conditions", ], Visitor, ) } } impl crate::serde::Serialize for FlowSchemaStatus { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer { let mut state = serializer.serialize_struct( "FlowSchemaStatus", self.conditions.as_ref().map_or(0, |_| 1), )?; if let Some(value) = &self.conditions { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "conditions", value)?; } crate::serde::ser::SerializeStruct::end(state) } } #[cfg(feature = "schemars")] impl crate::schemars::JsonSchema for FlowSchemaStatus { fn schema_name() -> String { "io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus".to_owned() } fn json_schema(__gen: &mut crate::schemars::gen::SchemaGenerator) -> crate::schemars::schema::Schema { crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("FlowSchemaStatus represents the current state of a FlowSchema.".to_owned()), ..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Object))), object: Some(Box::new(crate::schemars::schema::ObjectValidation { properties: IntoIterator::into_iter([ ( "conditions".to_owned(), crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("`conditions` is a list of the current states of FlowSchema.".to_owned()), 
..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Array))), array: Some(Box::new(crate::schemars::schema::ArrayValidation { items: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(__gen.subschema_for::<crate::api::flowcontrol::v1alpha1::FlowSchemaCondition>()))), ..Default::default() })), ..Default::default() }), ), ]).collect(), ..Default::default() })),
..Default::default() }) } }
libwallet.rs
// Copyright 2019 The Kepler Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! core::libtx specific tests use kepler_wallet_libwallet::Context; use kepler_wallet_util::kepler_core::core::transaction; use kepler_wallet_util::kepler_core::libtx::{aggsig, proof}; use kepler_wallet_util::kepler_keychain::{ BlindSum, BlindingFactor, ExtKeychain, ExtKeychainPath, Keychain, SwitchCommitmentType, }; use kepler_wallet_util::kepler_util::secp; use kepler_wallet_util::kepler_util::secp::key::{PublicKey, SecretKey}; use rand::thread_rng; fn kernel_sig_msg() -> secp::Message { transaction::KernelFeatures::Plain { fee: 0 } .kernel_sig_msg() .unwrap() } #[test] fn aggsig_sender_receiver_interaction() { let parent = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier(); let switch = &SwitchCommitmentType::Regular; let sender_keychain = ExtKeychain::from_random_seed(true).unwrap(); let receiver_keychain = ExtKeychain::from_random_seed(true).unwrap(); // Calculate the kernel excess here for convenience. // Normally this would happen during transaction building. let kernel_excess = { let id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let skey1 = sender_keychain.derive_key(0, &id1, switch).unwrap(); let skey2 = receiver_keychain.derive_key(0, &id1, switch).unwrap(); let keychain = ExtKeychain::from_random_seed(true).unwrap(); let blinding_factor = keychain .blind_sum( &BlindSum::new() .sub_blinding_factor(BlindingFactor::from_secret_key(skey1)) .add_blinding_factor(BlindingFactor::from_secret_key(skey2)), ) .unwrap(); keychain .secp() .commit(0, blinding_factor.secret_key(&keychain.secp()).unwrap()) .unwrap() }; let s_cx; let mut rx_cx; // sender starts the tx interaction let (sender_pub_excess, _sender_pub_nonce) = { let keychain = sender_keychain.clone(); let id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let skey = keychain.derive_key(0, &id1, switch).unwrap(); // dealing with an input here so we need to negate the blinding_factor // rather than use it as is let bs = BlindSum::new(); let blinding_factor = keychain .blind_sum(&bs.sub_blinding_factor(BlindingFactor::from_secret_key(skey))) .unwrap(); let blind = blinding_factor.secret_key(&keychain.secp()).unwrap(); s_cx = Context::new(&keychain.secp(), blind, &parent, false, 0); s_cx.get_public_keys(&keychain.secp()) }; let pub_nonce_sum; let pub_key_sum; // receiver receives partial tx let (receiver_pub_excess, _receiver_pub_nonce, rx_sig_part) = { let keychain = receiver_keychain.clone(); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); // let blind = blind_sum.secret_key(&keychain.secp())?; let blind = keychain.derive_key(0, &key_id, switch).unwrap(); rx_cx = Context::new(&keychain.secp(), blind, &parent, false, 1); let (pub_excess, pub_nonce) = rx_cx.get_public_keys(&keychain.secp()); rx_cx.add_output(&key_id, &None, 0); pub_nonce_sum = PublicKey::from_combination( keychain.secp(), vec![ &s_cx.get_public_keys(keychain.secp()).1, &rx_cx.get_public_keys(keychain.secp()).1, ], ) .unwrap(); pub_key_sum = PublicKey::from_combination( 
keychain.secp(), vec![ &s_cx.get_public_keys(keychain.secp()).0, &rx_cx.get_public_keys(keychain.secp()).0, ], ) .unwrap(); let msg = kernel_sig_msg(); let sig_part = aggsig::calculate_partial_sig( &keychain.secp(), &rx_cx.sec_key, &rx_cx.sec_nonce, &pub_nonce_sum, Some(&pub_key_sum), &msg, ) .unwrap(); (pub_excess, pub_nonce, sig_part) }; // check the sender can verify the partial signature // received in the response back from the receiver { let keychain = sender_keychain.clone(); let msg = kernel_sig_msg(); let sig_verifies = aggsig::verify_partial_sig( &keychain.secp(), &rx_sig_part, &pub_nonce_sum, &receiver_pub_excess, Some(&pub_key_sum), &msg, ); assert!(!sig_verifies.is_err()); } // now sender signs with their key let sender_sig_part = { let keychain = sender_keychain.clone(); let msg = kernel_sig_msg(); let sig_part = aggsig::calculate_partial_sig( &keychain.secp(), &s_cx.sec_key, &s_cx.sec_nonce, &pub_nonce_sum, Some(&pub_key_sum), &msg, ) .unwrap(); sig_part }; // check the receiver can verify the partial signature // received by the sender { let keychain = receiver_keychain.clone(); let msg = kernel_sig_msg(); let sig_verifies = aggsig::verify_partial_sig( &keychain.secp(), &sender_sig_part, &pub_nonce_sum, &sender_pub_excess, Some(&pub_key_sum), &msg, ); assert!(!sig_verifies.is_err()); } // Receiver now builds final signature from sender and receiver parts let (final_sig, final_pubkey) = { let keychain = receiver_keychain.clone(); let msg = kernel_sig_msg(); let our_sig_part = aggsig::calculate_partial_sig( &keychain.secp(), &rx_cx.sec_key, &rx_cx.sec_nonce, &pub_nonce_sum, Some(&pub_key_sum), &msg, ) .unwrap(); // Receiver now generates final signature from the two parts let final_sig = aggsig::add_signatures( &keychain.secp(), vec![&sender_sig_part, &our_sig_part], &pub_nonce_sum, ) .unwrap(); // Receiver calculates the final public key (to verify sig later) let final_pubkey = PublicKey::from_combination( keychain.secp(), vec![ &s_cx.get_public_keys(keychain.secp()).0, &rx_cx.get_public_keys(keychain.secp()).0, ], ) .unwrap(); (final_sig, final_pubkey) }; // Receiver checks the final signature verifies { let keychain = receiver_keychain.clone(); let msg = kernel_sig_msg(); // Receiver check the final signature verifies let sig_verifies = aggsig::verify_completed_sig( &keychain.secp(), &final_sig, &final_pubkey, Some(&final_pubkey), &msg, ); assert!(!sig_verifies.is_err()); } // Check we can verify the sig using the kernel excess { let keychain = ExtKeychain::from_random_seed(true).unwrap(); let msg = kernel_sig_msg(); let sig_verifies = aggsig::verify_single_from_commit(&keychain.secp(), &final_sig, &msg, &kernel_excess); assert!(!sig_verifies.is_err()); } } #[test] fn aggsig_sender_receiver_interaction_offset() { let parent = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier(); let switch = &SwitchCommitmentType::Regular; let sender_keychain = ExtKeychain::from_random_seed(true).unwrap(); let receiver_keychain = ExtKeychain::from_random_seed(true).unwrap(); // This is the kernel offset that we use to split the key // Summing these at the block level prevents the // kernels from being used to reconstruct (or identify) individual transactions let kernel_offset = SecretKey::new(&sender_keychain.secp(), &mut thread_rng()); // Calculate the kernel excess here for convenience. // Normally this would happen during transaction building. 
let kernel_excess = { let id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let skey1 = sender_keychain.derive_key(0, &id1, switch).unwrap(); let skey2 = receiver_keychain.derive_key(0, &id1, switch).unwrap(); let keychain = ExtKeychain::from_random_seed(true).unwrap(); let blinding_factor = keychain .blind_sum( &BlindSum::new() .sub_blinding_factor(BlindingFactor::from_secret_key(skey1)) .add_blinding_factor(BlindingFactor::from_secret_key(skey2)) // subtract the kernel offset here like as would when // verifying a kernel signature .sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset.clone())), ) .unwrap(); keychain .secp() .commit(0, blinding_factor.secret_key(&keychain.secp()).unwrap()) .unwrap() }; let s_cx; let mut rx_cx; // sender starts the tx interaction let (sender_pub_excess, _sender_pub_nonce) = { let keychain = sender_keychain.clone(); let id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let skey = keychain.derive_key(0, &id1, switch).unwrap(); // dealing with an input here so we need to negate the blinding_factor // rather than use it as is let blinding_factor = keychain .blind_sum( &BlindSum::new() .sub_blinding_factor(BlindingFactor::from_secret_key(skey)) // subtract the kernel offset to create an aggsig context // with our "split" key .sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset)), ) .unwrap(); let blind = blinding_factor.secret_key(&keychain.secp()).unwrap(); s_cx = Context::new(&keychain.secp(), blind, &parent, false, 0); s_cx.get_public_keys(&keychain.secp()) }; // receiver receives partial tx let pub_nonce_sum; let pub_key_sum; let (receiver_pub_excess, _receiver_pub_nonce, sig_part) = { let keychain = receiver_keychain.clone(); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let blind = keychain.derive_key(0, &key_id, switch).unwrap(); rx_cx = Context::new(&keychain.secp(), blind, &parent, false, 1); let (pub_excess, pub_nonce) = rx_cx.get_public_keys(&keychain.secp()); rx_cx.add_output(&key_id, &None, 0); pub_nonce_sum = PublicKey::from_combination( keychain.secp(), vec![ &s_cx.get_public_keys(keychain.secp()).1, &rx_cx.get_public_keys(keychain.secp()).1, ], ) .unwrap(); pub_key_sum = PublicKey::from_combination( keychain.secp(), vec![ &s_cx.get_public_keys(keychain.secp()).0, &rx_cx.get_public_keys(keychain.secp()).0, ], ) .unwrap(); let msg = kernel_sig_msg(); let sig_part = aggsig::calculate_partial_sig( &keychain.secp(), &rx_cx.sec_key, &rx_cx.sec_nonce, &pub_nonce_sum, Some(&pub_key_sum), &msg, ) .unwrap(); (pub_excess, pub_nonce, sig_part) }; // check the sender can verify the partial signature // received in the response back from the receiver { let keychain = sender_keychain.clone(); let msg = kernel_sig_msg(); let sig_verifies = aggsig::verify_partial_sig( &keychain.secp(), &sig_part, &pub_nonce_sum, &receiver_pub_excess, Some(&pub_key_sum), &msg, ); assert!(!sig_verifies.is_err()); } // now sender signs with their key let sender_sig_part = { let keychain = sender_keychain.clone(); let msg = kernel_sig_msg(); let sig_part = aggsig::calculate_partial_sig( &keychain.secp(), &s_cx.sec_key, &s_cx.sec_nonce, &pub_nonce_sum, Some(&pub_key_sum), &msg, ) .unwrap(); sig_part }; // check the receiver can verify the partial signature // received by the sender { let keychain = receiver_keychain.clone(); let msg = kernel_sig_msg(); let sig_verifies = aggsig::verify_partial_sig( &keychain.secp(), &sender_sig_part, &pub_nonce_sum, &sender_pub_excess, Some(&pub_key_sum), &msg, ); assert!(!sig_verifies.is_err()); } // Receiver now 
builds final signature from sender and receiver parts let (final_sig, final_pubkey) = { let keychain = receiver_keychain.clone(); let msg = kernel_sig_msg(); let our_sig_part = aggsig::calculate_partial_sig( &keychain.secp(), &rx_cx.sec_key, &rx_cx.sec_nonce, &pub_nonce_sum, Some(&pub_key_sum), &msg, ) .unwrap(); // Receiver now generates final signature from the two parts let final_sig = aggsig::add_signatures( &keychain.secp(), vec![&sender_sig_part, &our_sig_part], &pub_nonce_sum, ) .unwrap(); // Receiver calculates the final public key (to verify sig later) let final_pubkey = PublicKey::from_combination( keychain.secp(), vec![ &s_cx.get_public_keys(keychain.secp()).0, &rx_cx.get_public_keys(keychain.secp()).0, ], ) .unwrap(); (final_sig, final_pubkey) }; // Receiver checks the final signature verifies { let keychain = receiver_keychain.clone(); let msg = kernel_sig_msg(); // Receiver check the final signature verifies let sig_verifies = aggsig::verify_completed_sig( &keychain.secp(), &final_sig, &final_pubkey, Some(&final_pubkey), &msg, ); assert!(!sig_verifies.is_err()); } // Check we can verify the sig using the kernel excess { let keychain = ExtKeychain::from_random_seed(true).unwrap(); let msg = kernel_sig_msg(); let sig_verifies = aggsig::verify_single_from_commit(&keychain.secp(), &final_sig, &msg, &kernel_excess); assert!(!sig_verifies.is_err()); } } #[test] fn
() { let keychain = ExtKeychain::from_random_seed(true).unwrap(); let builder = proof::ProofBuilder::new(&keychain); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0); let switch = &SwitchCommitmentType::Regular; let commit = keychain.commit(5, &key_id, switch).unwrap(); let extra_data = [99u8; 64]; let proof = proof::create( &keychain, &builder, 5, &key_id, switch, commit, Some(extra_data.to_vec().clone()), ) .unwrap(); let proof_info = proof::rewind( keychain.secp(), &builder, commit, Some(extra_data.to_vec().clone()), proof, ) .unwrap(); assert!(proof_info.is_some()); let (r_amount, r_key_id, r_switch) = proof_info.unwrap(); assert_eq!(r_amount, 5); assert_eq!(r_key_id, key_id); assert_eq!(&r_switch, switch); // cannot rewind with a different commit let commit2 = keychain.commit(5, &key_id2, switch).unwrap(); let proof_info = proof::rewind( keychain.secp(), &builder, commit2, Some(extra_data.to_vec().clone()), proof, ) .unwrap(); assert!(proof_info.is_none()); // cannot rewind with a commitment to a different value let commit3 = keychain.commit(4, &key_id, switch).unwrap(); let proof_info = proof::rewind( keychain.secp(), &builder, commit3, Some(extra_data.to_vec().clone()), proof, ) .unwrap(); assert!(proof_info.is_none()); // cannot rewind with wrong extra committed data let wrong_extra_data = [98u8; 64]; let proof_info = proof::rewind( keychain.secp(), &builder, commit, Some(wrong_extra_data.to_vec().clone()), proof, ) .unwrap(); assert!(proof_info.is_none()); }
test_rewind_range_proof
intelix.py
import validators import requests import base64 from app import app def intelixlookup(ioc): # Get a token token = get_token() # use Validators to redirect the IOC to the correct Intelix endpoint if validators.ipv4(ioc): u = f"https://de.api.labs.sophos.com/lookup/ips/v1/{ioc}" elif validators.md5(ioc): u = f"https://de.api.labs.sophos.com/lookup/urls/v1/{ioc}" elif validators.sha256(ioc): u = f"https://de.api.labs.sophos.com/lookup/files/v1/{ioc}" h = {"Authorization": f"{token}"} r = requests.get(u, headers=h) j = r.json() response = {} # File responses if validators.sha256(ioc) or validators.md5(ioc): if 'reputationScore' in j: response['reputationScore'] = j['reputationScore'] if j['reputationScore'] <= 19: response['fileReputation'] = 'Malware' elif j['reputationScore'] <= 29: response['fileReputation'] = 'PUA (potentially unwanted application)' elif j['reputationScore'] <= 69: response['fileReputation'] = 'Unknown/suspicious' elif j['reputationScore'] <= 100: response['fileReputation'] = 'Known good' if 'detectionName' in j: response['detectionName'] = j['detectionName'] response['type'] = 'File Hash' # IP responses
else: response['category'] = 'Unknown IP Address' if 'ttl' in j: response['ttl'] = j['ttl'] response['type'] = 'IP Address' # Generic consistent responses if 'correlationId' in j: response['correlationId'] = j['correlationId'] if 'requestId' in j: response['requestId'] = j['requestId'] # Generic error handling based on responses # https://api.labs.sophos.com/doc/lookup/ips.html # https://api.labs.sophos.com/doc/lookup/files.html if 'error' in j: response['error'] = j['error'] if 'message' in j: response['message'] = j['message'] # Return a dict, Flask will return this as JSON to the browser return response def get_token(): # This is lazy, the token should be stored for quicker request times. creds = f"{app.config['INTELIX_CLIENT_ID']}:{app.config['INTELIX_CLIENT_SECRET']}" t = base64.b64encode(creds.encode("UTF-8")).decode("ascii") d = {'grant_type': 'client_credentials'} h = {'Authorization': f"Basic {t}", 'Content-Type': 'application/x-www-form-urlencoded' } r = requests.post('https://api.labs.sophos.com/oauth2/token', headers=h, data=d) r = r.json() return r['access_token']
if validators.ipv4(ioc): if 'category' in j: response['category'] = j['category']
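The reputationScore handling above maps the numeric score returned by Intelix onto coarse verdict bands. A minimal sketch of just that banding, with the thresholds taken from the code above (the helper name is illustrative, not part of the Intelix API):

def classify_reputation(score: int) -> str:
    # Thresholds mirror the fileReputation mapping used in intelixlookup().
    if score <= 19:
        return 'Malware'
    if score <= 29:
        return 'PUA (potentially unwanted application)'
    if score <= 69:
        return 'Unknown/suspicious'
    return 'Known good'

print(classify_reputation(15))  # Malware
print(classify_reputation(85))  # Known good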
hello_world.py
import logging def
(n): logging.basicConfig(level=logging.DEBUG) for i in range(n): logging.info(str(i) + ": Hello world") say(1) if __name__=="__main__": say(3)
say
0008_alter_post_content.py
# Generated by Django 3.2.4 on 2021-09-11 12:44 import ckeditor_uploader.fields from django.db import migrations class Migration(migrations.Migration):
operations = [ migrations.AlterField( model_name='post', name='content', field=ckeditor_uploader.fields.RichTextUploadingField(), ), ]
dependencies = [ ('blog', '0007_subscriber'), ]
sticky_regex.rs
use crate::util::ExprFactory; use swc_atoms::JsWord; use swc_common::DUMMY_SP; use swc_ecma_ast::*; use swc_ecma_visit::{noop_fold_type, Fold, FoldWith}; /// Compile ES2015 sticky regex to an ES5 RegExp constructor /// ///# Example ///## In /// /// ```js /// /o+/y; /// ``` /// ///## Out
/// new RegExp("o+", "y") /// ``` pub fn sticky_regex() -> impl 'static + Fold { StickyRegex } #[derive(Clone, Copy)] struct StickyRegex; impl Fold for StickyRegex { noop_fold_type!(); fn fold_expr(&mut self, e: Expr) -> Expr { let e = e.fold_children_with(self); match e { Expr::Lit(Lit::Regex(Regex { exp, flags, span })) => { if flags.contains('y') { let str_lit = |s: JsWord| { Box::new(Expr::Lit(Lit::Str(Str { span: DUMMY_SP, value: s, has_escape: false, kind: StrKind::Normal { contains_quote: false, }, }))) }; Expr::New(NewExpr { span, callee: Box::new(quote_ident!(span, "RegExp").into()), args: Some(vec![str_lit(exp).as_arg(), str_lit(flags).as_arg()]), type_args: Default::default(), }) } else { Expr::Lit(Lit::Regex(Regex { exp, flags, span })) } } _ => e, } } } #[cfg(test)] mod tests { use super::*; test!( ::swc_ecma_parser::Syntax::default(), |_| StickyRegex, babel_basic, "var re = /o+/y;", "var re = new RegExp('o+', 'y');" ); test!( ::swc_ecma_parser::Syntax::default(), |_| StickyRegex, babel_ignore_non_sticky, "var re = /o+/;", "var re = /o+/;" ); }
/// /// ```js
segments-memory-storage.ts
import { Segment } from "./loader-interface"; import { SegmentsStorage } from "./hybrid-loader"; export class SegmentsMemoryStorage implements SegmentsStorage { private cache: Map< string, { segment: Segment; lastAccessed: number } > = new Map(); constructor( private settings: { cachedSegmentExpiration: number; cachedSegmentsCount: number; } ) {} public async storeSegment(segment: Segment) { this.cache.set(segment.id, { segment, lastAccessed: performance.now(), }); } public async getSegmentsMap(masterSwarmId: string) { return this.cache; }
public async getSegment(id: string, masterSwarmId: string) { const cacheItem = this.cache.get(id); if (cacheItem === undefined) { return undefined; } cacheItem.lastAccessed = performance.now(); return cacheItem.segment; } public async hasSegment(id: string, masterSwarmId: string) { return this.cache.has(id); } public async clean( masterSwarmId: string, lockedSementsfilter?: (id: string) => boolean ) { const segmentsToDelete: string[] = []; const remainingSegments: { segment: Segment; lastAccessed: number; }[] = []; // Delete old segments const now = performance.now(); for (const cachedSegment of this.cache.values()) { if ( now - cachedSegment.lastAccessed > this.settings.cachedSegmentExpiration ) { segmentsToDelete.push(cachedSegment.segment.id); } else { remainingSegments.push(cachedSegment); } } // Delete segments over cached count let countOverhead = remainingSegments.length - this.settings.cachedSegmentsCount; if (countOverhead > 0) { remainingSegments.sort((a, b) => a.lastAccessed - b.lastAccessed); for (const cachedSegment of remainingSegments) { if ( lockedSementsfilter === undefined || !lockedSementsfilter(cachedSegment.segment.id) ) { segmentsToDelete.push(cachedSegment.segment.id); countOverhead--; if (countOverhead == 0) { break; } } } } segmentsToDelete.forEach((id) => this.cache.delete(id)); return segmentsToDelete.length > 0; } public async destroy() { this.cache.clear(); } }
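The clean() method above combines two eviction rules: drop segments older than cachedSegmentExpiration, then drop least-recently-used unlocked segments until at most cachedSegmentsCount remain. A small Python sketch of the same policy (standalone; names are illustrative, not the library's API):

import time

def select_evictions(last_accessed, max_age, max_count, is_locked=lambda sid: False):
    # last_accessed: dict of segment id -> timestamp in seconds.
    now = time.monotonic()
    expired = [sid for sid, ts in last_accessed.items() if now - ts > max_age]
    remaining = [(ts, sid) for sid, ts in last_accessed.items() if now - ts <= max_age]

    to_delete = list(expired)
    overhead = len(remaining) - max_count
    if overhead > 0:
        for _, sid in sorted(remaining):  # least recently used first
            if not is_locked(sid):
                to_delete.append(sid)
                overhead -= 1
                if overhead == 0:
                    break
    return to_delete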
cd_server.go
package gocd import ( "context" "errors" "fmt" "strings" "time" "github.com/liumingmin/gojenkins" "github.com/liumingmin/goutils/log" ) const ( RUN_STATUS_RUNNING = 1 RUN_STATUS_FINISH = 2 RUN_STATUS_ERR = 3 ) type CdServer struct { jenkins *gojenkins.Jenkins env string s3Info *CdS3Info nodeBroker *CdNodeBroker } type DeployResult struct { Status int Result string ConsoleOutput string } func NewCdServer(ctx context.Context, url, username, token, env string, options ...CdServerOption) *CdServer { jenkins := gojenkins.CreateJenkins(nil, url, username, token) _, err := jenkins.Init(ctx) if err != nil { log.Error(ctx, "jenkins init failed, err: %v", err) } cdServer := &CdServer{ jenkins: jenkins, env: env, nodeBroker: NewCdNodeBroker(jenkins, env, nil), } if len(options) > 0 { for _, option := range options { option(cdServer) } } return cdServer } func (j *CdServer) GetNodeBroker() *CdNodeBroker { return j.nodeBroker } func (j *CdServer) getOrCreateJob(ctx context.Context, service CdService, node *gojenkins.Node) (string, *gojenkins.Job, error) { idx := int64(service.IncDeployCounter()) % node.Raw.NumExecutors jobName := fmt.Sprintf("%v-%v-%v-%v-%v", service.GetCdScript().scriptVersion, j.env, service.GetName(), node.GetName(), idx) job, err := j.jenkins.GetJob(ctx, jobName) if err != nil || job == nil { taskConfig, err := service.GetCdScript().GetCdTaskScriptConfig(node.GetName()) //fmt.Println(taskConfig) if err != nil { return jobName, nil, err } _, err = j.jenkins.CreateJob(ctx, taskConfig, jobName) if err != nil
for i := 0; i < 3; i++ { job, err = j.jenkins.GetJob(ctx, jobName) if err != nil || job == nil { log.Debug(ctx, "GetJob failed: %v, err: %v", jobName, err) time.Sleep(time.Second) continue } log.Info(ctx, "GetJob ok: %v", jobName) break } } return jobName, job, nil } func (j *CdServer) DeploySimple(ctx context.Context, service CdService, nodeName string) (string, int64, error) { node := j.nodeBroker.GetNodeByName(nodeName) if node == nil { return "", 0, errors.New("not found node") } return j.deploy(ctx, service, node) } func (j *CdServer) deploy(ctx context.Context, service CdService, node *gojenkins.Node) (string, int64, error) { jobName, job, err := j.getOrCreateJob(ctx, service, node) if err != nil { return jobName, 0, err } //s3get env var s3EnvsStr strings.Builder for key, value := range j.s3Info.envVar() { s3EnvsStr.WriteString(fmt.Sprintf(" %v=%v", key, value)) } params := map[string]string{ "RUN_ENV": j.env, "S3GET_URL": j.s3Info.s3GetToolUrl, "S3ENV_VAR": s3EnvsStr.String(), } // service generate svc params svcParams := service.GetParams() for k, v := range svcParams { params[k] = v } taskId, err := job.InvokeSimple(ctx, params) if err != nil { log.Error(ctx, "job build failed: %v", err) return jobName, 0, err } return jobName, taskId, nil } func (j *CdServer) GetDeployResult(ctx context.Context, jobName string, taskId int64) (*DeployResult, error) { job, err := j.jenkins.GetJob(ctx, jobName) if err != nil { log.Error(ctx, "get job from jenkins failed: %v, err: %v", jobName, err) return nil, err } build, err := j.jenkins.GetBuildFromQueueID(ctx, job, taskId) if err != nil || build == nil { log.Error(ctx, "get build from jenkins failed: %v, err: %v", taskId, err) return nil, err } status := RUN_STATUS_RUNNING if !build.IsRunning(ctx) { if build.IsGood(ctx) { status = RUN_STATUS_FINISH } else { status = RUN_STATUS_ERR } } taskBuild := &DeployResult{ Status: status, Result: build.GetResult(), ConsoleOutput: build.GetConsoleOutput(ctx), } log.Info(ctx, "get build result from jenkins %v", taskBuild) return taskBuild, nil } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// type CdServerOption func(*CdServer) func CdServerNodeOption(options ...CdNodeOption) CdServerOption { return func(server *CdServer) { server.nodeBroker.SetDefCdNodeParam(NewCdNodeParam(options...)) } } func CdServerS3Option(s3AK, s3SK, s3Endpoint, s3Bucket, s3Region, s3getToolUrl string) CdServerOption { return func(server *CdServer) { server.s3Info = NewCdS3Info(s3AK, s3SK, s3Endpoint, s3Bucket, s3Region, s3getToolUrl) } }
{ log.Error(ctx, "CreateJob failed: %v, err: %v", jobName, err) return jobName, nil, err }
ap_list_element.py
# coding: utf-8 """ Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import iteritems class APListElement(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ APListElement - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'mac_address': 'str', 'state': 'str' } self.attribute_map = { 'mac_address': 'macAddress', 'state': 'state' } self._mac_address = None self._state = None @property def mac_address(self):
@mac_address.setter def mac_address(self, mac_address): """ Sets the mac_address of this APListElement. MAC address :param mac_address: The mac_address of this APListElement. :type: str """ self._mac_address = mac_address @property def state(self): """ Gets the state of this APListElement. AP mote state :return: The state of this APListElement. :rtype: str """ return self._state @state.setter def state(self, state): """ Sets the state of this APListElement. AP mote state :param state: The state of this APListElement. :type: str """ allowed_values = ["lost", "negotiating", "connected", "operational", "decommissioned"] if state not in allowed_values: raise ValueError( "Invalid value for `state`, must be one of {0}" .format(allowed_values) ) self._state = state def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
""" Gets the mac_address of this APListElement. MAC address :return: The mac_address of this APListElement. :rtype: str """ return self._mac_address
user.d.ts
export type UserInfo = {
  username: string
  firstName: string
  lastName: string
}
generateSinPoints.ts
/** generates points along a sin wave, with increasing height toward the center. */
export default function generateSinPoints({
  width,
  height,
  numberOfWaves = 10,
  pointsPerWave = 10,
}: {
  width: number;
  height: number;
  numberOfWaves?: number;
  pointsPerWave?: number;
}) {
  const waveLength = width / numberOfWaves;
  const distanceBetweenPoints = waveLength / pointsPerWave;
  const sinPoints: { x: number; y: number }[] = [];

  for (let waveIndex = 0; waveIndex < numberOfWaves; waveIndex += 1) {
    const waveDistFromStart = waveIndex * waveLength;

    for (let pointIndex = 0; pointIndex <= pointsPerWave; pointIndex += 1) {
      const waveXFraction = pointIndex / pointsPerWave;
      const waveX = pointIndex * distanceBetweenPoints;
      const globalX = waveDistFromStart + waveX;

      // scale height based on x position
      const globalXFraction = (width - globalX) / width;
      const waveHeight = Math.min(globalXFraction, 1 - globalXFraction) * height;

      sinPoints.push({ x: globalX, y: waveHeight * Math.sin(waveXFraction * (2 * Math.PI)) });
    }
  }

  return sinPoints;
}
mercury_test.go
// Copyright © 2013, 2014, The Go-MERCURY Authors. All rights reserved. // Use of this source code is governed by a LGPLv2.1 // license that can be found in the LICENSE file. // +build linux,cgo package mercury import ( "fmt" "math/rand" "os" "runtime" "strconv" "strings" "sync" "syscall" "testing" "time" ) const ( ContainerName = "lorem" SnapshotName = "snap0" ContainerRestoreName = "ipsum" ContainerCloneName = "consectetur" ContainerCloneOverlayName = "adipiscing" ContainerCloneAufsName = "pellentesque" ) func PathExists(name string) bool { _, err := os.Lstat(name) if err != nil && os.IsNotExist(err) { return false } return true } func unprivileged() bool { if os.Geteuid() != 0 { return true } return false } func supported(moduleName string) bool { if _, err := os.Stat("/sys/module/" + moduleName); err != nil { return false } return true } func TestVersion(t *testing.T) { t.Logf("MERCURY version: %s", Version()) } func TestDefaultConfigPath(t *testing.T) { if DefaultConfigPath() == "" { t.Errorf("DefaultConfigPath failed...") } } func TestSetConfigPath(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } currentPath := c.ConfigPath() if err := c.SetConfigPath("/tmp"); err != nil { t.Errorf(err.Error()) } newPath := c.ConfigPath() if currentPath == newPath { t.Errorf("SetConfigPath failed...") } } func TestAcquire(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } Acquire(c) Release(c) } func TestConcurrentDefined_Negative(t *testing.T) { defer runtime.GOMAXPROCS(runtime.NumCPU()) var wg sync.WaitGroup for i := 0; i <= 100; i++ { wg.Add(1) go func() { c, err := NewContainer(strconv.Itoa(rand.Intn(10))) if err != nil { t.Errorf(err.Error()) } // sleep for a while to simulate some dummy work time.Sleep(time.Millisecond * time.Duration(rand.Intn(250))) if c.Defined() { t.Errorf("Defined_Negative failed...") } wg.Done() }() } wg.Wait() } func TestDefined_Negative(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if c.Defined() { t.Errorf("Defined_Negative failed...") } } func TestExecute(t *testing.T) { if unprivileged() { t.Skip("skipping test in unprivileged mode.") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.Execute("/bin/true"); err != nil { t.Errorf(err.Error()) } } func TestSetVerbosity(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } c.SetVerbosity(Quiet) } func TestCreate(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } options := DownloadTemplateOptions if !unprivileged() { options = BusyboxTemplateOptions } if err := c.Create(options); err != nil { t.Errorf(err.Error()) } } func TestClone(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err = c.Clone(ContainerCloneName, DefaultCloneOptions); err != nil { t.Errorf(err.Error()) } } func TestCloneUsingOverlayfs(t *testing.T) { if !supported("overlayfs") { t.Skip("skipping test as overlayfs support is missing.") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } err = c.Clone(ContainerCloneOverlayName, CloneOptions{ Backend: Overlayfs, KeepName: true, KeepMAC: true, Snapshot: true, }) if err != nil { t.Errorf(err.Error()) } } func TestCloneUsingAufs(t *testing.T) { if unprivileged() { t.Skip("skipping test in unprivileged mode.") } if !supported("aufs") { t.Skip("skipping test as aufs support is 
missing.") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } err = c.Clone(ContainerCloneAufsName, CloneOptions{ Backend: Aufs, KeepName: true, KeepMAC: true, Snapshot: true, }) if err != nil { t.Errorf(err.Error()) } } func TestCreateSnapshot(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.CreateSnapshot(); err != nil { t.Errorf(err.Error()) } } func TestCreateSnapshots(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } for i := 0; i < 3; i++ { if _, err := c.CreateSnapshot(); err != nil { t.Errorf(err.Error()) } } } func TestRestoreSnapshot(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } snapshot := Snapshot{Name: SnapshotName} if err := c.RestoreSnapshot(snapshot, ContainerRestoreName); err != nil { t.Errorf(err.Error()) } } func TestConcurrentCreate(t *testing.T) { defer runtime.GOMAXPROCS(runtime.NumCPU()) if unprivileged() { t.Skip("skipping test in unprivileged mode.") } var wg sync.WaitGroup options := BusyboxTemplateOptions for i := 0; i < 10; i++ { wg.Add(1) go func(i int) { c, err := NewContainer(strconv.Itoa(i)) if err != nil { t.Errorf(err.Error()) } // sleep for a while to simulate some dummy work time.Sleep(time.Millisecond * time.Duration(rand.Intn(250))) if err := c.Create(options); err != nil { t.Errorf(err.Error()) } wg.Done() }(i) } wg.Wait() } func TestSnapshots(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.Snapshots(); err != nil { t.Errorf(err.Error()) } } func TestConcurrentStart(t *testing.T) { defer runtime.GOMAXPROCS(runtime.NumCPU()) if unprivileged() { t.Skip("skipping test in unprivileged mode.") } var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) go func(i int) { c, err := NewContainer(strconv.Itoa(i)) if err != nil { t.Errorf(err.Error()) } if err := c.Start(); err != nil { t.Errorf(err.Error()) } c.Wait(RUNNING, 30*time.Second) if !c.Running() { t.Errorf("Starting the container failed...") } wg.Done() }(i) } wg.Wait() } func TestConfigFileName(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if c.ConfigFileName() == "" { t.Errorf("ConfigFileName failed...") } } func TestDefined_Positive(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if !c.Defined() { t.Errorf("Defined_Positive failed...") } } func TestConcurrentDefined_Positive(t *testing.T) { defer runtime.GOMAXPROCS(runtime.NumCPU()) if unprivileged() { t.Skip("skipping test in unprivileged mode.") } var wg sync.WaitGroup for i := 0; i <= 100; i++ { wg.Add(1) go func() { c, err := NewContainer(strconv.Itoa(rand.Intn(10))) if err != nil { t.Errorf(err.Error()) } // sleep for a while to simulate some dummy work time.Sleep(time.Millisecond * time.Duration(rand.Intn(250))) if !c.Defined() { t.Errorf("Defined_Positive failed...") } wg.Done() }() } wg.Wait() } func TestInitPid_Negative(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if c.InitPid() != -1 { t.Errorf("InitPid failed...") } } func TestStart(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.Start(); err != nil { t.Errorf(err.Error()) } c.Wait(RUNNING, 30*time.Second) if !c.Running() { t.Errorf("Starting the container failed...") } } func TestWaitIPAddresses(t *testing.T) { if !unprivileged() { t.Skip("skipping 
test in privileged mode.") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.WaitIPAddresses(30 * time.Second); err != nil { t.Errorf(err.Error()) } } func TestControllable(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if !c.Controllable() { t.Errorf("Controling the container failed...") } } func TestContainerNames(t *testing.T) { if ContainerNames() == nil { t.Errorf("ContainerNames failed...") } } func TestDefinedContainerNames(t *testing.T) { if DefinedContainerNames() == nil { t.Errorf("DefinedContainerNames failed...") } } func TestActiveContainerNames(t *testing.T) { if ActiveContainerNames() == nil { t.Errorf("ActiveContainerNames failed...") } } func TestContainers(t *testing.T) { if Containers() == nil { t.Errorf("Containers failed...") } } func TestDefinedContainers(t *testing.T) { if DefinedContainers() == nil { t.Errorf("DefinedContainers failed...") } } func TestActiveContainers(t *testing.T) { if ActiveContainers() == nil { t.Errorf("ActiveContainers failed...") } } func TestRunning(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if !c.Running() { t.Errorf("Checking the container failed...") } } func TestWantDaemonize(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.WantDaemonize(false); err != nil || c.Daemonize() { t.Errorf("WantDaemonize failed...") } } func TestWantCloseAllFds(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.WantCloseAllFds(true); err != nil { t.Errorf("WantCloseAllFds failed...") } } func TestSetLogLevel(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.SetLogLevel(WARN); err != nil || c.LogLevel() != WARN { t.Errorf("SetLogLevel( failed...") } } func TestSetLogFile(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.SetLogFile("/tmp/" + ContainerName); err != nil || c.LogFile() != "/tmp/"+ContainerName { t.Errorf("SetLogFile failed...") } } func TestInitPid_Positive(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if c.InitPid() == -1 { t.Errorf("InitPid failed...") } } func TestName(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if c.Name() != ContainerName { t.Errorf("Name failed...") } } func TestFreeze(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.Freeze(); err != nil { t.Errorf(err.Error()) } c.Wait(FROZEN, 30*time.Second) if c.State() != FROZEN { t.Errorf("Freezing the container failed...") } } func TestUnfreeze(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.Unfreeze(); err != nil { t.Errorf(err.Error()) } c.Wait(RUNNING, 30*time.Second) if !c.Running() { t.Errorf("Unfreezing the container failed...") } } func TestLoadConfigFile(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.LoadConfigFile(c.ConfigFileName()); err != nil { t.Errorf(err.Error()) } } func TestSaveConfigFile(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.SaveConfigFile(c.ConfigFileName()); err != nil { t.Errorf(err.Error()) } } func TestConfigItem(t *testing.T) { c, err := NewContainer(ContainerName) if 
err != nil { t.Errorf(err.Error()) } if c.ConfigItem("mercury.utsname")[0] != ContainerName { t.Errorf("ConfigItem failed...") } } func TestSetConfigItem(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.SetConfigItem("mercury.utsname", ContainerName); err != nil { t.Errorf(err.Error()) } if c.ConfigItem("mercury.utsname")[0] != ContainerName { t.Errorf("ConfigItem failed...") } } func TestRunningConfigItem(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if c.RunningConfigItem("mercury.network.0.type") == nil { t.Errorf("RunningConfigItem failed...") } } func TestSetCgroupItem(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } maxMem := c.CgroupItem("memory.max_usage_in_bytes")[0] currentMem := c.CgroupItem("memory.limit_in_bytes")[0] if err := c.SetCgroupItem("memory.limit_in_bytes", maxMem); err != nil { t.Errorf(err.Error()) } newMem := c.CgroupItem("memory.limit_in_bytes")[0] if newMem == currentMem { t.Errorf("SetCgroupItem failed...") } } func TestClearConfigItem(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.ClearConfigItem("mercury.cap.drop"); err != nil { t.Errorf(err.Error()) } if c.ConfigItem("mercury.cap.drop")[0] != "" { t.Errorf("ClearConfigItem failed...") } } func TestConfigKeys(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } keys := strings.Join(c.ConfigKeys("mercury.network.0"), " ") if !strings.Contains(keys, "mtu") { t.Errorf("Keys failed...") } } func TestInterfaces(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.Interfaces(); err != nil { t.Errorf(err.Error()) } } func TestMemoryUsage(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.MemoryUsage(); err != nil { t.Errorf(err.Error()) } } func TestKernelMemoryUsage(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.KernelMemoryUsage(); err != nil { t.Errorf(err.Error()) } } func TestMemorySwapUsage(t *testing.T) { if !PathExists("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes") { t.Skip("skipping the test as it requires memory.memsw.limit_in_bytes to be set") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.MemorySwapUsage(); err != nil { t.Errorf(err.Error()) } } func TestBlkioUsage(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.BlkioUsage(); err != nil { t.Errorf(err.Error()) } } func TestMemoryLimit(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.MemoryLimit(); err != nil { t.Errorf(err.Error()) } } func TestSoftMemoryLimit(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.SoftMemoryLimit(); err != nil { t.Errorf(err.Error()) } } func TestKernelMemoryLimit(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.KernelMemoryLimit(); err != nil { t.Errorf(err.Error()) } } func TestMemorySwapLimit(t *testing.T) { if !PathExists("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes") { t.Skip("skipping the test as it requires memory.memsw.limit_in_bytes to be set") } c, err := NewContainer(ContainerName) if err != nil { 
t.Errorf(err.Error()) } if _, err := c.MemorySwapLimit(); err != nil { t.Errorf(err.Error()) } } func TestSetMemoryLimit(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } oldMemLimit, err := c.MemoryLimit() if err != nil { t.Errorf(err.Error()) } if err := c.SetMemoryLimit(oldMemLimit * 4); err != nil { t.Errorf(err.Error()) } newMemLimit, err := c.MemoryLimit() if err != nil { t.Errorf(err.Error()) } if newMemLimit != oldMemLimit*4 { t.Errorf("SetMemoryLimit failed") } } func TestSetSoftMemoryLimit(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } oldMemLimit, err := c.MemoryLimit() if err != nil { t.Errorf(err.Error()) } if err := c.SetSoftMemoryLimit(oldMemLimit * 4); err != nil { t.Errorf(err.Error()) } newMemLimit, err := c.SoftMemoryLimit() if err != nil { t.Errorf(err.Error()) } if newMemLimit != oldMemLimit*4 { t.Errorf("SetSoftMemoryLimit failed") } } func TestSetKernelMemoryLimit(t *testing.T) { t.Skip("skipping the test as it requires memory.kmem.limit_in_bytes to be set") c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } oldMemLimit, err := c.KernelMemoryLimit() if err != nil { t.Errorf(err.Error()) } if err := c.SetKernelMemoryLimit(oldMemLimit * 4); err != nil { t.Errorf(err.Error()) } newMemLimit, err := c.KernelMemoryLimit() if err != nil { t.Errorf(err.Error()) } // Floats aren't exactly exact, check that we did get something smaller if newMemLimit < oldMemLimit*3 { t.Errorf("SetKernelMemoryLimit failed") } } func TestSetMemorySwapLimit(t *testing.T) { if !PathExists("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes") { t.Skip("skipping the test as it requires memory.memsw.limit_in_bytes to be set") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } oldMemorySwapLimit, err := c.MemorySwapLimit() if err != nil { t.Errorf(err.Error()) } if err := c.SetMemorySwapLimit(oldMemorySwapLimit / 4); err != nil { t.Errorf(err.Error()) } newMemorySwapLimit, err := c.MemorySwapLimit() if err != nil { t.Errorf(err.Error()) } // Floats aren't exactly exact, check that we did get something smaller if newMemorySwapLimit > oldMemorySwapLimit/3 { t.Errorf("SetSwapLimit failed") } } func TestCPUTime(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.CPUTime(); err != nil { t.Errorf(err.Error()) } } func TestCPUTimePerCPU(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.CPUTimePerCPU(); err != nil { t.Errorf(err.Error()) } } func TestCPUStats(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.CPUStats(); err != nil { t.Errorf(err.Error()) } } func TestRunCommandNoWait(t *testing.T) { c, err := NewContainer("TestRunCommandNoWait") if err != nil { t.Errorf(err.Error()) t.FailNow() } options := DownloadTemplateOptions if !unprivileged() { options = BusyboxTemplateOptions } if err := c.Create(options); err != nil { t.Errorf(err.Error()) t.FailNow() } defer c.Destroy() err = c.Start() if err != nil { t.Errorf(err.Error()) t.FailNow() } defer c.Stop() argsThree := []string{"/bin/sh", "-c", "exit 0"} pid, err := c.RunCommandNoWait(argsThree, DefaultAttachOptions) if err != nil { t.Errorf(err.Error()) t.FailNow() } proc, err := os.FindProcess(pid) if err != nil { t.Errorf(err.Error()) t.FailNow() } procState, err := proc.Wait() if err != nil { t.Errorf(err.Error()) t.FailNow() } if 
!procState.Success() { t.Errorf("Expected success") t.FailNow() } argsThree = []string{"/bin/sh", "-c", "exit 1"} pid, err = c.RunCommandNoWait(argsThree, DefaultAttachOptions) if err != nil { t.Errorf(err.Error()) t.FailNow() } proc, err = os.FindProcess(pid) if err != nil { t.Errorf(err.Error()) t.FailNow() } procState, err = proc.Wait() if err != nil { t.Errorf(err.Error()) t.FailNow() } if procState.Success() { t.Errorf("Expected failure") t.FailNow() } } func TestRunCommand(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } argsThree := []string{"/bin/sh", "-c", "exit 0"} ok, err := c.RunCommand(argsThree, DefaultAttachOptions) if err != nil { t.Errorf(err.Error()) } if ok != true { t.Errorf("Expected success") } argsThree = []string{"/bin/sh", "-c", "exit 1"} ok, err = c.RunCommand(argsThree, DefaultAttachOptions) if err != nil { t.Errorf(err.Error()) } if ok != false { t.Errorf("Expected failure") } } func TestCommandWithEnv(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } options := DefaultAttachOptions options.Env = []string{"FOO=BAR"} options.ClearEnv = true args := []string{"/bin/sh", "-c", "test $FOO = 'BAR'"} ok, err := c.RunCommand(args, options) if err != nil { t.Errorf(err.Error()) } if ok != true { t.Errorf("Expected success") } } func TestCommandWithEnvToKeep(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } options := DefaultAttachOptions options.ClearEnv = true options.EnvToKeep = []string{"TERM"} args := []string{"/bin/sh", "-c", fmt.Sprintf("test $TERM = '%s'", os.Getenv("TERM"))} ok, err := c.RunCommand(args, DefaultAttachOptions) if err != nil { t.Errorf(err.Error()) } if ok != true { t.Errorf("Expected success") } } func TestCommandWithCwd(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } options := DefaultAttachOptions options.Cwd = "/tmp" args := []string{"/bin/sh", "-c", "test `pwd` = /tmp"} ok, err := c.RunCommand(args, options) if err != nil { t.Errorf(err.Error()) } if ok != true { t.Errorf("Expected success") } } func TestCommandWithUIDGID(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } options := DefaultAttachOptions options.UID = 1000 options.GID = 1000 args := []string{"/bin/sh", "-c", "test `id -u` = 1000 && test `id -g` = 1000"} ok, err := c.RunCommand(args, options) if err != nil { t.Errorf(err.Error()) } if ok != true { t.Errorf("Expected success") } } func TestCommandWithArch(t *testing.T) { uname := syscall.Utsname{} if err := syscall.Uname(&uname); err != nil { t.Errorf(err.Error()) } arch := "" for _, c := range uname.Machine { if c == 0 { break } arch += string(byte(c)) } if arch != "x86_64" && arch != "i686" { t.Skip("skipping architecture test, not on x86") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } options := DefaultAttachOptions options.Arch = X86 args := []string{"/bin/sh", "-c", "test `uname -m` = i686"} ok, err := c.RunCommand(args, options) if err != nil { t.Errorf(err.Error()) } if ok != true { t.Errorf("Expected success") } } func TestConsoleFd(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.ConsoleFd(0); err != nil { t.Errorf(err.Error()) } } func TestIPAddress(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if unprivileged() { time.Sleep(3 * time.Second) } if _, err := 
c.IPAddress("lo"); err != nil { t.Errorf(err.Error()) } } func TestIPv4Address(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if unprivileged() { time.Sleep(3 * time.Second) } if _, err := c.IPv4Address("lo"); err != nil { t.Errorf(err.Error()) } } func TestIPv46ddress(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if unprivileged() { time.Sleep(3 * time.Second) } if _, err := c.IPv6Address("lo"); err != nil { t.Errorf(err.Error()) } } func TestAddDeviceNode(t *testing.T) { if unprivileged() { t.Skip("skipping test in unprivileged mode.") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.AddDeviceNode("/dev/network_latency"); err != nil { t.Errorf(err.Error()) } } func TestRemoveDeviceNode(t *testing.T) { if unprivileged() { t.Skip("skipping test in unprivileged mode.") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.RemoveDeviceNode("/dev/network_latency"); err != nil { t.Errorf(err.Error()) } } func TestIPv4Addresses(t *testing.T) { if !unprivileged() { t.Skip("skipping test in privileged mode.") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.IPv4Addresses(); err != nil { t.Errorf(err.Error()) } } func T
t *testing.T) { if !unprivileged() { t.Skip("skipping test in privileged mode.") } c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if _, err := c.IPv6Addresses(); err != nil { t.Errorf(err.Error()) } } func TestReboot(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.Reboot(); err != nil { t.Errorf("Rebooting the container failed...") } c.Wait(RUNNING, 30*time.Second) } func TestConcurrentShutdown(t *testing.T) { defer runtime.GOMAXPROCS(runtime.NumCPU()) if unprivileged() { t.Skip("skipping test in unprivileged mode.") } var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) go func(i int) { c, err := NewContainer(strconv.Itoa(i)) if err != nil { t.Errorf(err.Error()) } if err := c.Shutdown(30 * time.Second); err != nil { t.Errorf(err.Error()) } c.Wait(STOPPED, 30*time.Second) if c.Running() { t.Errorf("Shutting down the container failed...") } wg.Done() }(i) } wg.Wait() } func TestShutdown(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.Shutdown(30 * time.Second); err != nil { t.Errorf(err.Error()) } c.Wait(STOPPED, 30*time.Second) if c.Running() { t.Errorf("Shutting down the container failed...") } } func TestStop(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.Start(); err != nil { t.Errorf(err.Error()) } if err := c.Stop(); err != nil { t.Errorf(err.Error()) } c.Wait(STOPPED, 30*time.Second) if c.Running() { t.Errorf("Stopping the container failed...") } } func TestDestroySnapshot(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } snapshot := Snapshot{Name: SnapshotName} if err := c.DestroySnapshot(snapshot); err != nil { t.Errorf(err.Error()) } } func TestDestroyAllSnapshots(t *testing.T) { c, err := NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.DestroyAllSnapshots(); err != nil { t.Errorf(err.Error()) } } func TestDestroy(t *testing.T) { if supported("overlayfs") { c, err := NewContainer(ContainerCloneOverlayName) if err != nil { t.Errorf(err.Error()) } if err := c.Destroy(); err != nil { t.Errorf(err.Error()) } } if !unprivileged() && supported("aufs") { c, err := NewContainer(ContainerCloneAufsName) if err != nil { t.Errorf(err.Error()) } if err := c.Destroy(); err != nil { t.Errorf(err.Error()) } } c, err := NewContainer(ContainerCloneName) if err != nil { t.Errorf(err.Error()) } if err := c.Destroy(); err != nil { t.Errorf(err.Error()) } c, err = NewContainer(ContainerRestoreName) if err != nil { t.Errorf(err.Error()) } if err := c.Destroy(); err != nil { t.Errorf(err.Error()) } c, err = NewContainer(ContainerName) if err != nil { t.Errorf(err.Error()) } if err := c.Destroy(); err != nil { t.Errorf(err.Error()) } } func TestConcurrentDestroy(t *testing.T) { defer runtime.GOMAXPROCS(runtime.NumCPU()) if unprivileged() { t.Skip("skipping test in unprivileged mode.") } var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) go func(i int) { c, err := NewContainer(strconv.Itoa(i)) if err != nil { t.Errorf(err.Error()) } // sleep for a while to simulate some dummy work time.Sleep(time.Millisecond * time.Duration(rand.Intn(250))) if err := c.Destroy(); err != nil { t.Errorf(err.Error()) } wg.Done() }(i) } wg.Wait() } func TestBackendStore(t *testing.T) { var X struct { store BackendStore } if X.store.String() != "" { t.Error("zero value of BackendStore should be invalid") } } func TestState(t *testing.T) { 
var X struct { state State } if X.state.String() != "" { t.Error("zero value of State should be invalid") } } func TestSupportedConfigItems(t *testing.T) { if VersionAtLeast(2, 1, 0) { if !IsSupportedConfigItem("mercury.arch") { t.Errorf("IsSupportedConfigItem failed to detect \"mercury.arch\" as supported config item...") } if IsSupportedConfigItem("mercury.nonsense") { t.Errorf("IsSupportedConfigItem failed to detect \"mercury.nonsense\" as unsupported config item...") } } }
estIPv6Addresses(
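Taken together, the tests above exercise a full container lifecycle. The snippet below is a condensed sketch assembled only from calls that appear in those tests; it is not part of the original file, error handling is trimmed for brevity, the container name "demo" is arbitrary, and it assumes the same package with "time" imported.

// lifecycleSketch strings together the create/start/run/stop/destroy calls
// exercised by the tests above; errors are ignored here only for brevity.
func lifecycleSketch() {
	c, _ := NewContainer("demo")         // arbitrary container name
	_ = c.Create(BusyboxTemplateOptions) // privileged path used in TestCreate
	_ = c.Start()
	c.Wait(RUNNING, 30*time.Second)
	_, _ = c.RunCommand([]string{"/bin/sh", "-c", "exit 0"}, DefaultAttachOptions)
	_ = c.Stop()
	c.Wait(STOPPED, 30*time.Second)
	_ = c.Destroy()
}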
keyring.go
/* Copyright 2019 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kms import ( "context" "fmt" kmsv1 "google.golang.org/api/cloudkms/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/connection" "github.com/crossplane/crossplane-runtime/pkg/controller" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/event" "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane-contrib/provider-gcp/apis/kms/v1alpha1" scv1alpha1 "github.com/crossplane-contrib/provider-gcp/apis/v1alpha1" gcp "github.com/crossplane-contrib/provider-gcp/pkg/clients" "github.com/crossplane-contrib/provider-gcp/pkg/clients/keyring" "github.com/crossplane-contrib/provider-gcp/pkg/features" ) // Error strings. const ( errNewClient = "cannot create new GCP KMS API client" errNotKeyRing = "managed resource is not a GCP KeyRing" errGet = "cannot get GCP object via KMS API" errCreate = "cannot create GCP object via KMS API" errUpdate = "cannot update GCP object via KMS API" ) // SetupKeyRing adds a controller that reconciles KeyRings. func SetupKeyRing(mgr ctrl.Manager, o controller.Options) error { name := managed.ControllerName(v1alpha1.KeyRingGroupKind) cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} if o.Features.Enabled(features.EnableAlphaExternalSecretStores) { cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), scv1alpha1.StoreConfigGroupVersionKind)) } r := managed.NewReconciler(mgr, resource.ManagedKind(v1alpha1.KeyRingGroupVersionKind), managed.WithExternalConnecter(&keyRingConnecter{client: mgr.GetClient()}), managed.WithPollInterval(o.PollInterval), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithConnectionPublishers(cps...)) return ctrl.NewControllerManagedBy(mgr). Named(name). WithOptions(o.ForControllerRuntime()). For(&v1alpha1.KeyRing{}). Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) } type keyRingConnecter struct { client client.Client } // Connect sets up kms client using credentials from the provider func (c *keyRingConnecter) Connect(ctx context.Context, mg resource.Managed) (managed.ExternalClient, error) { cr, ok := mg.(*v1alpha1.KeyRing) if !ok { return nil, errors.New(errNotKeyRing) } projectID, opts, err := gcp.GetConnectionInfo(ctx, c.client, mg) if err != nil { return nil, err } s, err := kmsv1.NewService(ctx, opts...) 
if err != nil { return nil, errors.Wrap(err, errNewClient) } rrn := NewRelativeResourceNamerKeyRing(projectID, cr.Spec.ForProvider.Location) return &keyRingExternal{keyrings: kmsv1.NewProjectsLocationsKeyRingsService(s), rrn: rrn}, nil } type keyRingExternal struct {
rrn RelativeResourceNamerKeyRing } func (e *keyRingExternal) Observe(ctx context.Context, mg resource.Managed) (managed.ExternalObservation, error) { cr, ok := mg.(*v1alpha1.KeyRing) if !ok { return managed.ExternalObservation{}, errors.New(errNotKeyRing) } // Hack to cleanup CR without deleting actual resource. // It is not possible to delete KMS KeyRings, there is no "delete" method defined: // https://cloud.google.com/kms/docs/reference/rest#rest-resource:-v1.projects.locations.keyrings // Also see related faq: https://cloud.google.com/kms/docs/faq#cannot_delete if meta.WasDeleted(cr) { return managed.ExternalObservation{}, nil } call := e.keyrings.Get(e.rrn.ResourceName(cr)) instance, err := call.Context(ctx).Do() if gcp.IsErrorNotFound(err) { return managed.ExternalObservation{ResourceExists: false}, nil } if err != nil { return managed.ExternalObservation{}, errors.Wrap(err, errGet) } cr.Status.SetConditions(xpv1.Available()) cr.Status.AtProvider = keyring.GenerateObservation(*instance) return managed.ExternalObservation{ ResourceExists: true, ResourceUpToDate: true, ConnectionDetails: managed.ConnectionDetails{}, }, nil } // https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings/create func (e *keyRingExternal) Create(ctx context.Context, mg resource.Managed) (managed.ExternalCreation, error) { cr, ok := mg.(*v1alpha1.KeyRing) if !ok { return managed.ExternalCreation{}, errors.New(errNotKeyRing) } cr.SetConditions(xpv1.Creating()) instance := &kmsv1.KeyRing{} if _, err := e.keyrings.Create(e.rrn.LocationRRN(), instance). KeyRingId(meta.GetExternalName(cr)).Context(ctx).Do(); err != nil { return managed.ExternalCreation{}, errors.Wrap(err, errCreate) } return managed.ExternalCreation{}, nil } func (e *keyRingExternal) Update(ctx context.Context, mg resource.Managed) (managed.ExternalUpdate, error) { // It is not possible to update KMS KeyRings, there is no "patch" method defined: // https://cloud.google.com/kms/docs/reference/rest#rest-resource:-v1.projects.locations.keyrings return managed.ExternalUpdate{}, nil } func (e *keyRingExternal) Delete(ctx context.Context, mg resource.Managed) error { // It is not possible to delete KMS KeyRings, there is no "delete" method defined: // https://cloud.google.com/kms/docs/reference/rest#rest-resource:-v1.projects.locations.keyrings // Also see related faq: https://cloud.google.com/kms/docs/faq#cannot_delete return nil } // NewRelativeResourceNamerKeyRing makes an instance of the RelativeResourceNamerKeyRing // which is the only type that is allowed to know how to construct GCP resource names // for the KMS Keyring type. func NewRelativeResourceNamerKeyRing(projectName, location string) RelativeResourceNamerKeyRing { return RelativeResourceNamerKeyRing{projectName: projectName, location: location} } // RelativeResourceNamerKeyRing allows the controller to generate the "relative resource name" // for the KeyRing and GCP project based on the keyRing external-name annotation. 
// https://cloud.google.com/apis/design/resource_names#relative_resource_name // The relative resource name for KeyRing has the following format: // projects/{projectName}/locations/{location}/keyRings/{keyRingName} type RelativeResourceNamerKeyRing struct { projectName string location string } // ProjectRRN yields the relative resource name for a GCP project func (rrn RelativeResourceNamerKeyRing) ProjectRRN() string { return fmt.Sprintf("projects/%s", rrn.projectName) } // LocationRRN yields the relative resource name for a GCP Project Location func (rrn RelativeResourceNamerKeyRing) LocationRRN() string { return fmt.Sprintf("%s/locations/%s", rrn.ProjectRRN(), rrn.location) } // ResourceName yields the relative resource name for the KeyRing resource func (rrn RelativeResourceNamerKeyRing) ResourceName(kr *v1alpha1.KeyRing) string { return fmt.Sprintf("%s/keyRings/%s", rrn.LocationRRN(), meta.GetExternalName(kr)) }
keyrings keyring.Client
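For reference, the relative resource names produced by RelativeResourceNamerKeyRing follow the format strings in the methods above. This is a small hypothetical illustration, not part of the original file; the project and location values are invented.

// exampleNames shows the strings produced by the naming helpers for made-up
// inputs; fmt is already imported by this file.
func exampleNames() {
	rrn := NewRelativeResourceNamerKeyRing("my-project", "us-east1")
	fmt.Println(rrn.ProjectRRN())  // projects/my-project
	fmt.Println(rrn.LocationRRN()) // projects/my-project/locations/us-east1
	// ResourceName(kr) additionally appends "/keyRings/" plus the KeyRing's
	// external-name annotation.
}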
win32.ts
import * as Path from 'path' import { enumerateValues, HKEY, RegistryValue, RegistryValueType, } from 'registry-js' import { pathExists } from 'fs-extra' import { IFoundEditor } from './found-editor' interface IWindowsAppInformation { displayName: string publisher: string installLocation: string } type ExpectedInstallationChecker = ( displayName: string, publisher: string ) => boolean /** Represents an external editor on Windows */ interface IWindowsExternalEditor { /** Name of the editor. It will be used both as identifier and user-facing. */ readonly name: string /** * Set of registry keys associated with the installed application. * * Some tools (like VSCode) may support a 64-bit or 32-bit version of the * tool - we should use whichever they have installed. */ readonly registryKeys: ReadonlyArray<{ key: HKEY; subKey: string }> /** * List of path components from the editor's installation folder to the * executable shim. **/ readonly executableShimPath: ReadonlyArray<string> /** * Registry key with the install location of the app. If not provided, * 'InstallLocation' will be used. **/ readonly installLocationRegistryKey?: string /** * Function to check if the found installation matches the expected identifier * details. * * @param displayName The display name as listed in the registry * @param publisher The publisher who created the installer */ readonly expectedInstallationChecker: ExpectedInstallationChecker } const registryKey = (key: HKEY, ...subKeys: string[]) => ({ key, subKey: Path.win32.join(...subKeys), }) const uninstallSubKey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall' const wow64UninstallSubKey = 'SOFTWARE\\WOW6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall' const CurrentUserUninstallKey = (subKey: string) => registryKey(HKEY.HKEY_CURRENT_USER, uninstallSubKey, subKey) const LocalMachineUninstallKey = (subKey: string) => registryKey(HKEY.HKEY_LOCAL_MACHINE, uninstallSubKey, subKey) const Wow64LocalMachineUninstallKey = (subKey: string) => registryKey(HKEY.HKEY_LOCAL_MACHINE, wow64UninstallSubKey, subKey) /** * This list contains all the external editors supported on Windows. Add a new * entry here to add support for your favorite editor. 
**/ const editors: IWindowsExternalEditor[] = [ { name: 'Atom', registryKeys: [CurrentUserUninstallKey('atom')], executableShimPath: ['bin', 'atom.cmd'], expectedInstallationChecker: (displayName, publisher) => displayName === 'Atom' && publisher === 'GitHub Inc.', }, { name: 'Atom Beta', registryKeys: [CurrentUserUninstallKey('atom-beta')], executableShimPath: ['bin', 'atom-beta.cmd'], expectedInstallationChecker: (displayName, publisher) => displayName === 'Atom Beta' && publisher === 'GitHub Inc.', }, { name: 'Atom Nightly', registryKeys: [CurrentUserUninstallKey('atom-nightly')], executableShimPath: ['bin', 'atom-nightly.cmd'], expectedInstallationChecker: (displayName, publisher) => displayName === 'Atom Nightly' && publisher === 'GitHub Inc.', }, { name: 'Visual Studio Code', registryKeys: [ // 64-bit version of VSCode (user) - provided by default in 64-bit Windows CurrentUserUninstallKey('{771FD6B0-FA20-440A-A002-3B3BAC16DC50}_is1'), // 32-bit version of VSCode (user) CurrentUserUninstallKey('{D628A17A-9713-46BF-8D57-E671B46A741E}_is1'), // 64-bit version of VSCode (system) - was default before user scope installation LocalMachineUninstallKey('{EA457B21-F73E-494C-ACAB-524FDE069978}_is1'), // 32-bit version of VSCode (system) Wow64LocalMachineUninstallKey( '{F8A2A208-72B3-4D61-95FC-8A65D340689B}_is1' ), ], executableShimPath: ['bin', 'code.cmd'], expectedInstallationChecker: (displayName, publisher) => displayName.startsWith('Microsoft Visual Studio Code') && publisher === 'Microsoft Corporation', }, { name: 'Visual Studio Code (Insiders)', registryKeys: [ // 64-bit version of VSCode (user) - provided by default in 64-bit Windows CurrentUserUninstallKey('{217B4C08-948D-4276-BFBB-BEE930AE5A2C}_is1'), // 32-bit version of VSCode (user) CurrentUserUninstallKey('{26F4A15E-E392-4887-8C09-7BC55712FD5B}_is1'), // 64-bit version of VSCode (system) - was default before user scope installation LocalMachineUninstallKey('{1287CAD5-7C8D-410D-88B9-0D1EE4A83FF2}_is1'), // 32-bit version of VSCode (system) Wow64LocalMachineUninstallKey( '{C26E74D1-022E-4238-8B9D-1E7564A36CC9}_is1' ), ], executableShimPath: ['bin', 'code-insiders.cmd'], expectedInstallationChecker: (displayName, publisher) => displayName.startsWith('Microsoft Visual Studio Code Insiders') && publisher === 'Microsoft Corporation', }, { name: 'Visual Studio Codium', registryKeys: [ // 64-bit version of VSCodium (user) CurrentUserUninstallKey('{2E1F05D1-C245-4562-81EE-28188DB6FD17}_is1'), // 32-bit version of VSCodium (user) CurrentUserUninstallKey('{C6065F05-9603-4FC4-8101-B9781A25D88E}}_is1'), // 64-bit version of VSCodium (system) LocalMachineUninstallKey('{D77B7E06-80BA-4137-BCF4-654B95CCEBC5}_is1'), // 32-bit version of VSCodium (system) Wow64LocalMachineUninstallKey( '{E34003BB-9E10-4501-8C11-BE3FAA83F23F}_is1' ), ], executableShimPath: ['bin', 'codium.cmd'], expectedInstallationChecker: (displayName, publisher) => displayName.startsWith('VSCodium') && publisher === 'Microsoft Corporation', }, { name: 'Sublime Text', registryKeys: [LocalMachineUninstallKey('Sublime Text 3_is1')], executableShimPath: ['subl.exe'], expectedInstallationChecker: (displayName, publisher) => displayName.startsWith('Sublime Text') && publisher === 'Sublime HQ Pty Ltd', }, { name: 'ColdFusion Builder', registryKeys: [ // 64-bit version of ColdFusionBuilder3 LocalMachineUninstallKey('Adobe ColdFusion Builder 3_is1'), // 64-bit version of ColdFusionBuilder2016 LocalMachineUninstallKey('Adobe ColdFusion Builder 2016'), ], executableShimPath: 
['CFBuilder.exe'], expectedInstallationChecker: (displayName, publisher) => displayName.startsWith('Adobe ColdFusion Builder') && publisher === 'Adobe Systems Incorporated', }, { name: 'Typora', registryKeys: [ // 64-bit version of Typora LocalMachineUninstallKey('{37771A20-7167-44C0-B322-FD3E54C56156}_is1'), // 32-bit version of Typora Wow64LocalMachineUninstallKey( '{37771A20-7167-44C0-B322-FD3E54C56156}_is1' ), ], executableShimPath: ['typora.exe'], expectedInstallationChecker: (displayName, publisher) => displayName.startsWith('Typora') && publisher === 'typora.io', }, { name: 'SlickEdit', registryKeys: [ // 64-bit version of SlickEdit Pro 2018 LocalMachineUninstallKey('{18406187-F49E-4822-CAF2-1D25C0C83BA2}'), // 32-bit version of SlickEdit Pro 2018 Wow64LocalMachineUninstallKey('{18006187-F49E-4822-CAF2-1D25C0C83BA2}'), // 64-bit version of SlickEdit Standard 2018 LocalMachineUninstallKey('{18606187-F49E-4822-CAF2-1D25C0C83BA2}'), // 32-bit version of SlickEdit Standard 2018 Wow64LocalMachineUninstallKey('{18206187-F49E-4822-CAF2-1D25C0C83BA2}'), // 64-bit version of SlickEdit Pro 2017 LocalMachineUninstallKey('{15406187-F49E-4822-CAF2-1D25C0C83BA2}'), // 32-bit version of SlickEdit Pro 2017 Wow64LocalMachineUninstallKey('{15006187-F49E-4822-CAF2-1D25C0C83BA2}'), // 64-bit version of SlickEdit Pro 2016 (21.0.1) LocalMachineUninstallKey('{10C06187-F49E-4822-CAF2-1D25C0C83BA2}'), // 64-bit version of SlickEdit Pro 2016 (21.0.0) LocalMachineUninstallKey('{10406187-F49E-4822-CAF2-1D25C0C83BA2}'), // 64-bit version of SlickEdit Pro 2015 (20.0.3) LocalMachineUninstallKey('{0DC06187-F49E-4822-CAF2-1D25C0C83BA2}'), // 64-bit version of SlickEdit Pro 2015 (20.0.2) LocalMachineUninstallKey('{0D406187-F49E-4822-CAF2-1D25C0C83BA2}'), // 64-bit version of SlickEdit Pro 2014 (19.0.2) LocalMachineUninstallKey('{7CC0E567-ACD6-41E8-95DA-154CEEDB0A18}'), ], executableShimPath: ['win', 'vs.exe'], expectedInstallationChecker: (displayName, publisher) => displayName.startsWith('SlickEdit') && publisher === 'SlickEdit Inc.', }, { name: 'JetBrains Webstorm', registryKeys: [ Wow64LocalMachineUninstallKey('WebStorm 2018.3'), Wow64LocalMachineUninstallKey('WebStorm 2019.2'), Wow64LocalMachineUninstallKey('WebStorm 2019.2.4'), Wow64LocalMachineUninstallKey('WebStorm 2019.3'), Wow64LocalMachineUninstallKey('WebStorm 2020.1'), ], executableShimPath: ['bin', 'webstorm.exe'], expectedInstallationChecker: (displayName, publisher) => displayName.startsWith('WebStorm') && publisher === 'JetBrains s.r.o.', }, { name: 'JetBrains Phpstorm', registryKeys: [ Wow64LocalMachineUninstallKey('PhpStorm 2019.2'), Wow64LocalMachineUninstallKey('PhpStorm 2019.2.4'), Wow64LocalMachineUninstallKey('PhpStorm 2019.3'), Wow64LocalMachineUninstallKey('PhpStorm 2020.1'), ], executableShimPath: ['bin', 'phpstorm.exe'], expectedInstallationChecker: (displayName, publisher) => displayName.startsWith('PhpStorm') && publisher === 'JetBrains s.r.o.', }, { name: 'Notepad++', registryKeys: [ // 64-bit version of Notepad++ LocalMachineUninstallKey('Notepad++'), // 32-bit version of Notepad++ Wow64LocalMachineUninstallKey('Notepad++'), ], executableShimPath: [], installLocationRegistryKey: 'DisplayIcon', expectedInstallationChecker: (displayName, publisher) => displayName.startsWith('Notepad++') && publisher === 'Notepad++ Team', }, { name: 'JetBrains Rider', registryKeys: [Wow64LocalMachineUninstallKey('JetBrains Rider 2019.3.4')], executableShimPath: ['bin', 'rider64.exe'], expectedInstallationChecker: (displayName, publisher) => 
displayName.startsWith('JetBrains Rider') && publisher === 'JetBrains s.r.o.', }, ] function
( keys: ReadonlyArray<RegistryValue>, key: string ): string { const entry = keys.find(k => k.name === key) return entry && entry.type === RegistryValueType.REG_SZ ? entry.data : '' } function getAppInfo( editor: IWindowsExternalEditor, keys: ReadonlyArray<RegistryValue> ): IWindowsAppInformation { const displayName = getKeyOrEmpty(keys, 'DisplayName') const publisher = getKeyOrEmpty(keys, 'Publisher') const installLocation = getKeyOrEmpty( keys, editor.installLocationRegistryKey ?? 'InstallLocation' ) return { displayName, publisher, installLocation } } async function findApplication(editor: IWindowsExternalEditor) { for (const { key, subKey } of editor.registryKeys) { const keys = enumerateValues(key, subKey) if (keys.length === 0) { continue } const { displayName, publisher, installLocation } = getAppInfo(editor, keys) if (!editor.expectedInstallationChecker(displayName, publisher)) { log.debug(`Unexpected registry entries for ${editor.name}`) continue } const path = Path.join(installLocation, ...editor.executableShimPath) const exists = await pathExists(path) if (!exists) { log.debug(`Executable for ${editor.name} not found at '${path}'`) continue } return path } return null } /** * Lookup known external editors using the Windows registry to find installed * applications and their location on disk for Desktop to launch. */ export async function getAvailableEditors(): Promise< ReadonlyArray<IFoundEditor<string>> > { const results: Array<IFoundEditor<string>> = [] for (const editor of editors) { const path = await findApplication(editor) if (path) { results.push({ editor: editor.name, path, usesShell: path.endsWith('.cmd'), }) } } return results }
getKeyOrEmpty
check_unsafety.rs
use crate::build::ExprCategory; use rustc_middle::thir::visit::{self, Visitor}; use rustc_errors::struct_span_err; use rustc_hir as hir; use rustc_middle::mir::BorrowKind; use rustc_middle::thir::*; use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt}; use rustc_session::lint::builtin::{UNSAFE_OP_IN_UNSAFE_FN, UNUSED_UNSAFE}; use rustc_session::lint::Level; use rustc_span::def_id::{DefId, LocalDefId}; use rustc_span::symbol::Symbol; use rustc_span::Span; use std::ops::Bound; struct UnsafetyVisitor<'a, 'tcx> { tcx: TyCtxt<'tcx>, thir: &'a Thir<'tcx>, /// The `HirId` of the current scope, which would be the `HirId` /// of the current HIR node, modulo adjustments. Used for lint levels. hir_context: hir::HirId, /// The current "safety context". This notably tracks whether we are in an /// `unsafe` block, and whether it has been used. safety_context: SafetyContext, body_unsafety: BodyUnsafety, /// The `#[target_feature]` attributes of the body. Used for checking /// calls to functions with `#[target_feature]` (RFC 2396). body_target_features: &'tcx Vec<Symbol>, /// When inside the LHS of an assignment to a field, this is the type /// of the LHS and the span of the assignment expression. assignment_info: Option<(Ty<'tcx>, Span)>, in_union_destructure: bool, param_env: ParamEnv<'tcx>, inside_adt: bool, } impl<'tcx> UnsafetyVisitor<'_, 'tcx> { fn in_safety_context(&mut self, safety_context: SafetyContext, f: impl FnOnce(&mut Self)) { if let ( SafetyContext::UnsafeBlock { span: enclosing_span, .. }, SafetyContext::UnsafeBlock { span: block_span, hir_id, .. }, ) = (self.safety_context, safety_context) { self.warn_unused_unsafe( hir_id, block_span, Some((self.tcx.sess.source_map().guess_head_span(enclosing_span), "block")), ); f(self); } else { let prev_context = self.safety_context; self.safety_context = safety_context; f(self); if let SafetyContext::UnsafeBlock { used: false, span, hir_id } = self.safety_context { self.warn_unused_unsafe( hir_id, span, if self.unsafe_op_in_unsafe_fn_allowed() { self.body_unsafety.unsafe_fn_sig_span().map(|span| (span, "fn")) } else { None }, ); } self.safety_context = prev_context; } } fn
(&mut self, span: Span, kind: UnsafeOpKind) { let (description, note) = kind.description_and_note(); let unsafe_op_in_unsafe_fn_allowed = self.unsafe_op_in_unsafe_fn_allowed(); match self.safety_context { SafetyContext::BuiltinUnsafeBlock => {} SafetyContext::UnsafeBlock { ref mut used, .. } => { if !self.body_unsafety.is_unsafe() || !unsafe_op_in_unsafe_fn_allowed { // Mark this block as useful *used = true; } } SafetyContext::UnsafeFn if unsafe_op_in_unsafe_fn_allowed => {} SafetyContext::UnsafeFn => { // unsafe_op_in_unsafe_fn is disallowed self.tcx.struct_span_lint_hir( UNSAFE_OP_IN_UNSAFE_FN, self.hir_context, span, |lint| { lint.build(&format!( "{} is unsafe and requires unsafe block (error E0133)", description, )) .span_label(span, description) .note(note) .emit(); }, ) } SafetyContext::Safe => { let fn_sugg = if unsafe_op_in_unsafe_fn_allowed { " function or" } else { "" }; struct_span_err!( self.tcx.sess, span, E0133, "{} is unsafe and requires unsafe{} block", description, fn_sugg, ) .span_label(span, description) .note(note) .emit(); } } } fn warn_unused_unsafe( &self, hir_id: hir::HirId, block_span: Span, enclosing_unsafe: Option<(Span, &'static str)>, ) { let block_span = self.tcx.sess.source_map().guess_head_span(block_span); self.tcx.struct_span_lint_hir(UNUSED_UNSAFE, hir_id, block_span, |lint| { let msg = "unnecessary `unsafe` block"; let mut db = lint.build(msg); db.span_label(block_span, msg); if let Some((span, kind)) = enclosing_unsafe { db.span_label(span, format!("because it's nested under this `unsafe` {}", kind)); } db.emit(); }); } /// Whether the `unsafe_op_in_unsafe_fn` lint is `allow`ed at the current HIR node. fn unsafe_op_in_unsafe_fn_allowed(&self) -> bool { self.tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, self.hir_context).0 == Level::Allow } } // Searches for accesses to layout constrained fields. struct LayoutConstrainedPlaceVisitor<'a, 'tcx> { found: bool, thir: &'a Thir<'tcx>, tcx: TyCtxt<'tcx>, } impl<'a, 'tcx> LayoutConstrainedPlaceVisitor<'a, 'tcx> { fn new(thir: &'a Thir<'tcx>, tcx: TyCtxt<'tcx>) -> Self { Self { found: false, thir, tcx } } } impl<'a, 'tcx> Visitor<'a, 'tcx> for LayoutConstrainedPlaceVisitor<'a, 'tcx> { fn thir(&self) -> &'a Thir<'tcx> { self.thir } fn visit_expr(&mut self, expr: &Expr<'tcx>) { match expr.kind { ExprKind::Field { lhs, .. } => { if let ty::Adt(adt_def, _) = self.thir[lhs].ty.kind() { if (Bound::Unbounded, Bound::Unbounded) != self.tcx.layout_scalar_valid_range(adt_def.did) { self.found = true; } } visit::walk_expr(self, expr); } // Keep walking through the expression as long as we stay in the same // place, i.e. the expression is a place expression and not a dereference // (since dereferencing something leads us to a different place). ExprKind::Deref { .. 
} => {} ref kind if ExprCategory::of(kind).map_or(true, |cat| cat == ExprCategory::Place) => { visit::walk_expr(self, expr); } _ => {} } } } impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> { fn thir(&self) -> &'a Thir<'tcx> { &self.thir } fn visit_block(&mut self, block: &Block) { match block.safety_mode { // compiler-generated unsafe code should not count towards the usefulness of // an outer unsafe block BlockSafety::BuiltinUnsafe => { self.in_safety_context(SafetyContext::BuiltinUnsafeBlock, |this| { visit::walk_block(this, block) }); } BlockSafety::ExplicitUnsafe(hir_id) => { self.in_safety_context( SafetyContext::UnsafeBlock { span: block.span, hir_id, used: false }, |this| visit::walk_block(this, block), ); } BlockSafety::Safe => { visit::walk_block(self, block); } } } fn visit_pat(&mut self, pat: &Pat<'tcx>) { if self.in_union_destructure { match *pat.kind { // binding to a variable allows getting stuff out of variable PatKind::Binding { .. } // match is conditional on having this value | PatKind::Constant { .. } | PatKind::Variant { .. } | PatKind::Leaf { .. } | PatKind::Deref { .. } | PatKind::Range { .. } | PatKind::Slice { .. } | PatKind::Array { .. } => { self.requires_unsafe(pat.span, AccessToUnionField); return; // we can return here since this already requires unsafe } // wildcard doesn't take anything PatKind::Wild | // these just wrap other patterns PatKind::Or { .. } | PatKind::AscribeUserType { .. } => {} } }; match &*pat.kind { PatKind::Leaf { .. } => { if let ty::Adt(adt_def, ..) = pat.ty.kind() { if adt_def.is_union() { let old_in_union_destructure = std::mem::replace(&mut self.in_union_destructure, true); visit::walk_pat(self, pat); self.in_union_destructure = old_in_union_destructure; } else if (Bound::Unbounded, Bound::Unbounded) != self.tcx.layout_scalar_valid_range(adt_def.did) { let old_inside_adt = std::mem::replace(&mut self.inside_adt, true); visit::walk_pat(self, pat); self.inside_adt = old_inside_adt; } else { visit::walk_pat(self, pat); } } else { visit::walk_pat(self, pat); } } PatKind::Binding { mode: BindingMode::ByRef(borrow_kind), ty, .. } => { if self.inside_adt { let ty::Ref(_, ty, _) = ty.kind() else { span_bug!( pat.span, "BindingMode::ByRef in pattern, but found non-reference type {}", ty ); }; match borrow_kind { BorrowKind::Shallow | BorrowKind::Shared | BorrowKind::Unique => { if !ty.is_freeze(self.tcx.at(pat.span), self.param_env) { self.requires_unsafe(pat.span, BorrowOfLayoutConstrainedField); } } BorrowKind::Mut { .. } => { self.requires_unsafe(pat.span, MutationOfLayoutConstrainedField); } } } visit::walk_pat(self, pat); } PatKind::Deref { .. } => { let old_inside_adt = std::mem::replace(&mut self.inside_adt, false); visit::walk_pat(self, pat); self.inside_adt = old_inside_adt; } _ => { visit::walk_pat(self, pat); } } } fn visit_expr(&mut self, expr: &Expr<'tcx>) { // could we be in the LHS of an assignment to a field? match expr.kind { ExprKind::Field { .. } | ExprKind::VarRef { .. } | ExprKind::UpvarRef { .. } | ExprKind::Scope { .. } | ExprKind::Cast { .. } => {} ExprKind::AddressOf { .. } | ExprKind::Adt { .. } | ExprKind::Array { .. } | ExprKind::Binary { .. } | ExprKind::Block { .. } | ExprKind::Borrow { .. } | ExprKind::Literal { .. } | ExprKind::ConstBlock { .. } | ExprKind::Deref { .. } | ExprKind::Index { .. } | ExprKind::NeverToAny { .. } | ExprKind::PlaceTypeAscription { .. } | ExprKind::ValueTypeAscription { .. } | ExprKind::Pointer { .. } | ExprKind::Repeat { .. } | ExprKind::StaticRef { .. 
} | ExprKind::ThreadLocalRef { .. } | ExprKind::Tuple { .. } | ExprKind::Unary { .. } | ExprKind::Call { .. } | ExprKind::Assign { .. } | ExprKind::AssignOp { .. } | ExprKind::Break { .. } | ExprKind::Closure { .. } | ExprKind::Continue { .. } | ExprKind::Return { .. } | ExprKind::Yield { .. } | ExprKind::Loop { .. } | ExprKind::Let { .. } | ExprKind::Match { .. } | ExprKind::Box { .. } | ExprKind::If { .. } | ExprKind::InlineAsm { .. } | ExprKind::LlvmInlineAsm { .. } | ExprKind::LogicalOp { .. } | ExprKind::Use { .. } => { // We don't need to save the old value and restore it // because all the place expressions can't have more // than one child. self.assignment_info = None; } }; match expr.kind { ExprKind::Scope { value, lint_level: LintLevel::Explicit(hir_id), region_scope: _ } => { let prev_id = self.hir_context; self.hir_context = hir_id; self.visit_expr(&self.thir[value]); self.hir_context = prev_id; return; // don't visit the whole expression } ExprKind::Call { fun, ty: _, args: _, from_hir_call: _, fn_span: _ } => { if self.thir[fun].ty.fn_sig(self.tcx).unsafety() == hir::Unsafety::Unsafe { self.requires_unsafe(expr.span, CallToUnsafeFunction); } else if let &ty::FnDef(func_did, _) = self.thir[fun].ty.kind() { // If the called function has target features the calling function hasn't, // the call requires `unsafe`. Don't check this on wasm // targets, though. For more information on wasm see the // is_like_wasm check in typeck/src/collect.rs if !self.tcx.sess.target.options.is_like_wasm && !self .tcx .codegen_fn_attrs(func_did) .target_features .iter() .all(|feature| self.body_target_features.contains(feature)) { self.requires_unsafe(expr.span, CallToFunctionWith); } } } ExprKind::Deref { arg } => { if let ExprKind::StaticRef { def_id, .. } = self.thir[arg].kind { if self.tcx.is_mutable_static(def_id) { self.requires_unsafe(expr.span, UseOfMutableStatic); } else if self.tcx.is_foreign_item(def_id) { self.requires_unsafe(expr.span, UseOfExternStatic); } } else if self.thir[arg].ty.is_unsafe_ptr() { self.requires_unsafe(expr.span, DerefOfRawPointer); } } ExprKind::InlineAsm { .. } | ExprKind::LlvmInlineAsm { .. } => { self.requires_unsafe(expr.span, UseOfInlineAssembly); } ExprKind::Adt(box Adt { adt_def, variant_index: _, substs: _, user_ty: _, fields: _, base: _, }) => match self.tcx.layout_scalar_valid_range(adt_def.did) { (Bound::Unbounded, Bound::Unbounded) => {} _ => self.requires_unsafe(expr.span, InitializingTypeWith), }, ExprKind::Closure { closure_id, substs: _, upvars: _, movability: _, fake_reads: _, } => { let closure_id = closure_id.expect_local(); let closure_def = if let Some((did, const_param_id)) = ty::WithOptConstParam::try_lookup(closure_id, self.tcx) { ty::WithOptConstParam { did, const_param_did: Some(const_param_id) } } else { ty::WithOptConstParam::unknown(closure_id) }; let (closure_thir, expr) = self.tcx.thir_body(closure_def); let closure_thir = &closure_thir.borrow(); let hir_context = self.tcx.hir().local_def_id_to_hir_id(closure_id); let mut closure_visitor = UnsafetyVisitor { thir: closure_thir, hir_context, ..*self }; closure_visitor.visit_expr(&closure_thir[expr]); // Unsafe blocks can be used in closures, make sure to take it into account self.safety_context = closure_visitor.safety_context; } ExprKind::Field { lhs, .. 
} => { let lhs = &self.thir[lhs]; if let ty::Adt(adt_def, _) = lhs.ty.kind() { if adt_def.is_union() { if let Some((assigned_ty, assignment_span)) = self.assignment_info { // To avoid semver hazard, we only consider `Copy` and `ManuallyDrop` non-dropping. if !(assigned_ty .ty_adt_def() .map_or(false, |adt| adt.is_manually_drop()) || assigned_ty .is_copy_modulo_regions(self.tcx.at(expr.span), self.param_env)) { self.requires_unsafe(assignment_span, AssignToDroppingUnionField); } else { // write to non-drop union field, safe } } else { self.requires_unsafe(expr.span, AccessToUnionField); } } } } ExprKind::Assign { lhs, rhs } | ExprKind::AssignOp { lhs, rhs, .. } => { let lhs = &self.thir[lhs]; // First, check whether we are mutating a layout constrained field let mut visitor = LayoutConstrainedPlaceVisitor::new(self.thir, self.tcx); visit::walk_expr(&mut visitor, lhs); if visitor.found { self.requires_unsafe(expr.span, MutationOfLayoutConstrainedField); } // Second, check for accesses to union fields // don't have any special handling for AssignOp since it causes a read *and* write to lhs if matches!(expr.kind, ExprKind::Assign { .. }) { self.assignment_info = Some((lhs.ty, expr.span)); visit::walk_expr(self, lhs); self.assignment_info = None; visit::walk_expr(self, &self.thir()[rhs]); return; // we have already visited everything by now } } ExprKind::Borrow { borrow_kind, arg } => { let mut visitor = LayoutConstrainedPlaceVisitor::new(self.thir, self.tcx); visit::walk_expr(&mut visitor, expr); if visitor.found { match borrow_kind { BorrowKind::Shallow | BorrowKind::Shared | BorrowKind::Unique if !self.thir[arg] .ty .is_freeze(self.tcx.at(self.thir[arg].span), self.param_env) => { self.requires_unsafe(expr.span, BorrowOfLayoutConstrainedField) } BorrowKind::Mut { .. } => { self.requires_unsafe(expr.span, MutationOfLayoutConstrainedField) } BorrowKind::Shallow | BorrowKind::Shared | BorrowKind::Unique => {} } } } ExprKind::Let { expr: expr_id, .. } => { let let_expr = &self.thir[expr_id]; if let ty::Adt(adt_def, _) = let_expr.ty.kind() { if adt_def.is_union() { self.requires_unsafe(expr.span, AccessToUnionField); } } } _ => {} } visit::walk_expr(self, expr); } } #[derive(Clone, Copy)] enum SafetyContext { Safe, BuiltinUnsafeBlock, UnsafeFn, UnsafeBlock { span: Span, hir_id: hir::HirId, used: bool }, } #[derive(Clone, Copy)] enum BodyUnsafety { /// The body is not unsafe. Safe, /// The body is an unsafe function. The span points to /// the signature of the function. Unsafe(Span), } impl BodyUnsafety { /// Returns whether the body is unsafe. fn is_unsafe(&self) -> bool { matches!(self, BodyUnsafety::Unsafe(_)) } /// If the body is unsafe, returns the `Span` of its signature. 
fn unsafe_fn_sig_span(self) -> Option<Span> { match self { BodyUnsafety::Unsafe(span) => Some(span), BodyUnsafety::Safe => None, } } } #[derive(Clone, Copy, PartialEq)] enum UnsafeOpKind { CallToUnsafeFunction, UseOfInlineAssembly, InitializingTypeWith, UseOfMutableStatic, UseOfExternStatic, DerefOfRawPointer, AssignToDroppingUnionField, AccessToUnionField, MutationOfLayoutConstrainedField, BorrowOfLayoutConstrainedField, CallToFunctionWith, } use UnsafeOpKind::*; impl UnsafeOpKind { pub fn description_and_note(&self) -> (&'static str, &'static str) { match self { CallToUnsafeFunction => ( "call to unsafe function", "consult the function's documentation for information on how to avoid undefined \ behavior", ), UseOfInlineAssembly => ( "use of inline assembly", "inline assembly is entirely unchecked and can cause undefined behavior", ), InitializingTypeWith => ( "initializing type with `rustc_layout_scalar_valid_range` attr", "initializing a layout restricted type's field with a value outside the valid \ range is undefined behavior", ), UseOfMutableStatic => ( "use of mutable static", "mutable statics can be mutated by multiple threads: aliasing violations or data \ races will cause undefined behavior", ), UseOfExternStatic => ( "use of extern static", "extern statics are not controlled by the Rust type system: invalid data, \ aliasing violations or data races will cause undefined behavior", ), DerefOfRawPointer => ( "dereference of raw pointer", "raw pointers may be null, dangling or unaligned; they can violate aliasing rules \ and cause data races: all of these are undefined behavior", ), AssignToDroppingUnionField => ( "assignment to union field that might need dropping", "the previous content of the field will be dropped, which causes undefined \ behavior if the field was not properly initialized", ), AccessToUnionField => ( "access to union field", "the field may not be properly initialized: using uninitialized data will cause \ undefined behavior", ), MutationOfLayoutConstrainedField => ( "mutation of layout constrained field", "mutating layout constrained fields cannot statically be checked for valid values", ), BorrowOfLayoutConstrainedField => ( "borrow of layout constrained field with interior mutability", "references to fields of layout constrained fields lose the constraints. Coupled \ with interior mutability, the field can be changed to invalid values", ), CallToFunctionWith => ( "call to function with `#[target_feature]`", "can only be called if the required target features are available", ), } } } pub fn check_unsafety<'tcx>(tcx: TyCtxt<'tcx>, def: ty::WithOptConstParam<LocalDefId>) { // THIR unsafeck is gated under `-Z thir-unsafeck` if !tcx.sess.opts.debugging_opts.thir_unsafeck { return; } // Closures are handled by their owner, if it has a body if tcx.is_closure(def.did.to_def_id()) { let hir = tcx.hir(); let owner = hir.enclosing_body_owner(hir.local_def_id_to_hir_id(def.did)); tcx.ensure().thir_check_unsafety(hir.local_def_id(owner)); return; } let (thir, expr) = tcx.thir_body(def); let thir = &thir.borrow(); // If `thir` is empty, a type error occured, skip this body. 
if thir.exprs.is_empty() { return; } let hir_id = tcx.hir().local_def_id_to_hir_id(def.did); let body_unsafety = tcx.hir().fn_sig_by_hir_id(hir_id).map_or(BodyUnsafety::Safe, |fn_sig| { if fn_sig.header.unsafety == hir::Unsafety::Unsafe { BodyUnsafety::Unsafe(fn_sig.span) } else { BodyUnsafety::Safe } }); let body_target_features = &tcx.codegen_fn_attrs(def.did).target_features; let safety_context = if body_unsafety.is_unsafe() { SafetyContext::UnsafeFn } else { SafetyContext::Safe }; let mut visitor = UnsafetyVisitor { tcx, thir, safety_context, hir_context: hir_id, body_unsafety, body_target_features, assignment_info: None, in_union_destructure: false, param_env: tcx.param_env(def.did), inside_adt: false, }; visitor.visit_expr(&thir[expr]); } crate fn thir_check_unsafety<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) { if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) { tcx.thir_check_unsafety_for_const_arg(def) } else { check_unsafety(tcx, ty::WithOptConstParam::unknown(def_id)) } } crate fn thir_check_unsafety_for_const_arg<'tcx>( tcx: TyCtxt<'tcx>, (did, param_did): (LocalDefId, DefId), ) { check_unsafety(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) }) }
requires_unsafe
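The `AccessToUnionField` and `AssignToDroppingUnionField` cases handled above correspond to a simple surface rule. A minimal standalone sketch of that rule (illustrative user code with made-up names, not part of the compiler sources):

```rust
// Illustrative user code: reading a union field is the operation reported
// as `AccessToUnionField`, so it must appear inside an `unsafe` block.
union IntOrFloat {
    i: u32,
    f: f32,
}

fn main() {
    let mut v = IntOrFloat { i: 0x3F80_0000 };
    v.i = 0x4000_0000; // assigning a `Copy` (non-dropping) field of a union is safe
    // Reading a field is unsafe: the compiler cannot know which variant is live.
    let as_float = unsafe { v.f };
    println!("{}", as_float); // prints 2
}
```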
tiny_text.rs
/*! This crate implements various [CqlType](https://docs.rs/cql_model/0.2/cql_model/trait.CqlType.html) derivatives for storing String values of up to (and including) 255 chars in a [CQL database](https://docs.rs/cql_db/0.2/cql_db/). Will allocate 1020 bytes per value [linked](https://docs.rs/cql_db/0.2/cql_db/fn.link_dimensions.html). # Benchmarks Benchmarks supplied below are fairly rudimentary (and rounded) and are there to give a rough idea of relative costs. Full benchmark code can be found in [github](https://github.com/AndrewSisley/CQLDb/tree/master/cql_storage_types/cql_tiny_text) and can be run with `rustup run nightly cargo bench`, but please be aware that they will allocate ~102MB of disk space. The read_to_stream benchmarks also differ slightly from other [CqlType](https://docs.rs/cql_model/0.2/cql_model/trait.CqlType.html) derivatives as they stream into a Vector, not an Array. Operation | Database dimensions | Mean time (ns) --- | --- | --- Single point read | 1 | 3 060 (+/- 200) Single point read | 4 | 15 800 (+/- 1 100) Single point write | 1 | 2 800 (+/- 300) Single point write | 4 | 15 400 (+/- 1 000) Stream read 1 point | 1 | 3 500 (+/- 300) Stream read 1 point | 4 | 15 500 (+/- 1 100) Stream read 50 000 points | 1 | 56 700 000 (+/- 800 000) Stream read 50 000 points | 4 | 56 400 000 (+/- 150 000) # Examples The following creates a 1D database, writes 2 values to it, and then streams them into an array. ``` # use std::convert::TryFrom; # use std::io::{ Cursor, SeekFrom, Seek }; # use cql_tiny_text::{ TinyText, unpack_stream }; # # use std::error::Error; # use std::fs::remove_file; # fn main() -> Result<(), Box<dyn Error>> { # let _ = remove_file(format!("{}{}", DATABASE_LOCATION, "/db")); # let _ = remove_file(format!("{}{}", DATABASE_LOCATION, "/ax")); # # const DATABASE_LOCATION: &str = "./.test_db"; const N_VALUES_TO_READ: usize = 3; let base_point = [1]; let value1 = "item one"; let value3 = "شماره ۳"; cql_db::create_db::<TinyText>( DATABASE_LOCATION, &[3] ); cql_db::write_value::<TinyText>( DATABASE_LOCATION, &base_point, TinyText::try_from(value1)? )?; cql_db::write_value::<TinyText>( DATABASE_LOCATION, &[base_point[0] + 2], TinyText::try_from(value3)? )?; let mut result = Vec::with_capacity(N_VALUES_TO_READ); let mut stream = Cursor::new(Vec::new()); cql_db::read_to_stream::<TinyText>( DATABASE_LOCATION, &mut stream, &base_point, N_VALUES_TO_READ as u64 )?; stream.seek(SeekFrom::Start(0)); unpack_stream(&mut stream, N_VALUES_TO_READ, |_, value| { result.push(value) })?; assert_eq!(result[0], TinyText::try_from(value1)?); assert_eq!(result[1], TinyText::new()); assert_eq!(result[2], TinyText::try_from(value3)?); # Ok(()) # } ``` */ #![doc(html_root_url = "https://docs.rs/cql_tiny_text/0.2.1")] pub mod errors; pub mod interop; use std::fs::{ File, OpenOptions }; use std::io; use std::io::{ Read, Write, Cursor, SeekFrom, Seek }; use byteorder::{ ReadBytesExt, WriteBytesExt, LittleEndian }; use cql_model::{ CqlType, CqlWritable, CqlReadable, CqlStreamReadable }; const CONTENT_SIZE: usize = 255 * 4; const LENGTH_SIZE: usize = 2; /// Tuple wrapping `String` for working with `TinyText` values in a [CQL database](https://docs.rs/cql_db/). /// /// Limited in size to `255 * 4 = 1020` bytes. 
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Default, Hash)] pub struct TinyText(String); impl CqlType for TinyText { type ValueType = Self; const VALUE_SIZE: usize = CONTENT_SIZE + LENGTH_SIZE; } impl TinyText { pub fn new() -> Self { TinyText(String::new()) } } impl CqlWritable for TinyText { fn write_to_db(db_location: &str, value_location: u64, input_value: Self::ValueType) -> io::Result<()> { let mut file = OpenOptions::new().write(true).open(db_location)?; // unwrap should be considered safe by this point, with earlier checks in the cql_db crate (if not deliberately unchecked) file.seek(SeekFrom::Start(value_location * Self::VALUE_SIZE as u64)).unwrap(); let input_length: u16 = input_value.0.len() as u16; let mut buffer = vec![]; buffer.write_u16::<LittleEndian>(input_length)?; buffer.extend(&input_value.0.into_bytes()); file.write_all(&buffer) } } impl CqlReadable for TinyText { fn read_from_db(db_location: &str, value_location: u64) -> io::Result<Self::ValueType> {
pl CqlStreamReadable for TinyText { fn read_to_stream(db_location: &str, stream: &mut dyn Write, value_location: u64, n_values: u64) -> io::Result<()> { let mut file = File::open(&db_location)?; // unwrap should be considered safe by this point, with earlier checks in the cql_db crate (if not deliberately unchecked) file.seek(SeekFrom::Start(value_location * Self::VALUE_SIZE as u64)).unwrap(); let mut value_buffer = [0; CONTENT_SIZE]; for _i in 0..n_values { // must have value cleared for each value read or previous value will be quietly retained and re-written to the (out) stream let mut size_buffer = [0; LENGTH_SIZE]; match file.read_exact(&mut size_buffer) { Err(e) => { // ignore io::ErrorKind::UnexpectedEof and continue if e.kind() != io::ErrorKind::UnexpectedEof { return Err(e) } } _ => { } } let mut size_rdr = Cursor::new(size_buffer); let size = usize::from(size_rdr.read_u16::<LittleEndian>()?); match file.read_exact(&mut value_buffer) { Err(e) => { // ignore io::ErrorKind::UnexpectedEof and continue if e.kind() != io::ErrorKind::UnexpectedEof { return Err(e) } } _ => { } } let mut write_buffer = Vec::with_capacity(LENGTH_SIZE + size); write_buffer.extend(&size_buffer); for i in 0..size { write_buffer.push(value_buffer[i]); } stream.write_all(&mut write_buffer)?; } stream.flush() } } /// Unpacks `n_values` of `TinyText` from a stream, calling `value_handler` with each value and it's index. /// /// # Errors /// /// Will return any [I/O errors](https://doc.rust-lang.org/nightly/std/io/enum.ErrorKind.html) encountered during the execution of the function. If an error /// is returned, it may be that values have already been fed into the `value_handler`. /// /// # Panics /// /// Function does not actively defend against panics, and may do so if given invalid parameters. If the function panics it may be that values have /// already been fed into the `value_handler`. /// /// # Examples /// ```ignore /// cql_db::read_to_stream::<TinyText>( /// DATABASE_LOCATION, /// &mut stream, /// &base_point, /// N_VALUES_TO_READ as u64 /// )?; /// /// stream.seek(SeekFrom::Start(0)); /// /// unpack_stream(&mut stream, N_VALUES_TO_READ, |idx, value| { /// result[idx] = value /// })?; /// ``` pub fn unpack_stream<F>(stream: &mut Cursor<Vec<u8>>, n_values: usize, mut value_handler: F) -> io::Result<()> where F: FnMut(usize, TinyText) { let mut size_buffer = [0; LENGTH_SIZE]; for index in 0..n_values { stream.read_exact(&mut size_buffer)?; let mut size_rdr = Cursor::new(size_buffer); let size = usize::from(size_rdr.read_u16::<LittleEndian>()?); if size == 0 { value_handler(index, TinyText::new()); } else { let mut value_buffer = vec![0; size]; stream.read_exact(&mut value_buffer)?; // unwrap should be safe here, as we assume we are the only ones writing to the file, however low performance cost plus the fact that someone else `could` // write to the file discourages the use of the unsafe method that skips the checks value_handler(index, TinyText(String::from_utf8(value_buffer).unwrap())); } } Ok(()) }
let mut file = File::open(&db_location)?; // unwrap should be considered safe by this point, with earlier checks in the cql_db crate (if not deliberately unchecked) file.seek(SeekFrom::Start(value_location * Self::VALUE_SIZE as u64)).unwrap(); let mut size_buffer = [0; LENGTH_SIZE]; file.read_exact(&mut size_buffer)?; let mut size_rdr = Cursor::new(size_buffer); let size = usize::from(size_rdr.read_u16::<LittleEndian>()?); if size == 0 { return Ok(TinyText::new()) } let mut value_buffer = Vec::with_capacity(size); file.take(size as u64).read_to_end(&mut value_buffer)?; Ok( TinyText( // unwrap should be safe here, as we assume we are the only ones writing to the file, however low performance cost plus the fact that someone else `could` // write to the file discourages the use of the unsafe method that skips the checks String::from_utf8(value_buffer).unwrap() ) ) } } im
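The reader and writer above rely on a fixed-width record layout: a little-endian `u16` byte length followed by the UTF-8 bytes, with each value found at `value_location * VALUE_SIZE`. A standalone sketch of that layout (illustrative only; the function names are invented here, and the crate itself seeks to the slot offset rather than padding each record):

```rust
// Standalone sketch of the on-disk layout assumed above (illustrative only).
const CONTENT_SIZE: usize = 255 * 4; // up to 255 chars, each up to 4 UTF-8 bytes
const LENGTH_SIZE: usize = 2;

fn encode_slot(s: &str) -> Vec<u8> {
    let bytes = s.as_bytes();
    assert!(bytes.len() <= CONTENT_SIZE);
    let mut slot = Vec::with_capacity(LENGTH_SIZE + CONTENT_SIZE);
    slot.extend_from_slice(&(bytes.len() as u16).to_le_bytes());
    slot.extend_from_slice(bytes);
    slot.resize(LENGTH_SIZE + CONTENT_SIZE, 0); // pad to the fixed slot width
    slot
}

fn decode_slot(slot: &[u8]) -> String {
    let len = u16::from_le_bytes([slot[0], slot[1]]) as usize;
    String::from_utf8(slot[LENGTH_SIZE..LENGTH_SIZE + len].to_vec()).unwrap()
}

fn main() {
    let slot = encode_slot("شماره ۳"); // length prefix counts bytes, not chars
    assert_eq!(decode_slot(&slot), "شماره ۳");
}
```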
shipment_test.py
import os import os.path import sys import datetime import webbrowser import argparse import time import traceback import selenium import selenium.webdriver.chrome.options import pathlib # ======== Command args singleton class CommandArgs: def __init__ (self): self.argParser = argparse.ArgumentParser () self.argParser.add_argument ('-de', '--dextex', help = "show extended exception reports", action = 'store_true') self.argParser.add_argument ('-f', '--fcall', help = 'test fast calls', action = 'store_true') self.argParser.add_argument ('-i', '--inst', help = 'installed version rather than new one', action = 'store_true') self.argParser.add_argument ('-b', '--blind', help = 'don\'t start browser', action = 'store_true') self.argParser.add_argument ('-u', '--unattended', help = 'unattended mode', action = 'store_true') self.__dict__.update (self.argParser.parse_args () .__dict__) commandArgs = CommandArgs () # ======== Browser controller singleton class BrowserController: def __init__ (self): self.options = selenium.webdriver.chrome.options.Options () self.options.add_argument ('start-maximized') if commandArgs.unattended: self.options.add_argument ('--headless') # Runs Chrome in headless mode. self.options.add_argument ('--no-sandbox') # Bypass OS security model self.options.add_argument ('--disable-gpu') # Applicable to windows OS only self.options.add_argument ('disable-infobars') self.options.add_argument ('--disable-extensions') self.webDriver = selenium.webdriver.Chrome (chrome_options = self.options) self.nrOfTabs = 0 def waitForNewTab (self): while len (self.webDriver.window_handles) <= self.nrOfTabs: time.sleep (0.5) self.nrOfTabs = len (self.webDriver.window_handles) def
(self, url, run): print (f'Browser controller is opening URL: {url}') try: if self.nrOfTabs > 0: if commandArgs.unattended: # ---- Show in existing tab self.webDriver.execute_script (f'window.location.href = "{url}";') else: # ---- Open new tab self.webDriver.execute_script (f'window.open ("{url}","_blank");') # !!! Avoid redundant open command self.waitForNewTab () self.webDriver.switch_to.window (self.webDriver.window_handles [-1]) else: # ---- Open browser and default tab self.webDriver.get (url) self.waitForNewTab () except: self.webDriver.switch_to.alert.accept(); if run: while (True): self.message = self.webDriver.find_element_by_id ('message') if 'failed' in self.message.text or 'succeeded' in self.message.text: break time.sleep (0.5) print () print ('=========================================================================') print (f'Back to back autotest, result: {self.message.text.upper ()}') print ('=========================================================================') print () if 'succeeded' in self.message.text: return True else: return False else: print () print ('=========================================================================') print ('No back to back autotest') print ('=========================================================================') print () return True browserController = BrowserController () # ======== Preparations relSourcePrepathsOfErrors = [] host = 'http://localhost:' pythonServerPort = '8000' parcelServerPort = '8001' nodeServerPort = '8002' pythonServerUrl = host + pythonServerPort parcelServerUrl = host + parcelServerPort nodeServerUrl = host + nodeServerPort transpileCommand = 'transcrypt' if commandArgs.inst else 'run_transcrypt' shipDir = os.path.dirname (os.path.abspath (__file__)) .replace ('\\', '/') appRootDir = '/'.join (shipDir.split ('/')[ : -2]) print (f'\nApplication root directory: {appRootDir}\n') def getAbsPath (relPath): return '{}/{}'.format (appRootDir, relPath) os.system ('cls' if os.name == 'nt' else 'clear') # ---- Start an http server in the Transcryp/transcrypt directory if not commandArgs.blind: if commandArgs.unattended: os.system (f'py37 -m http.server --directory {appRootDir} &') else: os.system (f'py37 -m http.server --directory {appRootDir} &') # ---- Allow visual check of all command line options os.system (f'{transpileCommand} -h') # ======== Individual test function def test (relSourcePrepath, run, extraSwitches, messagePrename = '', nodeJs = False, parcelJs = False, build = True, pause = 0, needsAttention = False): if commandArgs.unattended and needsAttention: return # This test shouldn't be done, since it can't run unattended print (f'\n\n******** BEGIN TEST {relSourcePrepath} ********\n') time.sleep (pause) # ---- Compute some slugs sourcePrepath = getAbsPath (relSourcePrepath) sourcePrepathSplit = sourcePrepath.split ("/") sourceDir = '/'.join (sourcePrepathSplit [:-1]) moduleName = sourcePrepathSplit [-1] targetDir = f'{sourceDir}/__target__' targetPrepath = f'{targetDir}/{moduleName}' messagePrepath = f'{targetDir}/{messagePrename}' # ---- If there are relevant console messages of the compilation process, # like with the static typechecking tests, write them into a file that can be served for a visual check if not os.path.exists (targetDir): os.makedirs (targetDir) # Transcrypt will make targetDir too late, so it has to happen here redirect = f' > {messagePrepath}.out' if messagePrename else '' # ---- Default switches defaultSwitches = '-da -sf -de -m -n ' if commandArgs.dextex: defaultSwitches += '-de 
' if build: defaultSwitches += '-b ' # ---- Run with CPython to generate HTML file with back to back reference info if run: os.system (f'{transpileCommand} -r {defaultSwitches}{extraSwitches}{sourcePrepath}') # ---- Compile with Transcrypt if parcelJs: origDir = os.getcwd () os.chdir (sourceDir) os.system (f'start cmd /k node test {parcelServerPort}') os.chdir (origDir) else: os.system (f'{transpileCommand} {defaultSwitches}{extraSwitches}{sourcePrepath}{redirect}') # ---- If it has to run on node, apply rollup to obtain monolith, since node doesn't support named imports and exports if nodeJs: os.system (f'rollup {targetPrepath}.js --o {targetPrepath}.bundle.js --f cjs') # --- Compute appropriate URL and wait a while if needed if not commandArgs.blind: if parcelJs: time.sleep (20) url = parcelServerUrl elif nodeJs: os.system (f'start cmd /k node {targetPrepath}.bundle.js {nodeServerPort}') time.sleep (5) url = nodeServerUrl else: url = f'{pythonServerUrl}/{relSourcePrepath}.html' success = browserController.open (url, run) if commandArgs.unattended and not success: relSourcePrepathsOfErrors.append (relSourcePrepath) print (f'\n******** END TEST {relSourcePrepath} ********\n\n') # ======== Perform individual tests for switches in (('', '-f ') if commandArgs.fcall else ('',)): test ('development/automated_tests/hello/autotest', True, switches) test ('development/automated_tests/transcrypt/autotest', True, switches + '-c -xr -xg ') test ('development/automated_tests/time/autotest', True, switches, needsAttention = True) test ('development/automated_tests/re/autotest', True, switches) test ('development/manual_tests/async_await/test', False, switches) test ('development/manual_tests/import_export_aliases/test', False, switches + '-am ') test ('development/manual_tests/module_random/module_random', False, switches) test ('development/manual_tests/static_types/static_types', False, switches + '-ds -dc ', messagePrename = 'static_types') test ('development/manual_tests/transcrypt_and_python_results_differ/results', False, switches) test ('development/manual_tests/transcrypt_only/transcrypt_only', False, switches) test ('demos/nodejs_demo/nodejs_demo', False, switches, nodeJs = True) test ('demos/parcel_demo/test_shipment', False, switches, parcelJs = True) test ('demos/terminal_demo/terminal_demo', False, switches, needsAttention = True) test ('demos/hello/hello', False, switches, needsAttention = False) test ('demos/jquery_demo/jquery_demo', False, switches) test ('demos/d3js_demo/d3js_demo', False, switches) test ('demos/ios_app/ios_app', False, switches) test ('demos/react_demo/react_demo', False, switches) test ('demos/riot_demo/riot_demo', False, switches) test ('demos/plotly_demo/plotly_demo', False, switches) test ('demos/three_demo/three_demo', False, switches) test ('demos/pong/pong', False, switches) test ('demos/pysteroids_demo/pysteroids', False, switches) test ('demos/turtle_demos/star', False, switches, pause = 2) test ('demos/turtle_demos/snowflake', False, switches, pause = 2) test ('demos/turtle_demos/mondrian', False, switches, pause = 2) test ('demos/turtle_demos/mandala', False, switches, pause = 2) # test ('demos/cyclejs_demo/cyclejs_demo', False, switches) test ('demos/cyclejs_demo/cyclejs_http_demo', False, switches) test ('demos/cyclejs_demo/component_demos/isolated_bmi_slider/bmi', False, switches) test ('demos/cyclejs_demo/component_demos/labeled_slider/labeled_slider', False, switches) test ('tutorials/baseline/bl_010_hello_world/hello_world', False, switches) test 
('tutorials/baseline/bl_020_assign/assign', False, switches) test ('tutorials/baseline/bl_030_if_else_prompt/if_else_prompt', False, switches, needsAttention = True) test ('tutorials/baseline/bl_035_if_else_event/if_else_event', False, switches, needsAttention = True) test ('tutorials/baseline/bl_040_for_simple/for_simple', False, switches) test ('tutorials/baseline/bl_042_for_nested/for_nested', False, switches) test ('tutorials/baseline/bl_045_while_simple/while_simple', False, switches, needsAttention = True) test ('tutorials/static_typing/static_typing', False, switches + '-c -ds ', messagePrename = 'static_typing') if relSourcePrepathsOfErrors: print ('\n\n!!!!!!!!!!!!!!!!!!!!\n') for relSourcePrepathOfError in relSourcePrepathsOfErrors: print (f'SHIPMENT TEST ERROR: {relSourcePrepathOfError}') print ('\n!!!!!!!!!!!!!!!!!!!!\n\n') print ('\nSHIPMENT TEST FAILED\n') sys.exit (1) else: # ---- Make docs, the resulting files are untracked if not commandArgs.unattended: origDir = os.getcwd () sphinxDir = '/'.join ([appRootDir, 'docs/sphinx']) os.chdir (sphinxDir) os.system ('touch *.rst') os.system ('make html') os.chdir (origDir) # ---- Terminate print ('\nSHIPMENT TEST SUCCEEDED\n') sys.exit (0)
open
smoke.rs
extern crate tempdir; use std::fs::File; use std::io::prelude::*; use std::path::Path; use std::process::Command; use std::{env, fs}; use tempdir::TempDir; macro_rules! t { ($e:expr) => (match $e { Ok(e) => e, Err(e) => panic!("{} failed with {}", stringify!($e), e), }) } fn cargo_sysroot() -> Command { let mut me = t!(env::current_exe()); me.pop(); me.push("cargo-sysroot"); let mut cmd = Command::new(me); cmd.arg("sysroot"); return cmd } fn exists_rlib(krate: &str, profile: &str, target: &str, sysroot: &Path) -> bool { for entry in t!(fs::read_dir(sysroot.join(format!("{}/lib/rustlib/{}/lib", profile, target)))) { let path = t!(entry).path(); let filename = path.file_stem().unwrap().to_str().unwrap(); let extension = path.extension().unwrap().to_str().unwrap(); if filename.starts_with(&format!("lib{}", krate)) && extension == "rlib" && path.is_file() { return true; } } false } #[test] fn supported_triple() { let triple = "arm-unknown-linux-gnueabihf"; let td = t!(TempDir::new("cargo-sysroot")); run(cargo_sysroot().arg("--target") .arg(triple) .arg(td.path()) .arg("--verbose")); assert!(exists_rlib("core", "debug", triple, td.path())); run(cargo_sysroot().arg("--target") .arg(triple) .arg(td.path()) .arg("--verbose") .arg("--release")); assert!(exists_rlib("core", "debug", triple, td.path())); assert!(exists_rlib("core", "release", triple, td.path())); } #[test] fn custom_target() { let spec = r#" { "arch": "arm", "llvm-target": "thumbv7m-none-eabi", "os": "none", "target-endian": "little", "target-pointer-width": "32", "archive-format": "gnu" } "#; let td = t!(TempDir::new("cargo-sysroot")); t!(t!(File::create(td.path().join("custom.json"))).write_all(spec.as_bytes())); // test --target triple run(cargo_sysroot().arg("--target=custom") .arg(td.path().join("target"))
.arg("--verbose") .current_dir(td.path())); assert!(exists_rlib("core", "debug", "custom", &td.path().join("target"))); // test /path/to/target.json run(cargo_sysroot().arg("--target") .arg(td.path().join("custom.json")) .arg(td.path().join("other")) .arg("--verbose")); assert!(exists_rlib("core", "debug", "custom", &td.path().join("other"))); // make sure the original spec is there but the copied version is gone assert!(td.path().join("custom.json").is_file()); assert!(!td.path().join("other/src/libcore/custom.json").is_file()); } #[test] fn sysroot_toml() { let toml = r#" [target.arm-unknown-linux-gnueabihf] crates = ["collections"] "#; let triple = "arm-unknown-linux-gnueabihf"; let td = t!(TempDir::new("cargo-sysroot")); t!(t!(File::create(td.path().join("sysroot.toml"))).write_all(toml.as_bytes())); run(cargo_sysroot().args(&["--target", triple]) .arg(td.path()) .arg("--verbose") .current_dir(td.path())); assert!(exists_rlib("core", "debug", triple, td.path())); assert!(exists_rlib("collections", "debug", triple, td.path())); } fn run(cmd: &mut Command) { println!("running: {:?}", cmd); let output = t!(cmd.output()); if !output.status.success() { println!("--- stdout:\n{}", String::from_utf8_lossy(&output.stdout)); println!("--- stderr:\n{}", String::from_utf8_lossy(&output.stderr)); panic!("expected success, got: {}", output.status); } }
validate.rs
//! Validates the MIR to ensure that invariants are upheld. use rustc_index::bit_set::BitSet; use rustc_infer::infer::TyCtxtInferExt; use rustc_middle::mir::interpret::Scalar; use rustc_middle::mir::visit::{PlaceContext, Visitor}; use rustc_middle::mir::{ traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, Local, Location, MirPass, MirPhase, Operand, Place, PlaceElem, PlaceRef, ProjectionElem, Rvalue, SourceScope, Statement, StatementKind, Terminator, TerminatorKind, UnOp, START_BLOCK, }; use rustc_middle::ty::fold::BottomUpFolder; use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeFoldable}; use rustc_mir_dataflow::impls::MaybeStorageLive; use rustc_mir_dataflow::storage::AlwaysLiveLocals; use rustc_mir_dataflow::{Analysis, ResultsCursor}; use rustc_target::abi::Size; #[derive(Copy, Clone, Debug)] enum EdgeKind { Unwind, Normal, } pub struct Validator { /// Describes at which point in the pipeline this validation is happening. pub when: String, /// The phase for which we are upholding the dialect. If the given phase forbids a specific /// element, this validator will now emit errors if that specific element is encountered. /// Note that phases that change the dialect cause all *following* phases to check the /// invariants of the new dialect. A phase that changes dialects never checks the new invariants /// itself. pub mir_phase: MirPhase, } impl<'tcx> MirPass<'tcx> for Validator { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // FIXME(JakobDegen): These bodies never instantiated in codegend anyway, so it's not // terribly important that they pass the validator. However, I think other passes might // still see them, in which case they might be surprised. It would probably be better if we // didn't put this through the MIR pipeline at all. if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) { return; } let def_id = body.source.def_id(); let param_env = tcx.param_env(def_id); let mir_phase = self.mir_phase; let always_live_locals = AlwaysLiveLocals::new(body); let storage_liveness = MaybeStorageLive::new(always_live_locals) .into_engine(tcx, body) .iterate_to_fixpoint() .into_results_cursor(body); TypeChecker { when: &self.when, body, tcx, param_env, mir_phase, reachable_blocks: traversal::reachable_as_bitset(body), storage_liveness, place_cache: Vec::new(), value_cache: Vec::new(), } .visit_body(body); } } /// Returns whether the two types are equal up to lifetimes. /// All lifetimes, including higher-ranked ones, get ignored for this comparison. /// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.) /// /// The point of this function is to approximate "equal up to subtyping". However, /// the approximation is incorrect as variance is ignored. pub fn equal_up_to_regions<'tcx>( tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, src: Ty<'tcx>, dest: Ty<'tcx>, ) -> bool { // Fast path. if src == dest { return true; } // Normalize lifetimes away on both sides, then compare. let normalize = |ty: Ty<'tcx>| { tcx.normalize_erasing_regions( param_env, ty.fold_with(&mut BottomUpFolder { tcx, // FIXME: We erase all late-bound lifetimes, but this is not fully correct. 
// If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`, // this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`, // since one may have an `impl SomeTrait for fn(&32)` and // `impl SomeTrait for fn(&'static u32)` at the same time which // specify distinct values for Assoc. (See also #56105) lt_op: |_| tcx.lifetimes.re_erased, // Leave consts and types unchanged. ct_op: |ct| ct, ty_op: |ty| ty, }), ) }; tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok()) } struct TypeChecker<'a, 'tcx> { when: &'a str, body: &'a Body<'tcx>, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, mir_phase: MirPhase, reachable_blocks: BitSet<BasicBlock>, storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>, place_cache: Vec<PlaceRef<'tcx>>, value_cache: Vec<u128>, } impl<'a, 'tcx> TypeChecker<'a, 'tcx> { fn fail(&self, location: Location, msg: impl AsRef<str>) { let span = self.body.source_info(location).span; // We use `delay_span_bug` as we might see broken MIR when other errors have already // occurred. self.tcx.sess.diagnostic().delay_span_bug( span, &format!( "broken MIR in {:?} ({}) at {:?}:\n{}", self.body.source.instance, self.when, location, msg.as_ref() ), ); } fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) { if bb == START_BLOCK { self.fail(location, "start block must not have predecessors") } if let Some(bb) = self.body.basic_blocks().get(bb) { let src = self.body.basic_blocks().get(location.block).unwrap(); match (src.is_cleanup, bb.is_cleanup, edge_kind) { // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges (false, false, EdgeKind::Normal) // Non-cleanup blocks can jump to cleanup blocks along unwind edges | (false, true, EdgeKind::Unwind) // Cleanup blocks can jump to cleanup blocks along non-unwind edges | (true, true, EdgeKind::Normal) => {} // All other jumps are invalid _ => { self.fail( location, format!( "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})", edge_kind, bb, src.is_cleanup, bb.is_cleanup, ) ) } } } else { self.fail(location, format!("encountered jump to invalid basic block {:?}", bb)) } } /// Check if src can be assigned into dest. /// This is not precise, it will accept some incorrect assignments. fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool { // Fast path before we normalize. if src == dest { // Equal types, all is good. return true; } // Normalization reveals opaque types, but we may be validating MIR while computing // said opaque types, causing cycles. if (src, dest).has_opaque_types() { return true; } // Normalize projections and things like that. let param_env = self.param_env.with_reveal_all_normalized(self.tcx); let src = self.tcx.normalize_erasing_regions(param_env, src); let dest = self.tcx.normalize_erasing_regions(param_env, dest); // Type-changing assignments can happen when subtyping is used. While // all normal lifetimes are erased, higher-ranked types with their // late-bound lifetimes are still around and can lead to type // differences. So we compare ignoring lifetimes. 
equal_up_to_regions(self.tcx, param_env, src, dest) } } impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { fn visit_local(&mut self, local: &Local, context: PlaceContext, location: Location) { if self.body.local_decls.get(*local).is_none() { self.fail( location, format!("local {:?} has no corresponding declaration in `body.local_decls`", local), ); } if self.reachable_blocks.contains(location.block) && context.is_use() { // Uses of locals must occur while the local's storage is allocated. self.storage_liveness.seek_after_primary_effect(location); let locals_with_storage = self.storage_liveness.get(); if !locals_with_storage.contains(*local) { self.fail(location, format!("use of local {:?}, which has no storage here", local)); } } } fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) { // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed. if self.tcx.sess.opts.debugging_opts.validate_mir && self.mir_phase < MirPhase::DropsLowered { // `Operand::Copy` is only supposed to be used with `Copy` types. if let Operand::Copy(place) = operand { let ty = place.ty(&self.body.local_decls, self.tcx).ty; let span = self.body.source_info(location).span; if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) { self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty)); } } } self.super_operand(operand, location); } fn visit_projection_elem( &mut self, local: Local, proj_base: &[PlaceElem<'tcx>], elem: PlaceElem<'tcx>, context: PlaceContext, location: Location, ) { if let ProjectionElem::Index(index) = elem { let index_ty = self.body.local_decls[index].ty; if index_ty != self.tcx.types.usize { self.fail(location, format!("bad index ({:?} != usize)", index_ty)) } } self.super_projection_elem(local, proj_base, elem, context, location); } fn visit_place(&mut self, place: &Place<'tcx>, _: PlaceContext, _: Location) { // Set off any `bug!`s in the type computation code let _ = place.ty(&self.body.local_decls, self.tcx); } fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { macro_rules! check_kinds { ($t:expr, $text:literal, $($patterns:tt)*) => { if !matches!(($t).kind(), $($patterns)*) { self.fail(location, format!($text, $t)); } }; } match rvalue { Rvalue::Use(_) => {} Rvalue::Aggregate(agg_kind, _) => { let disallowed = match **agg_kind { AggregateKind::Array(..) => false, AggregateKind::Generator(..) => self.mir_phase >= MirPhase::GeneratorsLowered, _ => self.mir_phase >= MirPhase::Deaggregated, }; if disallowed { self.fail( location, format!("{:?} have been lowered to field assignments", rvalue), ) } } Rvalue::Ref(_, BorrowKind::Shallow, _) => { if self.mir_phase >= MirPhase::DropsLowered { self.fail( location, "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase", ); } } Rvalue::Len(p) => { let pty = p.ty(&self.body.local_decls, self.tcx).ty; check_kinds!( pty, "Cannot compute length of non-array type {:?}", ty::Array(..) | ty::Slice(..) 
); } Rvalue::BinaryOp(op, vals) | Rvalue::CheckedBinaryOp(op, vals) => { use BinOp::*; let a = vals.0.ty(&self.body.local_decls, self.tcx); let b = vals.1.ty(&self.body.local_decls, self.tcx); match op { Offset => { check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..)); if b != self.tcx.types.isize && b != self.tcx.types.usize { self.fail(location, format!("Cannot offset by non-isize type {:?}", b)); } } Eq | Lt | Le | Ne | Ge | Gt => { for x in [a, b] { check_kinds!( x, "Cannot compare type {:?}", ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::RawPtr(..) | ty::FnPtr(..) ) } // The function pointer types can have lifetimes if !self.mir_assign_valid_types(a, b) { self.fail( location, format!("Cannot compare unequal types {:?} and {:?}", a, b), ); } } Shl | Shr => { for x in [a, b] { check_kinds!( x, "Cannot shift non-integer type {:?}", ty::Uint(..) | ty::Int(..) ) } } BitAnd | BitOr | BitXor => { for x in [a, b] { check_kinds!( x, "Cannot perform bitwise op on type {:?}", ty::Uint(..) | ty::Int(..) | ty::Bool ) } if a != b { self.fail( location, format!( "Cannot perform bitwise op on unequal types {:?} and {:?}", a, b ), ); } } Add | Sub | Mul | Div | Rem => { for x in [a, b] { check_kinds!( x, "Cannot perform op on type {:?}", ty::Uint(..) | ty::Int(..) | ty::Float(..) ) } if a != b { self.fail( location, format!("Cannot perform op on unequal types {:?} and {:?}", a, b), ); } } } } Rvalue::UnaryOp(op, operand) => { let a = operand.ty(&self.body.local_decls, self.tcx); match op { UnOp::Neg => { check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..)) } UnOp::Not => { check_kinds!( a, "Cannot binary not type {:?}", ty::Int(..) | ty::Uint(..) | ty::Bool ); } } } Rvalue::ShallowInitBox(operand, _) => { let a = operand.ty(&self.body.local_decls, self.tcx); check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..)); } _ => {} } self.super_rvalue(rvalue, location); } fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) { match &statement.kind { StatementKind::Assign(box (dest, rvalue)) => { // LHS and RHS of the assignment must have the same type. let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty; let right_ty = rvalue.ty(&self.body.local_decls, self.tcx); if !self.mir_assign_valid_types(right_ty, left_ty) { self.fail( location, format!( "encountered `{:?}` with incompatible types:\n\ left-hand side has type: {}\n\ right-hand side has type: {}", statement.kind, left_ty, right_ty, ), ); } // FIXME(JakobDegen): Check this for all rvalues, not just this one. if let Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) = rvalue { // The sides of an assignment must not alias. Currently this just checks whether // the places are identical. if dest == src { self.fail( location, "encountered `Assign` statement with overlapping memory", ); } } } StatementKind::AscribeUserType(..) => { if self.mir_phase >= MirPhase::DropsLowered { self.fail( location, "`AscribeUserType` should have been removed after drop lowering phase", ); } } StatementKind::FakeRead(..) 
=> { if self.mir_phase >= MirPhase::DropsLowered { self.fail( location, "`FakeRead` should have been removed after drop lowering phase", ); } } StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { ref src, ref dst, ref count, }) => { let src_ty = src.ty(&self.body.local_decls, self.tcx); let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) { src_deref.ty } else { self.fail( location, format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty), ); return; }; let dst_ty = dst.ty(&self.body.local_decls, self.tcx); let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) { dst_deref.ty } else { self.fail( location, format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty), ); return; }; // since CopyNonOverlapping is parametrized by 1 type, // we only need to check that they are equal and not keep an extra parameter. if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) { self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty)); } let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx); if op_cnt_ty != self.tcx.types.usize { self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty)) } } StatementKind::SetDiscriminant { place, .. } => { if self.mir_phase < MirPhase::Deaggregated { self.fail(location, "`SetDiscriminant`is not allowed until deaggregation"); } let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind(); if !matches!(pty, ty::Adt(..) | ty::Generator(..) | ty::Opaque(..)) { self.fail( location, format!( "`SetDiscriminant` is only allowed on ADTs and generators, not {:?}", pty ), ); } } StatementKind::Deinit(..) => { if self.mir_phase < MirPhase::Deaggregated { self.fail(location, "`Deinit`is not allowed until deaggregation"); } } StatementKind::Retag(_, _) => { // FIXME(JakobDegen) The validator should check that `self.mir_phase < // DropsLowered`. However, this causes ICEs with generation of drop shims, which // seem to fail to set their `MirPhase` correctly. } StatementKind::StorageLive(..) | StatementKind::StorageDead(..) 
| StatementKind::Coverage(_) | StatementKind::Nop => {} } self.super_statement(statement, location); } fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) { match &terminator.kind { TerminatorKind::Goto { target } => { self.check_edge(location, *target, EdgeKind::Normal); } TerminatorKind::SwitchInt { targets, switch_ty, discr } => { let ty = discr.ty(&self.body.local_decls, self.tcx); if ty != *switch_ty { self.fail( location, format!( "encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}", ty, switch_ty, ), ); } let target_width = self.tcx.sess.target.pointer_width; let size = Size::from_bits(match switch_ty.kind() { ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(), ty::Int(int) => int.normalize(target_width).bit_width().unwrap(), ty::Char => 32, ty::Bool => 1, other => bug!("unhandled type: {:?}", other), }); for (value, target) in targets.iter() { if Scalar::<()>::try_from_uint(value, size).is_none() { self.fail( location, format!("the value {:#x} is not a proper {:?}", value, switch_ty), ) } self.check_edge(location, target, EdgeKind::Normal); } self.check_edge(location, targets.otherwise(), EdgeKind::Normal); self.value_cache.clear(); self.value_cache.extend(targets.iter().map(|(value, _)| value)); let all_len = self.value_cache.len(); self.value_cache.sort_unstable(); self.value_cache.dedup(); let has_duplicates = all_len != self.value_cache.len(); if has_duplicates { self.fail( location, format!( "duplicated values in `SwitchInt` terminator: {:?}", terminator.kind, ), ); } } TerminatorKind::Drop { target, unwind, .. } => { self.check_edge(location, *target, EdgeKind::Normal); if let Some(unwind) = unwind { self.check_edge(location, *unwind, EdgeKind::Unwind); } } TerminatorKind::DropAndReplace { target, unwind, .. } => { if self.mir_phase >= MirPhase::DropsLowered { self.fail( location, "`DropAndReplace` should have been removed during drop elaboration", ); } self.check_edge(location, *target, EdgeKind::Normal); if let Some(unwind) = unwind { self.check_edge(location, *unwind, EdgeKind::Unwind); } } TerminatorKind::Call { func, args, destination, cleanup, .. } => { let func_ty = func.ty(&self.body.local_decls, self.tcx); match func_ty.kind() { ty::FnPtr(..) | ty::FnDef(..) => {} _ => self.fail( location, format!("encountered non-callable type {} in `Call` terminator", func_ty), ), } if let Some((_, target)) = destination { self.check_edge(location, *target, EdgeKind::Normal); } if let Some(cleanup) = cleanup { self.check_edge(location, *cleanup, EdgeKind::Unwind); } // The call destination place and Operand::Move place used as an argument might be // passed by a reference to the callee. Consequently they must be non-overlapping. // Currently this simply checks for duplicate places. self.place_cache.clear(); if let Some((destination, _)) = destination { self.place_cache.push(destination.as_ref()); } for arg in args { if let Operand::Move(place) = arg { self.place_cache.push(place.as_ref()); } } let all_len = self.place_cache.len(); self.place_cache.sort_unstable(); self.place_cache.dedup(); let has_duplicates = all_len != self.place_cache.len(); if has_duplicates { self.fail( location, format!( "encountered overlapping memory in `Call` terminator: {:?}", terminator.kind, ), ); } } TerminatorKind::Assert { cond, target, cleanup, .. 
} => { let cond_ty = cond.ty(&self.body.local_decls, self.tcx); if cond_ty != self.tcx.types.bool { self.fail( location, format!( "encountered non-boolean condition of type {} in `Assert` terminator", cond_ty ), ); } self.check_edge(location, *target, EdgeKind::Normal); if let Some(cleanup) = cleanup { self.check_edge(location, *cleanup, EdgeKind::Unwind); } } TerminatorKind::Yield { resume, drop, .. } => { if self.body.generator.is_none() { self.fail(location, "`Yield` cannot appear outside generator bodies"); } if self.mir_phase >= MirPhase::GeneratorsLowered { self.fail(location, "`Yield` should have been replaced by generator lowering"); } self.check_edge(location, *resume, EdgeKind::Normal); if let Some(drop) = drop { self.check_edge(location, *drop, EdgeKind::Normal); } } TerminatorKind::FalseEdge { real_target, imaginary_target } => { if self.mir_phase >= MirPhase::DropsLowered { self.fail( location, "`FalseEdge` should have been removed after drop elaboration", ); } self.check_edge(location, *real_target, EdgeKind::Normal); self.check_edge(location, *imaginary_target, EdgeKind::Normal); } TerminatorKind::FalseUnwind { real_target, unwind } => { if self.mir_phase >= MirPhase::DropsLowered { self.fail( location, "`FalseUnwind` should have been removed after drop elaboration", ); } self.check_edge(location, *real_target, EdgeKind::Normal); if let Some(unwind) = unwind { self.check_edge(location, *unwind, EdgeKind::Unwind); } } TerminatorKind::InlineAsm { destination, cleanup, .. } => { if let Some(destination) = destination { self.check_edge(location, *destination, EdgeKind::Normal); } if let Some(cleanup) = cleanup { self.check_edge(location, *cleanup, EdgeKind::Unwind); } } TerminatorKind::GeneratorDrop => { if self.body.generator.is_none() { self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies"); } if self.mir_phase >= MirPhase::GeneratorsLowered { self.fail( location, "`GeneratorDrop` should have been replaced by generator lowering", ); } } TerminatorKind::Resume | TerminatorKind::Abort => { let bb = location.block; if !self.body.basic_blocks()[bb].is_cleanup { self.fail(location, "Cannot `Resume` or `Abort` from non-cleanup basic block") }
TerminatorKind::Return => { let bb = location.block; if self.body.basic_blocks()[bb].is_cleanup { self.fail(location, "Cannot `Return` from cleanup basic block") } } TerminatorKind::Unreachable => {} } self.super_terminator(terminator, location); } fn visit_source_scope(&mut self, scope: &SourceScope) { if self.body.source_scopes.get(*scope).is_none() { self.tcx.sess.diagnostic().delay_span_bug( self.body.span, &format!( "broken MIR in {:?} ({}):\ninvalid source scope {:?}", self.body.source.instance, self.when, scope, ), ); } } }
}
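`mir_assign_valid_types` above accepts assignments whose sides differ only in lifetimes, including higher-ranked ones. A small surface-level illustration of such a subtyping-driven, type-changing assignment (standalone user code, not compiler code):

```rust
// Illustrative user code: the two pointer types below differ only in
// lifetimes, so the assignment type-checks via subtyping even though the
// types of the two sides are not literally identical.
fn main() {
    let f: for<'a> fn(&'a u32) = |_x: &u32| {};
    let g: fn(&'static u32) = f; // higher-ranked fn pointer is a subtype
    g(&5);
}
```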
main.rs
fn main() { println!("Hello, world!"); } pub struct Solution{} pub struct Solution1{} pub struct Solution2{} use std::collections::HashSet; impl Solution { pub fn single_number(nums: Vec<i32>) -> i32 { let mut temp = HashSet::new(); for i in 0..nums.len() { if temp.contains(&nums[i]) { temp.remove(&nums[i]); } else { temp.insert(&nums[i]); } } for x in temp.iter() { return **x; } 0 } } /* Result: Accepted (show details) Runtime: 4 ms, faster than 17.02% of Rust submissions Memory: 2.5 MB, less than 100.00% of Rust submissions */ use std::iter::FromIterator; impl Solution1 { pub fn single_number(nums: Vec<i32>) -> i32 { let sum1:i32 = nums.iter().sum(); let temp: HashSet<i32> = HashSet::from_iter(nums); let sum2:i32 = temp.iter().sum(); 2*sum2 - sum1 } } /* Result: Accepted (show details) Runtime: 0 ms, faster than 100.00% of Rust submissions Memory: 2.6 MB, less than 100.00% of Rust submissions */ use std::collections::HashMap; impl Solution2 { pub fn single_number(nums: Vec<i32>) -> i32 { let mut map = HashMap::new(); for i in nums { if map.get(&i) != None { map.remove(&i);
lse { map.insert(i, 1); } } for key in map.keys() { return *key; } 0 } } /* Result: Accepted (show details) Runtime: 4 ms, faster than 17.02% of Rust submissions Memory: 2.6 MB, less than 57.89% of Rust submissions */
} e
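The file above shows three accepted approaches (HashSet toggling, the doubled set-sum trick, and a HashMap); the usual constant-space alternative, not present in the original file, is a XOR fold, since duplicated values cancel (sketch with an invented function name):

```rust
pub fn single_number_xor(nums: Vec<i32>) -> i32 {
    // a ^ a == 0 and x ^ 0 == x, so every value that appears twice cancels out.
    nums.into_iter().fold(0, |acc, x| acc ^ x)
}

fn main() {
    assert_eq!(single_number_xor(vec![4, 1, 2, 1, 2]), 4);
}
```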
mersenne.rs
use crate::Msg; use seed::prelude::*; extern crate num_bigint; extern crate num_traits; pub mod mersenne_utils { extern crate num_bigint; extern crate num_traits; use num_bigint::{BigInt, ToBigInt}; use num_bigint::{BigUint, ToBigUint}; use num_traits::{Num, Pow, ToPrimitive}; #[derive(Clone)] pub struct Mersenne { pub n: u64, pub p: u64, pub digits: u64, pub discovery: String, } #[derive(Clone)] pub struct MersenneDownload { pub n: u64, pub p: u64, } pub fn mersennes_discovery_dates(n: usize) -> String { let mersennes_discovery_dates: Vec<Mersenne> = mersennes(); mersennes_discovery_dates[n].discovery.to_owned() } pub fn mersennes() -> Vec<Mersenne> { vec![ //vec![p,digits] Mersenne { n: 0, p: 0, digits: 0, discovery: String::from(""), }, //faux zero entry to make things easier when reading this vector Mersenne { n: 1, p: 2, digits: 1, discovery: String::from("500BC"), }, Mersenne { n: 2, p: 3, digits: 1, discovery: String::from("500BC"), }, Mersenne { n: 3, p: 5, digits: 2, discovery: String::from("275BC"), }, Mersenne { n: 4, p: 7, digits: 3, discovery: String::from("275BC"), }, Mersenne { n: 5, p: 13, digits: 4, discovery: String::from("1456"), }, Mersenne { n: 6, p: 17, digits: 6, discovery: String::from("1588"), }, Mersenne { n: 7, p: 19, digits: 6, discovery: String::from("1588"), }, Mersenne { n: 8, p: 31, digits: 10, discovery: String::from("1772"), }, Mersenne { n: 9, p: 61, digits: 19, discovery: String::from("1883"), }, Mersenne { n: 10, p: 89, digits: 27, discovery: String::from("1911"), }, Mersenne { n: 11, p: 107, digits: 33, discovery: String::from("1914"), }, Mersenne { n: 12, p: 127, digits: 39, discovery: String::from("1876"), }, Mersenne { n: 13, p: 521, digits: 157, discovery: String::from("30-Jan-1952"), }, Mersenne { n: 14, p: 607, digits: 183, discovery: String::from("30-Jan-1952"), }, Mersenne { n: 15, p: 1279, digits: 386, discovery: String::from("26-Jun-1952"), }, Mersenne { n: 16, p: 2203, digits: 664, discovery: String::from(" 7-Oct-1952"), }, Mersenne { n: 17, p: 2281, digits: 687, discovery: String::from(" 9-Oct-1952"), }, Mersenne { n: 18, p: 3217, digits: 969, discovery: String::from(" 8-Sep-1957"), }, Mersenne { n: 19, p: 4253, digits: 1281, discovery: String::from(" 3-Nov-1961"), }, Mersenne { n: 20, p: 4423, digits: 1332, discovery: String::from(" 3-Nov-1961"), }, Mersenne { n: 21, p: 9689, digits: 2917, discovery: String::from("11-May-1963"), }, Mersenne { n: 22, p: 9941, digits: 2993, discovery: String::from("16-May-1963"), }, Mersenne { n: 23, p: 11213, digits: 3376, discovery: String::from(" 2-Jun-1963"), }, Mersenne { n: 24, p: 19937, digits: 6002, discovery: String::from(" 4-Mar-1971"), }, Mersenne { n: 25, p: 21701, digits: 6533, discovery: String::from("30-Oct-1978"), }, Mersenne { n: 26, p: 23209, digits: 6987, discovery: String::from(" 9-Feb-1979"), }, Mersenne { n: 27, p: 44497, digits: 13395, discovery: String::from(" 8-Apr-1979"), }, Mersenne { n: 28, p: 86243, digits: 25962, discovery: String::from("25-Sep-1982"), }, Mersenne { n: 29, p: 110503, digits: 33265, discovery: String::from("28-Jan-1988"), }, Mersenne { n: 30, p: 132049, digits: 39751, discovery: String::from("20-Sep-1983"), }, Mersenne { n: 31, p: 216091, digits: 65050, discovery: String::from(" 6-Sep-1985"), }, Mersenne { n: 32, p: 756839, digits: 227832, discovery: String::from("19-Feb-1992"), }, Mersenne { n: 33, p: 859433, digits: 258716, discovery: String::from("10-Jan-1994"), }, Mersenne { n: 34, p: 1257787, digits: 378632, discovery: String::from(" 3-Sep-1996"), }, 
Mersenne { n: 35, p: 1398269, digits: 420921, discovery: String::from("12-Nov-1996"), }, Mersenne { n: 36, p: 2976221, digits: 895832, discovery: String::from("24-Aug-1997"), }, Mersenne { n: 37, p: 3021377, digits: 909526, discovery: String::from("27-Jan-1998"), }, Mersenne { n: 38, p: 6972593, digits: 2098960, discovery: String::from(" 1-Jun-1999"), }, Mersenne { n: 39, p: 13466917, digits: 4053946, discovery: String::from("14-Nov-2001"), }, Mersenne { n: 40, p: 20996011, digits: 6320430, discovery: String::from("17-Nov-2003"), }, Mersenne { n: 41, p: 24036583, digits: 7235733, discovery: String::from("28-May-2004"), }, Mersenne { n: 42, p: 25964951, digits: 7816230, discovery: String::from("26-Feb-2005"), }, Mersenne { n: 43, p: 30402457, digits: 9152052, discovery: String::from("15-Dec-2005"), }, Mersenne { n: 44, p: 32582657, digits: 9808358, discovery: String::from(" 4-Sep-2006"), }, Mersenne { n: 45, p: 37156667, digits: 11185272, discovery: String::from("23-Aug-2008"), }, Mersenne { n: 46, p: 42643801, digits: 12837064, discovery: String::from("06-Sep-2009"), }, Mersenne { n: 47, p: 43112609, digits: 12978189, discovery: String::from("12-Apr-2008"), }, Mersenne { n: 48, p: 57885161, digits: 17425170, discovery: String::from("25-Jan-2013"), }, Mersenne { n: 49, p: 74207281, digits: 22338618, discovery: String::from("07-Jan-2016"), }, Mersenne { n: 50, p: 77232917, digits: 23249425, discovery: String::from("26-Dec-2017"), }, ] } pub fn nth_mersenne_prime(candidate: &str) -> u64 { let big_candidate: BigInt = num_bigint::BigInt::from_str_radix(&candidate, 10).unwrap(); let mersennes: Vec<Mersenne> = mersennes(); let mut answer: u64 = 0; let big_two: BigInt = 2.to_bigint().unwrap(); for n in 1..mersennes.len() { let mprime: BigInt = big_two.clone().pow(mersennes[n].p) - 1; if big_candidate == mprime { answer = n as u64; break; } else if big_candidate < mprime { break; } } answer } pub fn equation(p: u64) -> BigUint { let two: BigUint = 2.to_biguint().unwrap(); let power: BigUint = two.clone() << (p.to_usize().unwrap() - 1); power.clone() - 1.to_biguint().unwrap() } } pub fn render(model: &crate::Model) -> Node<Msg> { let mut html = vec![]; let mersennes = mersenne_utils::mersennes(); for n in 1..mersennes.len() { let mersenne_download = mersenne_utils::MersenneDownload { n: mersennes[n].n, p: mersennes[n].p, }; html.push(tr![ td![n.to_string()], td!["2", sup![mersennes[n].p.to_string()], "-1"], td![mersennes[n].digits.to_string()], td![mersenne_utils::mersennes_discovery_dates(n)], if model.download.n == mersennes[n].n { td![crate::utils::generate_file( model.download.n, mersenne_utils::equation(model.download.p) )] } else { td![button![ "Generate", mouse_ev("mouseup", move |event| Msg::GenerateMersenneDownload( event, mersenne_download )) ]] }, ]); } html.reverse(); div![ h1!["The Mersenne Numbers"], br![], br![], br![], table![ attrs! {At::Class => "mersennetable text"}, tbody![ tr![ td![b!["No."]], td![b!["Prime"]], td![b!["Digits"]], td![b!["Discovered"]], td![b!["Download"]] ], html ] ] ] } #[cfg(test)] use num_bigint::{BigUint, ToBigUint}; #[cfg(test)] use num_traits::Num; #[cfg(test)] mod tests { use super::*; #[test] fn mersennes_discovery_dates_test()
#[test] fn nth_mersenne_prime_test() { assert_eq!(mersenne_utils::nth_mersenne_prime("127"), 4); assert_eq!(mersenne_utils::nth_mersenne_prime("9"), 0); } #[test] fn mersenne_test() { let mersennes: Vec<mersenne_utils::Mersenne> = mersenne_utils::mersennes(); assert_eq!(mersennes[0].p, 0); assert_eq!(mersennes[0].digits, 0); assert_eq!(mersennes[2].p, 3); assert_eq!(mersennes[2].digits, 1); } #[test] fn equation_test() { assert_eq!(mersenne_utils::equation(2), 3.to_biguint().unwrap()); assert_eq!(mersenne_utils::equation(19), 524287.to_biguint().unwrap()); let number:BigUint = num_bigint::BigUint::from_str_radix("6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151", 10).unwrap(); assert_eq!(mersenne_utils::equation(521), number); } }
{ assert_eq!(mersenne_utils::mersennes_discovery_dates(0), ""); assert_eq!(mersenne_utils::mersennes_discovery_dates(50), "26-Dec-2017"); }
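A small usage sketch for the Rust helpers above (assuming it sits alongside `mersenne_utils` in the same crate; the `demo` function itself is illustrative, not part of the original file): `equation(p)` builds 2^p - 1 with a single left shift on a `BigUint`, and `nth_mersenne_prime` maps the resulting decimal string back to its index in the `mersennes()` table.

// Sketch only: exercises equation() and nth_mersenne_prime() from mersenne_utils above.
fn demo() {
    // 2^13 - 1 = 8191, the 5th entry in the mersennes() table
    let m5 = mersenne_utils::equation(13);
    assert_eq!(m5.to_string(), "8191");

    // feeding the decimal string back recovers the index n = 5
    assert_eq!(mersenne_utils::nth_mersenne_prime(&m5.to_string()), 5);
}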
reacher_contextual.py
PYTHONPATH = '~/Documents/gym-extensions/'
import sys
sys.path.append(PYTHONPATH)

import numpy as np
from gym import utils
from gym import error
from gym.envs.mujoco import mujoco_env
import os.path as osp
from gym.envs.mujoco.reacher import ReacherEnv

try:
    import mujoco_py
    from mujoco_py.mjlib import mjlib
except ImportError as e:
    raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e))

import os
import gym
import gym_extensions

class ReacherContextualEnv(ReacherEnv):

    def __init__(self, *args, **kwargs):
        ReacherEnv.__init__(self)

        # the context is a 4-dim vector [x1, y1, x2, y2]
        # (x1,y1) - coords of the tip of reacher; (x2,y2) - coords of the target
        self.context = np.array([0.1, 0.1, 0.1, 0.1])
        self.policy_type = ""
        self.context_high = np.array([ i*2 for i in self.context])
        self.context_low = np.array([ -i*2 for i in self.context]) # the params in the context can't be less or equal to zero!
        self.bias = 0
        self.weights = [0]*self.observation_space.shape[0]

    def _step(self, action):
        state, reward, done, _ = super(ReacherContextualEnv, self)._step(action)
        return state, reward, done, {}

    def change_context(self, context_vector):
        # the context is a 4-dim vector [x1, y1, x2, y2]
        # (x1,y1) - coords of the tip of reacher; (x2,y2) - coords of the target
        qpos = np.array(context_vector)
        qvel = self.init_qvel
        qvel[-2:] = 0
        self.set_state(qpos, qvel)

    def 
(self, policy_type): self.policy_type = policy_type def context_space_info(self): context_info_dict = {} context_info_dict['context_vals'] = self.context context_info_dict['context_dims'] = len(self.context) context_info_dict['context_high'] = self.context_high.tolist() context_info_dict['context_low' ] = self.context_low.tolist() context_info_dict['state_dims' ] = self.observation_space.shape[0] # I need to know what the size of the action vector I need to pass to the transition function context_info_dict['action_dims' ] = self.action_space.shape[0] context_info_dict['action_space'] = 'continuous' context_info_dict['state_high' ] = self.observation_space.high.tolist() context_info_dict['state_low' ] = self.observation_space.low.tolist() context_info_dict['action_high' ] = self.action_space.high.tolist() context_info_dict['action_low' ] = self.action_space.low.tolist() return context_info_dict if __name__ == "__main__": import time #env = gym.make('Reacher-v1') env = gym.make('HopperContextual-v0') #env = gym.make('PusherContextual-v0') for i_episode in range(500): env.reset() while True: goal = np.random.uniform(low=-.25, high=.25, size=4) if np.linalg.norm(goal) < 2: break #env.unwrapped.change_context(goal) #print 'target', env.unwrapped.get_body_com("target") #print 'qpos', env.unwrapped.model.data.qpos time.sleep(2) #print env.unwrapped.context_space_info()['action_dims'] #print env.unwrapped.context_space_info() #print env.unwrapped.weights #print env.unwrapped.model.nq for t in range(500): env.render() action = env.action_space.sample() print 'action ', action observation, reward, done, info = env.step(action) #print observation if done: print("Episode finished after {} timesteps".format(t+1)) break
set_policy_type
database.rs
use std::collections::hash_map::{Entry, HashMap}; use std::fs::File; use std::path::Path; use std::sync::{Arc, RwLock}; use std::{fs, thread}; use std::io::{Read, Write, ErrorKind}; use chrono::{DateTime, Utc}; use crossbeam_channel::{Receiver, Sender}; use heed::CompactionOption; use heed::types::{Str, Unit, SerdeBincode}; use log::{debug, error}; use meilisearch_schema::Schema; use regex::Regex; use crate::{store, update, Index, MResult, Error}; pub type BoxUpdateFn = Box<dyn Fn(&str, update::ProcessedUpdateResult) + Send + Sync + 'static>; type ArcSwapFn = arc_swap::ArcSwapOption<BoxUpdateFn>; type SerdeDatetime = SerdeBincode<DateTime<Utc>>; pub type MainWriter<'a> = heed::RwTxn<'a, MainT>; pub type MainReader = heed::RoTxn<MainT>; pub type UpdateWriter<'a> = heed::RwTxn<'a, UpdateT>; pub type UpdateReader = heed::RoTxn<UpdateT>; const UNHEALTHY_KEY: &str = "_is_unhealthy"; const LAST_UPDATE_KEY: &str = "last-update"; pub struct MainT; pub struct UpdateT; pub struct Database { env: heed::Env, update_env: heed::Env, common_store: heed::PolyDatabase, indexes_store: heed::Database<Str, Unit>, indexes: RwLock<HashMap<String, (Index, thread::JoinHandle<MResult<()>>)>>, update_fn: Arc<ArcSwapFn>, } pub struct DatabaseOptions { pub main_map_size: usize, pub update_map_size: usize, } impl Default for DatabaseOptions { fn default() -> DatabaseOptions { DatabaseOptions { main_map_size: 100 * 1024 * 1024 * 1024, //100Gb update_map_size: 100 * 1024 * 1024 * 1024, //100Gb } } } macro_rules! r#break_try { ($expr:expr, $msg:tt) => { match $expr { core::result::Result::Ok(val) => val, core::result::Result::Err(err) => { log::error!(concat!($msg, ": {}"), err); break; } } }; } pub enum UpdateEvent { NewUpdate, MustClear, } pub type UpdateEvents = Receiver<UpdateEvent>; pub type UpdateEventsEmitter = Sender<UpdateEvent>; fn update_awaiter( receiver: UpdateEvents, env: heed::Env, update_env: heed::Env, index_uid: &str, update_fn: Arc<ArcSwapFn>, index: Index, ) -> MResult<()> { for event in receiver { // if we receive a *MustClear* event, clear the index and break the loop if let UpdateEvent::MustClear = event { let mut writer = env.typed_write_txn::<MainT>()?; let mut update_writer = update_env.typed_write_txn::<UpdateT>()?; store::clear(&mut writer, &mut update_writer, &index)?; writer.commit()?; update_writer.commit()?; debug!("store {} cleared", index_uid); break } loop { // We instantiate a *write* transaction to *block* the thread // until the *other*, notifiying, thread commits let result = update_env.typed_write_txn::<UpdateT>(); let update_reader = break_try!(result, "LMDB read transaction (update) begin failed"); // retrieve the update that needs to be processed let result = index.updates.first_update(&update_reader); let (update_id, update) = match break_try!(result, "pop front update failed") { Some(value) => value, None => { debug!("no more updates"); break; } }; // do not keep the reader for too long break_try!(update_reader.abort(), "aborting update transaction failed"); // instantiate a transaction to touch to the main env let result = env.typed_write_txn::<MainT>(); let mut main_writer = break_try!(result, "LMDB nested write transaction failed"); // try to apply the update to the database using the main transaction let result = update::update_task(&mut main_writer, &index, update_id, update); let status = break_try!(result, "update task failed"); // commit the main transaction if the update was successful, abort it otherwise if status.error.is_none() { break_try!(main_writer.commit(), 
"commit nested transaction failed"); } else { break_try!(main_writer.abort(), "abborting nested transaction failed"); } // now that the update has been processed we can instantiate // a transaction to move the result to the updates-results store let result = update_env.typed_write_txn::<UpdateT>(); let mut update_writer = break_try!(result, "LMDB write transaction begin failed"); // definitely remove the update from the updates store index.updates.del_update(&mut update_writer, update_id)?; // write the result of the updates-results store let updates_results = index.updates_results; let result = updates_results.put_update_result(&mut update_writer, update_id, &status); // always commit the main transaction, even if the update was unsuccessful break_try!(result, "update result store commit failed"); break_try!(update_writer.commit(), "update transaction commit failed"); // call the user callback when the update and the result are written consistently if let Some(ref callback) = *update_fn.load() { (callback)(index_uid, status); } } } debug!("update loop system stopped"); Ok(()) } /// Ensures Meilisearch version is compatible with the database, returns an error versions mismatch. /// If create is set to true, a VERSION file is created with the current version. fn version_guard(path: &Path, create: bool) -> MResult<()> { let current_version_major = env!("CARGO_PKG_VERSION_MAJOR"); let current_version_minor = env!("CARGO_PKG_VERSION_MINOR"); let current_version_patch = env!("CARGO_PKG_VERSION_PATCH"); let version_path = path.join("VERSION"); match File::open(&version_path) { Ok(mut file) => { let mut version = String::new(); file.read_to_string(&mut version)?; // Matches strings like XX.XX.XX let re = Regex::new(r"(\d+).(\d+).(\d+)").unwrap(); // Make sure there is a result let version = re .captures_iter(&version) .next() .ok_or(Error::VersionMismatch("bad VERSION file".to_string()))?; // the first is always the complete match, safe to unwrap because we have a match let version_major = version.get(1).unwrap().as_str(); let version_minor = version.get(2).unwrap().as_str(); if version_major != current_version_major || version_minor != current_version_minor { return Err(Error::VersionMismatch(format!("{}.{}.XX", version_major, version_minor))); } } Err(error) => { match error.kind() { ErrorKind::NotFound => { if create { // when no version file is found, and we've been told to create one, // create a new file with the current version in it. let mut version_file = File::create(&version_path)?; version_file.write_all(format!("{}.{}.{}", current_version_major, current_version_minor, current_version_patch).as_bytes())?; } else { // when no version file is found and we were not told to create one, this // means that the version is inferior to the one this feature was added in. 
return Err(Error::VersionMismatch(format!("<0.12.0"))); } } _ => return Err(error.into()) } } } Ok(()) } impl Database { pub fn open_or_create(path: impl AsRef<Path>, options: DatabaseOptions) -> MResult<Database> { let main_path = path.as_ref().join("main"); let update_path = path.as_ref().join("update"); //create db directory fs::create_dir_all(&path)?; // create file only if main db wasn't created before (first run) version_guard(path.as_ref(), !main_path.exists() && !update_path.exists())?; fs::create_dir_all(&main_path)?; let env = heed::EnvOpenOptions::new() .map_size(options.main_map_size) .max_dbs(3000) .open(main_path)?; fs::create_dir_all(&update_path)?; let update_env = heed::EnvOpenOptions::new() .map_size(options.update_map_size) .max_dbs(3000) .open(update_path)?; let common_store = env.create_poly_database(Some("common"))?; let indexes_store = env.create_database::<Str, Unit>(Some("indexes"))?; let update_fn = Arc::new(ArcSwapFn::empty()); // list all indexes that needs to be opened let mut must_open = Vec::new(); let reader = env.read_txn()?; for result in indexes_store.iter(&reader)? { let (index_uid, _) = result?; must_open.push(index_uid.to_owned()); } reader.abort()?; // open the previously aggregated indexes let mut indexes = HashMap::new(); for index_uid in must_open { let (sender, receiver) = crossbeam_channel::unbounded(); let index = match store::open(&env, &update_env, &index_uid, sender.clone())? { Some(index) => index, None => { log::warn!( "the index {} doesn't exist or has not all the databases", index_uid ); continue; } }; let env_clone = env.clone(); let update_env_clone = update_env.clone(); let index_clone = index.clone(); let name_clone = index_uid.clone(); let update_fn_clone = update_fn.clone(); let handle = thread::spawn(move || { update_awaiter( receiver, env_clone, update_env_clone, &name_clone, update_fn_clone, index_clone, ) }); // send an update notification to make sure that // possible pre-boot updates are consumed sender.send(UpdateEvent::NewUpdate).unwrap(); let result = indexes.insert(index_uid, (index, handle)); assert!( result.is_none(), "The index should not have been already open" ); } Ok(Database { env, update_env, common_store, indexes_store, indexes: RwLock::new(indexes), update_fn, }) } pub fn open_index(&self, name: impl AsRef<str>) -> Option<Index> { let indexes_lock = self.indexes.read().unwrap(); match indexes_lock.get(name.as_ref()) { Some((index, ..)) => Some(index.clone()), None => None, } } pub fn is_indexing(&self, reader: &UpdateReader, index: &str) -> MResult<Option<bool>> { match self.open_index(&index) { Some(index) => index.current_update_id(&reader).map(|u| Some(u.is_some())), None => Ok(None), } } pub fn create_index(&self, name: impl AsRef<str>) -> MResult<Index> { let name = name.as_ref(); let mut indexes_lock = self.indexes.write().unwrap(); match indexes_lock.entry(name.to_owned()) { Entry::Occupied(_) => Err(crate::Error::IndexAlreadyExists), Entry::Vacant(entry) => { let (sender, receiver) = crossbeam_channel::unbounded(); let index = store::create(&self.env, &self.update_env, name, sender)?; let mut writer = self.env.typed_write_txn::<MainT>()?; self.indexes_store.put(&mut writer, name, &())?; index.main.put_name(&mut writer, name)?; index.main.put_created_at(&mut writer)?; index.main.put_updated_at(&mut writer)?; index.main.put_schema(&mut writer, &Schema::new())?; let env_clone = self.env.clone(); let update_env_clone = self.update_env.clone(); let index_clone = index.clone(); let name_clone = name.to_owned(); 
let update_fn_clone = self.update_fn.clone(); let handle = thread::spawn(move || { update_awaiter( receiver, env_clone, update_env_clone, &name_clone, update_fn_clone, index_clone, ) }); writer.commit()?; entry.insert((index.clone(), handle)); Ok(index) } } } pub fn delete_index(&self, name: impl AsRef<str>) -> MResult<bool> { let name = name.as_ref(); let mut indexes_lock = self.indexes.write().unwrap(); match indexes_lock.remove_entry(name) { Some((name, (index, handle))) => { // remove the index name from the list of indexes // and clear all the LMDB dbi let mut writer = self.env.write_txn()?; self.indexes_store.delete(&mut writer, &name)?; writer.commit()?; // send a stop event to the update loop of the index index.updates_notifier.send(UpdateEvent::MustClear).unwrap(); drop(indexes_lock); // join the update loop thread to ensure it is stopped handle.join().unwrap()?; Ok(true) } None => Ok(false), } } pub fn set_update_callback(&self, update_fn: BoxUpdateFn) { let update_fn = Some(Arc::new(update_fn)); self.update_fn.swap(update_fn); } pub fn unset_update_callback(&self) { self.update_fn.swap(None); } pub fn main_read_txn(&self) -> MResult<MainReader> { Ok(self.env.typed_read_txn::<MainT>()?) } pub(crate) fn main_write_txn(&self) -> MResult<MainWriter> { Ok(self.env.typed_write_txn::<MainT>()?) } /// Calls f providing it with a writer to the main database. After f is called, makes sure the /// transaction is commited. Returns whatever result f returns. pub fn main_write<F, R, E>(&self, f: F) -> Result<R, E> where F: FnOnce(&mut MainWriter) -> Result<R, E>, E: From<Error>, { let mut writer = self.main_write_txn()?; let result = f(&mut writer)?; writer.commit().map_err(Error::Heed)?; Ok(result) } /// provides a context with a reader to the main database. experimental. pub fn main_read<F, R, E>(&self, f: F) -> Result<R, E> where F: FnOnce(&MainReader) -> Result<R, E>, E: From<Error>, { let reader = self.main_read_txn()?; let result = f(&reader)?; reader.abort().map_err(Error::Heed)?; Ok(result) } pub fn update_read_txn(&self) -> MResult<UpdateReader> { Ok(self.update_env.typed_read_txn::<UpdateT>()?) } pub(crate) fn update_write_txn(&self) -> MResult<heed::RwTxn<UpdateT>> { Ok(self.update_env.typed_write_txn::<UpdateT>()?) } /// Calls f providing it with a writer to the main database. After f is called, makes sure the /// transaction is commited. Returns whatever result f returns. pub fn update_write<F, R, E>(&self, f: F) -> Result<R, E> where F: FnOnce(&mut UpdateWriter) -> Result<R, E>, E: From<Error>, { let mut writer = self.update_write_txn()?; let result = f(&mut writer)?; writer.commit().map_err(Error::Heed)?; Ok(result) } /// provides a context with a reader to the update database. experimental. pub fn update_read<F, R, E>(&self, f: F) -> Result<R, E> where F: FnOnce(&UpdateReader) -> Result<R, E>, E: From<Error>, { let reader = self.update_read_txn()?; let result = f(&reader)?; reader.abort().map_err(Error::Heed)?; Ok(result) } pub fn copy_and_compact_to_path<P: AsRef<Path>>(&self, path: P) -> MResult<(File, File)> { let path = path.as_ref(); let env_path = path.join("main"); let env_update_path = path.join("update"); fs::create_dir(&env_path)?; fs::create_dir(&env_update_path)?;
let env_update_path = env_update_path.join("data.mdb"); match self.update_env.copy_to_path(env_update_path, CompactionOption::Enabled) { Ok(update_env_file) => Ok((env_file, update_env_file)), Err(e) => { fs::remove_file(env_path)?; Err(e.into()) }, } } pub fn indexes_uids(&self) -> Vec<String> { let indexes = self.indexes.read().unwrap(); indexes.keys().cloned().collect() } pub(crate) fn common_store(&self) -> heed::PolyDatabase { self.common_store } pub fn last_update(&self, reader: &heed::RoTxn<MainT>) -> MResult<Option<DateTime<Utc>>> { match self.common_store() .get::<_, Str, SerdeDatetime>(reader, LAST_UPDATE_KEY)? { Some(datetime) => Ok(Some(datetime)), None => Ok(None), } } pub fn set_last_update(&self, writer: &mut heed::RwTxn<MainT>, time: &DateTime<Utc>) -> MResult<()> { self.common_store() .put::<_, Str, SerdeDatetime>(writer, LAST_UPDATE_KEY, time)?; Ok(()) } pub fn set_healthy(&self, writer: &mut heed::RwTxn<MainT>) -> MResult<()> { let common_store = self.common_store(); common_store.delete::<_, Str>(writer, UNHEALTHY_KEY)?; Ok(()) } pub fn set_unhealthy(&self, writer: &mut heed::RwTxn<MainT>) -> MResult<()> { let common_store = self.common_store(); common_store.put::<_, Str, Unit>(writer, UNHEALTHY_KEY, &())?; Ok(()) } pub fn get_health(&self, reader: &heed::RoTxn<MainT>) -> MResult<Option<()>> { let common_store = self.common_store(); Ok(common_store.get::<_, Str, Unit>(&reader, UNHEALTHY_KEY)?) } pub fn compute_stats(&self, writer: &mut MainWriter, index_uid: &str) -> MResult<()> { let index = match self.open_index(&index_uid) { Some(index) => index, None => { error!("Impossible to retrieve index {}", index_uid); return Ok(()); } }; let schema = match index.main.schema(&writer)? { Some(schema) => schema, None => return Ok(()), }; let all_documents_fields = index .documents_fields_counts .all_documents_fields_counts(&writer)?; // count fields frequencies let mut fields_frequency = HashMap::<_, usize>::new(); for result in all_documents_fields { let (_, attr, _) = result?; if let Some(field_id) = schema.indexed_pos_to_field_id(attr) { *fields_frequency.entry(field_id).or_default() += 1; } } // convert attributes to their names let frequency: HashMap<_, _> = fields_frequency .into_iter() .filter_map(|(a, c)| schema.name(a).map(|name| (name.to_string(), c))) .collect(); index .main .put_fields_distribution(writer, &frequency) } } #[cfg(test)] mod tests { use super::*; use crate::bucket_sort::SortResult; use crate::criterion::{self, CriteriaBuilder}; use crate::update::{ProcessedUpdateResult, UpdateStatus}; use crate::settings::Settings; use crate::{Document, DocumentId}; use serde::de::IgnoredAny; use std::sync::mpsc; #[test] fn valid_updates() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.to_update().unwrap() }; let 
mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut update_writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); } #[test] fn invalid_updates() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.to_update().unwrap() }; let mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut update_writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Failed { content }) if content.error.is_some()); } #[test] fn ignored_words_too_long() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name"], "displayedAttributes": ["name"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.to_update().unwrap() }; let mut update_writer = 
db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "s̷̡̢̡̧̺̜̞͕͉͉͕̜͔̟̼̥̝͍̟̖͔͔̪͉̲̹̝̣̖͎̞̤̥͓͎̭̩͕̙̩̿̀̋̅̈́̌́̏̍̄̽͂̆̾̀̿̕̚̚͜͠͠ͅͅļ̵̨̨̨̰̦̻̳̖̳͚̬̫͚̦͖͈̲̫̣̩̥̻̙̦̱̼̠̖̻̼̘̖͉̪̜̠̙͖̙̩͔̖̯̩̲̿̽͋̔̿̍̓͂̍̿͊͆̃͗̔̎͐͌̾̆͗́̆̒̔̾̅̚̚͜͜ͅͅī̵̛̦̅̔̓͂͌̾́͂͛̎̋͐͆̽̂̋̋́̾̀̉̓̏̽́̑̀͒̇͋͛̈́̃̉̏͊̌̄̽̿̏̇͘̕̚̕p̶̧̛̛̖̯̗͕̝̗̭̱͙̖̗̟̟̐͆̊̂͐̋̓̂̈́̓͊̆͌̾̾͐͋͗͌̆̿̅͆̈́̈́̉͋̍͊͗̌̓̅̈̎̇̃̎̈́̉̐̋͑̃͘̕͘d̴̢̨̛͕̘̯͖̭̮̝̝̐̊̈̅̐̀͒̀́̈́̀͌̽͛͆͑̀̽̿͛̃̋̇̎̀́̂́͘͠͝ǫ̵̨̛̮̩̘͚̬̯̖̱͍̼͑͑̓̐́̑̿̈́̔͌̂̄͐͝ģ̶̧̜͇̣̭̺̪̺̖̻͖̮̭̣̙̻͒͊͗̓̓͒̀̀ͅ", }); additions.update_document(doc1); let mut update_writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); } #[test] fn add_schema_attributes_at_end() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.to_update().unwrap() }; let mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut update_writer = db.update_write_txn().unwrap(); let _update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description", "age", "sex"], "displayedAttributes": ["name", "description", "age", "sex"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.to_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); // check if it has been accepted let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", "age": 21, "sex": "Male", 
}); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", "age": 23, "sex": "Male", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); // check if it has been accepted let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort().unwrap(); // even try to search for a document let reader = db.main_read_txn().unwrap(); let SortResult {documents, .. } = index.query_builder().query(&reader, Some("21 "), 0..20).unwrap(); assert_matches!(documents.len(), 1); reader.abort().unwrap(); // try to introduce attributes in the middle of the schema let settings = { let data = r#" { "searchableAttributes": ["name", "description", "city", "age", "sex"], "displayedAttributes": ["name", "description", "city", "age", "sex"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.to_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); // check if it has been accepted let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); } #[test] fn deserialize_documents() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.to_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); let mut additions = index.documents_addition(); // DocumentId(7900334843754999545) let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); // DocumentId(8367468610878465872) let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content 
}) if content.error.is_none()); update_reader.abort().unwrap(); let reader = db.main_read_txn().unwrap(); let document: Option<IgnoredAny> = index.document(&reader, None, DocumentId(25)).unwrap(); assert!(document.is_none()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(0)) .unwrap(); assert!(document.is_some()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(1)) .unwrap(); assert!(document.is_some()); } #[test] fn partial_document_update() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description", "id"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.to_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); let mut additions = index.documents_addition(); // DocumentId(7900334843754999545) let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); // DocumentId(8367468610878465872) let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort().unwrap(); let reader = db.main_read_txn().unwrap(); let document: Option<IgnoredAny> = index.document(&reader, None, DocumentId(25)).unwrap(); assert!(document.is_none()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(0)) .unwrap(); assert!(document.is_some()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(1)) .unwrap(); assert!(document.is_some()); reader.abort().unwrap(); let mut partial_additions = index.documents_partial_addition(); // DocumentId(7900334843754999545) let partial_doc1 = serde_json::json!({ "id": 123, "description": "I am the new Marvin", }); // DocumentId(8367468610878465872) let partial_doc2 = serde_json::json!({ "id": 234, "description": "I am the new Kevin", }); partial_additions.update_document(partial_doc1); partial_additions.update_document(partial_doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = partial_additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, 
Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort().unwrap(); let reader = db.main_read_txn().unwrap(); let document: Option<serde_json::Value> = index .document(&reader, None, DocumentId(0)) .unwrap(); let new_doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "I am the new Marvin", }); assert_eq!(document, Some(new_doc1)); let document: Option<serde_json::Value> = index .document(&reader, None, DocumentId(1)) .unwrap(); let new_doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "I am the new Kevin", }); assert_eq!(document, Some(new_doc2)); } #[test] fn delete_index() { let dir = tempfile::tempdir().unwrap(); let database = Arc::new(Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap()); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let db_cloned = database.clone(); let update_fn = move |name: &str, update: ProcessedUpdateResult| { // try to open index to trigger a lock let _ = db_cloned.open_index(name); sender.send(update.update_id).unwrap() }; // create the index let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.to_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); // add documents to the index let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // delete the index let deleted = database.delete_index("test").unwrap(); assert!(deleted); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let result = database.open_index("test"); assert!(result.is_none()); } #[test] fn check_number_ordering() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "rankingRules": [ "typo", "words", "proximity", "attribute", "wordsPosition", "exactness", "desc(release_date)" ], "searchableAttributes": ["name", "release_date"], "displayedAttributes": ["name", "release_date"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.to_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); 
writer.commit().unwrap(); let mut additions = index.documents_addition(); // DocumentId(7900334843754999545) let doc1 = serde_json::json!({ "id": 123, "name": "Kevin the first", "release_date": -10000, }); // DocumentId(8367468610878465872) let doc2 = serde_json::json!({ "id": 234, "name": "Kevin the second", "release_date": 10000, }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let reader = db.main_read_txn().unwrap(); let schema = index.main.schema(&reader).unwrap().unwrap(); let ranked_map = index.main.ranked_map(&reader).unwrap().unwrap(); let criteria = CriteriaBuilder::new() .add( criterion::SortByAttr::lower_is_better(&ranked_map, &schema, "release_date") .unwrap(), ) .add(criterion::DocumentId) .build(); let builder = index.query_builder_with_criteria(criteria); let SortResult {documents, .. } = builder.query(&reader, Some("Kevin"), 0..20).unwrap(); let mut iter = documents.into_iter(); assert_matches!( iter.next(), Some(Document { id: DocumentId(0), .. }) ); assert_matches!( iter.next(), Some(Document { id: DocumentId(1), .. }) ); assert_matches!(iter.next(), None); } }
let env_path = env_path.join("data.mdb"); let env_file = self.env.copy_to_path(&env_path, CompactionOption::Enabled)?;
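A minimal usage sketch of the closure-based transaction helpers defined above (assuming it lives next to `Database` in this crate with the surrounding `chrono` import in scope; `touch_last_update` is an illustrative name, not an existing method): the closure receives the open writer, and `main_write` commits only if the closure returns `Ok`.

// Sketch only: how the main_write helper is intended to be called.
fn touch_last_update(db: &Database) -> MResult<()> {
    db.main_write(|writer| {
        // several writes may share this transaction; the helper commits
        // once the closure returns Ok and bubbles errors up otherwise
        db.set_last_update(writer, &Utc::now())?;
        Ok(())
    })
}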
custom_logger.rs
/*! Using `env_logger` to drive a custom logger. Before running this example, try setting the `MY_LOG_LEVEL` environment variable to `info`: ```no_run,shell $ export MY_LOG_LEVEL='info' ``` If you only want to change the way logs are formatted, look at the `custom_format` example. */ #[macro_use] extern crate log; extern crate env_logger; use env_logger::filter::Filter; use log::{Log, Metadata, Record, SetLoggerError}; struct MyLogger { inner: Filter } impl MyLogger { fn new() -> MyLogger { use env_logger::filter::Builder;
let mut builder = Builder::from_env("MY_LOG_LEVEL"); MyLogger { inner: builder.build() } } fn init() -> Result<(), SetLoggerError> { let logger = Self::new(); log::set_max_level(logger.inner.filter()); log::set_boxed_logger(Box::new(logger)) } } impl Log for MyLogger { fn enabled(&self, metadata: &Metadata) -> bool { self.inner.enabled(metadata) } fn log(&self, record: &Record) { // Check if the record is matched by the logger before logging if self.inner.matches(record) { println!("{} - {}", record.level(), record.args()); } } fn flush(&self) { } } fn main() { MyLogger::init().unwrap(); info!("a log from `MyLogger`"); }
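One possible refinement of `MyLogger::new` above, if a fallback level is wanted when `MY_LOG_LEVEL` is unset; a hedged sketch using the same `env_logger::filter::Builder` API (the `warn` default and the `new_with_default` name are only examples, not part of the original):

// Sketch only: build the filter with a default, letting MY_LOG_LEVEL override it.
fn new_with_default() -> MyLogger {
    use env_logger::filter::Builder;
    use log::LevelFilter;

    let mut builder = Builder::new();
    // global default when the variable is absent
    builder.filter(None, LevelFilter::Warn);
    if let Ok(spec) = std::env::var("MY_LOG_LEVEL") {
        // directives like "info" or "my_crate=debug" are parsed here
        builder.parse(&spec);
    }

    MyLogger { inner: builder.build() }
}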
Live.d.ts
export declare const Live: ComponentType<CustomIconProps>;
import { ComponentType } from 'react'; import { CustomIconProps } from '@monorail/visualComponents/icon/Icon';
lockd.go
package main // export GOPATH=~/6.824 // go build lockd.go // go build lockc.go
// // on Athena, use /tmp/myname-a and /tmp/myname-b // instead of a and b. import "time" import "6.824/lockservice" import "os" import "fmt" func main() { if len(os.Args) == 4 && os.Args[1] == "-p" { lockservice.StartServer(os.Args[2], os.Args[3], true) } else if len(os.Args) == 4 && os.Args[1] == "-b" { lockservice.StartServer(os.Args[2], os.Args[3], false) } else { fmt.Printf("Usage: lockd -p|-b primaryport backupport\n") os.Exit(1) } for { time.Sleep(100 * time.Second) } }
// ./lockd -p a b & // ./lockd -b a b & // ./lockc -l a b lx // ./lockc -u a b lx
test_summary.py
import numpy as np from microsim.opencl.ramp.summary import Summary from microsim.opencl.ramp.snapshot import Snapshot def
(): npeople = 50 + 34 + 101 + 551 summary = Summary(snapshot=Snapshot.random(nplaces=10, npeople=npeople, nslots=10), max_time=20) time = 10 statuses = np.concatenate(( np.full(50, 0), np.full(34, 1), np.full(101, 4), np.full(551, 6), )) np.random.shuffle(statuses) summary.update(time, statuses) assert summary.total_counts[0][time] == 50 assert summary.total_counts[1][time] == 34 assert summary.total_counts[2][time] == 0 assert summary.total_counts[3][time] == 0 assert summary.total_counts[4][time] == 101 assert summary.total_counts[5][time] == 0 assert summary.total_counts[6][time] == 551
test_summary_update
base58bench_test.go
// Copyright (c) 2013-2014 The btcsuite developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package base58_test import ( "bytes" "testing" "github.com/qtumatomicswap/qtumutil/base58" ) func BenchmarkBase58Encode(b *testing.B) { b.StopTimer() data := bytes.Repeat([]byte{0xff}, 5000) b.SetBytes(int64(len(data))) b.StartTimer() for i := 0; i < b.N; i++ {
} func BenchmarkBase58Decode(b *testing.B) { b.StopTimer() data := bytes.Repeat([]byte{0xff}, 5000) encoded := base58.Encode(data) b.SetBytes(int64(len(encoded))) b.StartTimer() for i := 0; i < b.N; i++ { base58.Decode(encoded) } }
base58.Encode(data) }
__main__.py
import siteScripts.timeout.scraper as timeoutScraper
import logging
import logging.config
from webscraper.models.landmark import Landmark
def main(): # File to save landmarks f = "landmarks.csv" # Scrapers timeOutLandmarks = timeoutScraper.scrape() # Save Data saveLandmarksCSV(timeOutLandmarks, f) if __name__ == '__main__': logging.config.fileConfig(fname="./logs/logging.conf", disable_existing_loggers=False) logger = logging.getLogger(__name__) logger.info("Let's Begin") main()
from webscraper.services.csv import saveLandmarksCSV
ixgbe.rs
use std::collections::VecDeque; use std::error::Error; use std::mem; use std::os::unix::io::RawFd; use std::path::Path; use std::ptr; use std::rc::Rc; use std::thread; use std::time::{Duration, Instant}; use crate::constants::*; use crate::memory::*; use crate::vfio::*; use crate::pci::pci_map_resource; use crate::vfio::VFIO_PCI_BAR0_REGION_INDEX; use crate::DeviceStats; use crate::IxyDevice; use crate::MAX_QUEUES; const DRIVER_NAME: &str = "ixy-ixgbe"; const PKT_BUF_ENTRY_SIZE: usize = 2048; const MIN_MEMPOOL_SIZE: usize = 4096; const NUM_RX_QUEUE_ENTRIES: usize = 512; const NUM_TX_QUEUE_ENTRIES: usize = 512; const TX_CLEAN_BATCH: usize = 32; fn wrap_ring(index: usize, ring_size: usize) -> usize { (index + 1) & (ring_size - 1) } pub struct IxgbeDevice { pci_addr: String, addr: *mut u8, len: usize, num_rx_queues: u16, num_tx_queues: u16, rx_queues: Vec<IxgbeRxQueue>, tx_queues: Vec<IxgbeTxQueue>, vfio: bool, vfio_fd: RawFd, } struct IxgbeRxQueue { descriptors: *mut ixgbe_adv_rx_desc, num_descriptors: usize, pool: Rc<Mempool>, bufs_in_use: Vec<usize>, rx_index: usize, } struct IxgbeTxQueue { descriptors: *mut ixgbe_adv_tx_desc, num_descriptors: usize, pool: Option<Rc<Mempool>>, bufs_in_use: VecDeque<usize>, clean_index: usize, tx_index: usize, } impl IxyDevice for IxgbeDevice { /// Returns an initialized `IxgbeDevice` on success. /// /// # Panics /// Panics if `num_rx_queues` or `num_tx_queues` exceeds `MAX_QUEUES`. fn init( pci_addr: &str, num_rx_queues: u16, num_tx_queues: u16, ) -> Result<IxgbeDevice, Box<dyn Error>> { if unsafe { libc::getuid() } != 0 { warn!("not running as root, this will probably fail"); } assert!( num_rx_queues <= MAX_QUEUES, "cannot configure {} rx queues: limit is {}", num_rx_queues, MAX_QUEUES ); assert!( num_tx_queues <= MAX_QUEUES, "cannot configure {} tx queues: limit is {}", num_tx_queues, MAX_QUEUES ); // Check if the NIC is IOMMU enabled... let vfio = Path::new(&format!("/sys/bus/pci/devices/{}/iommu_group", pci_addr)).exists(); let device_fd: RawFd; let (addr, len) = if vfio { device_fd = vfio_init(pci_addr)?; vfio_map_region(device_fd, VFIO_PCI_BAR0_REGION_INDEX)? } else { pci_map_resource(pci_addr)? }; // initialize RX and TX queue let rx_queues = Vec::with_capacity(num_rx_queues as usize); let tx_queues = Vec::with_capacity(num_tx_queues as usize); // create the IxyDevice let mut dev = IxgbeDevice { pci_addr: pci_addr.to_string(), addr, len, num_rx_queues, num_tx_queues, rx_queues, tx_queues, vfio, vfio_fd: unsafe { VFIO_CONTAINER_FILE_DESCRIPTOR }, }; dev.reset_and_init(pci_addr)?; Ok(dev) } /// Returns the driver's name of this device. fn get_driver_name(&self) -> &str { DRIVER_NAME } /// Returns the card's iommu capability. fn is_card_iommu_capable(&self) -> bool { self.vfio } /// Returns VFIO container file descriptor or [`None`] if IOMMU is not available. fn get_vfio_container(&self) -> Option<RawFd> { if self.vfio { Some(self.vfio_fd) } else { None } } /// Returns the pci address of this device. fn get_pci_addr(&self) -> &str { &self.pci_addr } /// Returns the mac address of this device. fn get_mac_addr(&self) -> [u8; 6] { let low = self.get_reg32(IXGBE_RAL(0)); let high = self.get_reg32(IXGBE_RAH(0)); [ (low & 0xff) as u8, (low >> 8 & 0xff) as u8, (low >> 16 & 0xff) as u8, (low >> 24) as u8, (high & 0xff) as u8, (high >> 8 & 0xff) as u8, ] } /// Sets the mac address of this device. 
fn set_mac_addr(&self, mac: [u8; 6]) { let low: u32 = u32::from(mac[0]) + (u32::from(mac[1]) << 8) + (u32::from(mac[2]) << 16) + (u32::from(mac[3]) << 24); let high: u32 = u32::from(mac[4]) + (u32::from(mac[5]) << 8); self.set_reg32(IXGBE_RAL(0), low); self.set_reg32(IXGBE_RAH(0), high); } /// Pushes up to `num_packets` received `Packet`s onto `buffer`. fn rx_batch( &mut self, queue_id: u32, buffer: &mut VecDeque<Packet>, num_packets: usize, ) -> usize { let mut rx_index; let mut last_rx_index; let mut received_packets = 0; { let queue = self .rx_queues .get_mut(queue_id as usize) .expect("invalid rx queue id"); rx_index = queue.rx_index; last_rx_index = queue.rx_index; for i in 0..num_packets { let desc = unsafe { queue.descriptors.add(rx_index) as *mut ixgbe_adv_rx_desc }; let status = unsafe { ptr::read_volatile(&mut (*desc).wb.upper.status_error as *mut u32) }; if (status & IXGBE_RXDADV_STAT_DD) == 0 { break; } if (status & IXGBE_RXDADV_STAT_EOP) == 0 { panic!("increase buffer size or decrease MTU") } let pool = &queue.pool; // get a free buffer from the mempool if let Some(buf) = pool.alloc_buf() { // replace currently used buffer with new buffer let buf = mem::replace(&mut queue.bufs_in_use[rx_index], buf); let p = Packet { addr_virt: pool.get_virt_addr(buf), addr_phys: pool.get_phys_addr(buf), len: unsafe { ptr::read_volatile(&(*desc).wb.upper.length as *const u16) as usize }, pool: pool.clone(), pool_entry: buf, }; #[cfg(all( any(target_arch = "x86", target_arch = "x86_64"), target_feature = "sse" ))] p.prefetch(Prefetch::Time1); buffer.push_back(p); unsafe { ptr::write_volatile( &mut (*desc).read.pkt_addr as *mut u64, pool.get_phys_addr(queue.bufs_in_use[rx_index]) as u64, ); ptr::write_volatile(&mut (*desc).read.hdr_addr as *mut u64, 0); } last_rx_index = rx_index; rx_index = wrap_ring(rx_index, queue.num_descriptors); received_packets = i + 1; } else { // break if there was no free buffer break; } } } if rx_index != last_rx_index { self.set_reg32(IXGBE_RDT(queue_id), last_rx_index as u32); self.rx_queues[queue_id as usize].rx_index = rx_index; } received_packets } /// Pops as many packets as possible from `packets` to put them into the device`s tx queue. 
fn tx_batch(&mut self, queue_id: u32, packets: &mut VecDeque<Packet>) -> usize { let mut sent = 0; { let mut queue = self .tx_queues .get_mut(queue_id as usize) .expect("invalid tx queue id"); let mut cur_index = queue.tx_index; let clean_index = clean_tx_queue(&mut queue); if queue.pool.is_none() { if let Some(packet) = packets.get(0) { queue.pool = Some(packet.pool.clone()); } } while let Some(packet) = packets.pop_front() { assert!( Rc::ptr_eq(queue.pool.as_ref().unwrap(), &packet.pool), "distinct memory pools for a single tx queue are not supported yet" ); let next_index = wrap_ring(cur_index, queue.num_descriptors); if clean_index == next_index { // tx queue of device is full, push packet back onto the // queue of to-be-sent packets packets.push_front(packet); break; } queue.tx_index = wrap_ring(queue.tx_index, queue.num_descriptors); unsafe { ptr::write_volatile( &mut (*queue.descriptors.add(cur_index)).read.buffer_addr as *mut u64, packet.get_phys_addr() as u64, ); ptr::write_volatile( &mut (*queue.descriptors.add(cur_index)).read.cmd_type_len as *mut u32, IXGBE_ADVTXD_DCMD_EOP | IXGBE_ADVTXD_DCMD_RS | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_DATA | packet.len() as u32, ); ptr::write_volatile( &mut (*queue.descriptors.add(cur_index)).read.olinfo_status as *mut u32, (packet.len() as u32) << IXGBE_ADVTXD_PAYLEN_SHIFT, ); } queue.bufs_in_use.push_back(packet.pool_entry); mem::forget(packet); cur_index = next_index; sent += 1; } } self.set_reg32( IXGBE_TDT(queue_id), self.tx_queues[queue_id as usize].tx_index as u32, ); sent } /// Reads the stats of this device into `stats`. fn read_stats(&self, stats: &mut DeviceStats) { let rx_pkts = u64::from(self.get_reg32(IXGBE_GPRC)); let tx_pkts = u64::from(self.get_reg32(IXGBE_GPTC)); let rx_bytes = u64::from(self.get_reg32(IXGBE_GORCL)) + (u64::from(self.get_reg32(IXGBE_GORCH)) << 32); let tx_bytes = u64::from(self.get_reg32(IXGBE_GOTCL)) + (u64::from(self.get_reg32(IXGBE_GOTCH)) << 32); stats.rx_pkts += rx_pkts; stats.tx_pkts += tx_pkts; stats.rx_bytes += rx_bytes; stats.tx_bytes += tx_bytes; } /// Resets the stats of this device. fn reset_stats(&mut self) { self.get_reg32(IXGBE_GPRC); self.get_reg32(IXGBE_GPTC); self.get_reg32(IXGBE_GORCL); self.get_reg32(IXGBE_GORCH); self.get_reg32(IXGBE_GOTCL); self.get_reg32(IXGBE_GOTCH); } /// Returns the link speed of this device. fn get_link_speed(&self) -> u16 { let speed = self.get_reg32(IXGBE_LINKS); if (speed & IXGBE_LINKS_UP) == 0 { return 0; } match speed & IXGBE_LINKS_SPEED_82599 { IXGBE_LINKS_SPEED_100_82599 => 100, IXGBE_LINKS_SPEED_1G_82599 => 1000, IXGBE_LINKS_SPEED_10G_82599 => 10000, _ => 0, } } } impl IxgbeDevice { /// Resets and initializes this device. 
fn reset_and_init(&mut self, pci_addr: &str) -> Result<(), Box<dyn Error>> { info!("resetting device {}", pci_addr); // section 4.6.3.1 - disable all interrupts self.set_reg32(IXGBE_EIMC, 0x7fff_ffff); // section 4.6.3.2 self.set_reg32(IXGBE_CTRL, IXGBE_CTRL_RST_MASK); self.wait_clear_reg32(IXGBE_CTRL, IXGBE_CTRL_RST_MASK); thread::sleep(Duration::from_millis(10)); // section 4.6.3.1 - disable interrupts again after reset self.set_reg32(IXGBE_EIMC, 0x7fff_ffff); let mac = self.get_mac_addr(); info!("initializing device {}", pci_addr); info!( "mac address: {:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5] ); // section 4.6.3 - wait for EEPROM auto read completion self.wait_set_reg32(IXGBE_EEC, IXGBE_EEC_ARD); // section 4.6.3 - wait for dma initialization done self.wait_set_reg32(IXGBE_RDRXCTL, IXGBE_RDRXCTL_DMAIDONE); // skip last step from 4.6.3 - we don't want interrupts // section 4.6.4 - initialize link (auto negotiation) self.init_link(); // section 4.6.5 - statistical counters // reset-on-read registers, just read them once self.reset_stats(); // section 4.6.7 - init rx self.init_rx()?; // section 4.6.8 - init tx self.init_tx()?; for i in 0..self.num_rx_queues { self.start_rx_queue(i)?; } for i in 0..self.num_tx_queues { self.start_tx_queue(i)?; } // enable promisc mode by default to make testing easier self.set_promisc(true); // wait some time for the link to come up self.wait_for_link(); Ok(()) } // sections 4.6.7 /// Initializes the rx queues of this device. fn init_rx(&mut self) -> Result<(), Box<dyn Error>> { // disable rx while re-configuring it self.clear_flags32(IXGBE_RXCTRL, IXGBE_RXCTRL_RXEN); // section 4.6.11.3.4 - allocate all queues and traffic to PB0 self.set_reg32(IXGBE_RXPBSIZE(0), IXGBE_RXPBSIZE_128KB); for i in 1..8 { self.set_reg32(IXGBE_RXPBSIZE(i), 0); } // enable CRC offloading self.set_flags32(IXGBE_HLREG0, IXGBE_HLREG0_RXCRCSTRP); self.set_flags32(IXGBE_RDRXCTL, IXGBE_RDRXCTL_CRCSTRIP); // accept broadcast packets self.set_flags32(IXGBE_FCTRL, IXGBE_FCTRL_BAM); // configure queues, same for all queues for i in 0..self.num_rx_queues { debug!("initializing rx queue {}", i); // enable advanced rx descriptors self.set_reg32( IXGBE_SRRCTL(u32::from(i)), (self.get_reg32(IXGBE_SRRCTL(u32::from(i))) & !IXGBE_SRRCTL_DESCTYPE_MASK) | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF, ); // let nic drop packets if no rx descriptor is available instead of buffering them self.set_flags32(IXGBE_SRRCTL(u32::from(i)), IXGBE_SRRCTL_DROP_EN); // section 7.1.9 - setup descriptor ring let ring_size_bytes = (NUM_RX_QUEUE_ENTRIES) as usize * mem::size_of::<ixgbe_adv_rx_desc>(); let dma: Dma<ixgbe_adv_rx_desc> = Dma::allocate(ring_size_bytes, true)?; // initialize to 0xff to prevent rogue memory accesses on premature dma activation unsafe { memset(dma.virt as *mut u8, ring_size_bytes, 0xff); } self.set_reg32( IXGBE_RDBAL(u32::from(i)), (dma.phys as u64 & 0xffff_ffff) as u32, ); self.set_reg32(IXGBE_RDBAH(u32::from(i)), (dma.phys as u64 >> 32) as u32); self.set_reg32(IXGBE_RDLEN(u32::from(i)), ring_size_bytes as u32); debug!("rx ring {} phys addr: {:#x}", i, dma.phys); debug!("rx ring {} virt addr: {:p}", i, dma.virt); // set ring to empty at start self.set_reg32(IXGBE_RDH(u32::from(i)), 0); self.set_reg32(IXGBE_RDT(u32::from(i)), 0); let mempool_size = if NUM_RX_QUEUE_ENTRIES + NUM_TX_QUEUE_ENTRIES < MIN_MEMPOOL_SIZE { MIN_MEMPOOL_SIZE } else { NUM_RX_QUEUE_ENTRIES + NUM_TX_QUEUE_ENTRIES }; let mempool = Mempool::allocate(mempool_size as usize, 
PKT_BUF_ENTRY_SIZE).unwrap();

            let rx_queue = IxgbeRxQueue {
                descriptors: dma.virt,
                pool: mempool,
                num_descriptors: NUM_RX_QUEUE_ENTRIES,
                rx_index: 0,
                bufs_in_use: Vec::with_capacity(NUM_RX_QUEUE_ENTRIES),
            };

            self.rx_queues.push(rx_queue);
        }

        // last sentence of section 4.6.7 - set some magic bits
        self.set_flags32(IXGBE_CTRL_EXT, IXGBE_CTRL_EXT_NS_DIS);

        // probably a broken feature, this flag is initialized with 1 but has to be set to 0
        for i in 0..self.num_rx_queues {
            self.clear_flags32(IXGBE_DCA_RXCTRL(u32::from(i)), 1 << 12);
        }

        // start rx
        self.set_flags32(IXGBE_RXCTRL, IXGBE_RXCTRL_RXEN);

        Ok(())
    }

    // section 4.6.8
    /// Initializes the tx queues of this device.
    fn init_tx(&mut self) -> Result<(), Box<dyn Error>> {
        // crc offload and small packet padding
        self.set_flags32(IXGBE_HLREG0, IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);

        // section 4.6.11.3.4 - set default buffer size allocations
        self.set_reg32(IXGBE_TXPBSIZE(0), IXGBE_TXPBSIZE_40KB);
        for i in 1..8 {
            self.set_reg32(IXGBE_TXPBSIZE(i), 0);
        }

        // required when not using DCB/VTd
        self.set_reg32(IXGBE_DTXMXSZRQ, 0xffff);
        self.clear_flags32(IXGBE_RTTDCS, IXGBE_RTTDCS_ARBDIS);

        // configure queues
        for i in 0..self.num_tx_queues {
            debug!("initializing tx queue {}", i);

            // section 7.1.9 - setup descriptor ring
            let ring_size_bytes =
                NUM_TX_QUEUE_ENTRIES as usize * mem::size_of::<ixgbe_adv_tx_desc>();

            let dma: Dma<ixgbe_adv_tx_desc> = Dma::allocate(ring_size_bytes, true)?;
            unsafe {
                memset(dma.virt as *mut u8, ring_size_bytes, 0xff);
            }
self.set_reg32( IXGBE_TDBAL(u32::from(i)), (dma.phys as u64 & 0xffff_ffff) as u32, ); self.set_reg32(IXGBE_TDBAH(u32::from(i)), (dma.phys as u64 >> 32) as u32); self.set_reg32(IXGBE_TDLEN(u32::from(i)), ring_size_bytes as u32); debug!("tx ring {} phys addr: {:#x}", i, dma.phys); debug!("tx ring {} virt addr: {:p}", i, dma.virt); // descriptor writeback magic values, important to get good performance and low PCIe overhead // see 7.2.3.4.1 and 7.2.3.5 for an explanation of these values and how to find good ones // we just use the defaults from DPDK here, but this is a potentially interesting point for optimizations let mut txdctl = self.get_reg32(IXGBE_TXDCTL(u32::from(i))); // there are no defines for this in constants.rs for some reason // pthresh: 6:0, hthresh: 14:8, wthresh: 22:16 txdctl &= !(0x3F | (0x3F << 8) | (0x3F << 16)); txdctl |= 36 | (8 << 8) | (4 << 16); self.set_reg32(IXGBE_TXDCTL(u32::from(i)), txdctl); let tx_queue = IxgbeTxQueue { descriptors: dma.virt, bufs_in_use: VecDeque::with_capacity(NUM_TX_QUEUE_ENTRIES), pool: None, num_descriptors: NUM_TX_QUEUE_ENTRIES, clean_index: 0, tx_index: 0, }; self.tx_queues.push(tx_queue); } // final step: enable DMA self.set_reg32(IXGBE_DMATXCTL, IXGBE_DMATXCTL_TE); Ok(()) } /// Sets the rx queues` descriptors and enables the queues. fn start_rx_queue(&mut self, queue_id: u16) -> Result<(), Box<dyn Error>> { debug!("starting rx queue {}", queue_id); { let queue = &mut self.rx_queues[queue_id as usize]; if queue.num_descriptors & (queue.num_descriptors - 1) != 0 { return Err("number of queue entries must be a power of 2".into()); } for i in 0..queue.num_descriptors { let pool = &queue.pool; let buf = match pool.alloc_buf() { Some(x) => x, None => return Err("failed to allocate rx descriptor".into()), }; unsafe { ptr::write_volatile( &mut (*queue.descriptors.add(i)).read.pkt_addr as *mut u64, pool.get_phys_addr(buf) as u64, ); ptr::write_volatile( &mut (*queue.descriptors.add(i)).read.hdr_addr as *mut u64, 0, ); } // we need to remember which descriptor entry belongs to which mempool entry queue.bufs_in_use.push(buf); } } let queue = &self.rx_queues[queue_id as usize]; // enable queue and wait if necessary self.set_flags32(IXGBE_RXDCTL(u32::from(queue_id)), IXGBE_RXDCTL_ENABLE); self.wait_set_reg32(IXGBE_RXDCTL(u32::from(queue_id)), IXGBE_RXDCTL_ENABLE); // rx queue starts out full self.set_reg32(IXGBE_RDH(u32::from(queue_id)), 0); // was set to 0 before in the init function self.set_reg32( IXGBE_RDT(u32::from(queue_id)), (queue.num_descriptors - 1) as u32, ); Ok(()) } /// Enables the tx queues. fn start_tx_queue(&mut self, queue_id: u16) -> Result<(), Box<dyn Error>> { debug!("starting tx queue {}", queue_id); { let queue = &mut self.tx_queues[queue_id as usize]; if queue.num_descriptors & (queue.num_descriptors - 1) != 0 { return Err("number of queue entries must be a power of 2".into()); } } // tx queue starts out empty self.set_reg32(IXGBE_TDH(u32::from(queue_id)), 0); self.set_reg32(IXGBE_TDT(u32::from(queue_id)), 0); // enable queue and wait if necessary self.set_flags32(IXGBE_TXDCTL(u32::from(queue_id)), IXGBE_TXDCTL_ENABLE); self.wait_set_reg32(IXGBE_TXDCTL(u32::from(queue_id)), IXGBE_TXDCTL_ENABLE); Ok(()) } // see section 4.6.4 /// Initializes the link of this device. 
fn init_link(&self) { // link auto-configuration register should already be set correctly, we're resetting it anyway self.set_reg32( IXGBE_AUTOC, (self.get_reg32(IXGBE_AUTOC) & !IXGBE_AUTOC_LMS_MASK) | IXGBE_AUTOC_LMS_10G_SERIAL, ); self.set_reg32( IXGBE_AUTOC, (self.get_reg32(IXGBE_AUTOC) & !IXGBE_AUTOC_10G_PMA_PMD_MASK) | IXGBE_AUTOC_10G_XAUI, ); // negotiate link self.set_flags32(IXGBE_AUTOC, IXGBE_AUTOC_AN_RESTART); // datasheet wants us to wait for the link here, but we can continue and wait afterwards } /// Waits for the link to come up. fn wait_for_link(&self) { info!("waiting for link"); let time = Instant::now(); let mut speed = self.get_link_speed(); while speed == 0 && time.elapsed().as_secs() < 10 { thread::sleep(Duration::from_millis(100)); speed = self.get_link_speed(); } info!("link speed is {} Mbit/s", self.get_link_speed()); } /// Enables or disables promisc mode of this device. fn set_promisc(&self, enabled: bool) { if enabled { info!("enabling promisc mode"); self.set_flags32(IXGBE_FCTRL, IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE); } else { info!("disabling promisc mode"); self.clear_flags32(IXGBE_FCTRL, IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE); } } /// Returns the register at `self.addr` + `reg`. /// /// # Panics /// /// Panics if `self.addr` + `reg` does not belong to the mapped memory of the pci device. fn get_reg32(&self, reg: u32) -> u32 { assert!( reg as usize <= self.len - 4 as usize, "memory access out of bounds" ); unsafe { ptr::read_volatile((self.addr as usize + reg as usize) as *mut u32) } } /// Sets the register at `self.addr` + `reg` to `value`. /// /// # Panics /// /// Panics if `self.addr` + `reg` does not belong to the mapped memory of the pci device. fn set_reg32(&self, reg: u32, value: u32) { assert!( reg as usize <= self.len - 4 as usize, "memory access out of bounds" ); unsafe { ptr::write_volatile((self.addr as usize + reg as usize) as *mut u32, value); } } /// Sets the `flags` at `self.addr` + `reg`. fn set_flags32(&self, reg: u32, flags: u32) { self.set_reg32(reg, self.get_reg32(reg) | flags); } /// Clears the `flags` at `self.addr` + `reg`. fn clear_flags32(&self, reg: u32, flags: u32) { self.set_reg32(reg, self.get_reg32(reg) & !flags); } /// Waits for `self.addr` + `reg` to clear `value`. fn wait_clear_reg32(&self, reg: u32, value: u32) { loop { let current = self.get_reg32(reg); if (current & value) == 0 { break; } thread::sleep(Duration::from_millis(100)); } } /// Waits for `self.addr` + `reg` to set `value`. fn wait_set_reg32(&self, reg: u32, value: u32) { loop { let current = self.get_reg32(reg); if (current & value) == value { break; } thread::sleep(Duration::from_millis(100)); } } } /// Removes multiples of `TX_CLEAN_BATCH` packets from `queue`. 
fn clean_tx_queue(queue: &mut IxgbeTxQueue) -> usize { let mut clean_index = queue.clean_index; let cur_index = queue.tx_index; loop { let mut cleanable = cur_index as i32 - clean_index as i32; if cleanable < 0 { cleanable += queue.num_descriptors as i32; } if cleanable < TX_CLEAN_BATCH as i32 { break; } let mut cleanup_to = clean_index + TX_CLEAN_BATCH - 1; if cleanup_to >= queue.num_descriptors { cleanup_to -= queue.num_descriptors; } let status = unsafe { ptr::read_volatile(&(*queue.descriptors.add(cleanup_to)).wb.status as *const u32) }; if (status & IXGBE_ADVTXD_STAT_DD) != 0 { if let Some(ref p) = queue.pool { if TX_CLEAN_BATCH as usize >= queue.bufs_in_use.len() { p.free_stack .borrow_mut() .extend(queue.bufs_in_use.drain(..)) } else { p.free_stack .borrow_mut() .extend(queue.bufs_in_use.drain(..TX_CLEAN_BATCH)) } } clean_index = wrap_ring(cleanup_to, queue.num_descriptors); } else { break; } } queue.clean_index = clean_index; clean_index }
}
wsgi.py
"""
WSGI config for OBB_Train_Station project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'OBB_Train_Station.settings') application = get_wsgi_application()
dmn_3_0086.rs
/* * DMNTK - Decision Model and Notation Toolkit * * MIT license * * Copyright (c) 2018-2022 Dariusz Depta Engos Software * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Apache license, Version 2.0 * * Copyright (c) 2018-2022 Dariusz Depta Engos Software * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use super::super::*; use crate::model_evaluator::ModelEvaluator; use std::sync::Arc; lazy_static! { static ref MODEL_EVALUATOR: Arc<ModelEvaluator> = build_model_evaluator(dmntk_examples::DMN_3_0086); } #[test] #[ignore] fn
() { let ctx = context(r#"{A Person: {age: 21,name: "John Doe"}}"#); assert_decision(&MODEL_EVALUATOR, "A Decision Ctx with DT", &ctx, r#"null"#); } #[test] #[ignore] fn _0002() { let ctx = context(r#"{A Person: {age: 47,name: "John Doe"}}"#); assert_decision(&MODEL_EVALUATOR, "A Decision Ctx with DT", &ctx, r#"null"#); }
_0001
values.go
// Go distinguishes the following data types: the character type and, by extension,
// string literals (`strings`), the integer type (`int`),
// the floating-point type (`float`), the boolean type (`boolean`), etc.
package main

import "fmt"

func main() {

    //
String literals (strings) can be concatenated using the `+` operator.
    fmt.Println("go" + "lang")

    // Examples of the integer and floating-point types.
    fmt.Println("1+1 =", 1+1)
    fmt.Println("7.0/3.0 =", 7.0/3.0)

    // The boolean type and examples of Boolean expressions.
    fmt.Println(true && false)
    fmt.Println(true || false)
    fmt.Println(!true)
}
front_end_common_edge_to_n_keys_mapper.py
# pacman imports from pacman.model.routing_info.\ dict_based_partitioned_partition_n_keys_map import \ DictBasedPartitionedPartitionNKeysMap # spinnMachine imports from spinn_machine.utilities.progress_bar import ProgressBar # front end common imports from spinn_front_end_common.abstract_models.\ abstract_provides_incoming_partition_constraints import \ AbstractProvidesIncomingPartitionConstraints from spinn_front_end_common.abstract_models.\ abstract_provides_n_keys_for_partition import \ AbstractProvidesNKeysForPartition from spinn_front_end_common.abstract_models.\ abstract_provides_outgoing_partition_constraints import \ AbstractProvidesOutgoingPartitionConstraints from spinn_front_end_common.utilities import exceptions class FrontEndCommonEdgeToNKeysMapper(object): """ Works out the number of keys needed for each edge """ def __call__(self, partitioned_graph, partitionable_graph=None, graph_mapper=None): # Generate an n_keys map for the graph and add constraints
@staticmethod def _check_constraints_equal(constraints, stored_constraints): """ :param constraints: :param stored_constraints: :return: """ for constraint in constraints: if constraint not in stored_constraints: raise exceptions.ConfigurationException( "Two edges within the same partition have different " "constraints. This is deemed an error. Please fix and " "try again") @staticmethod def _process_partitionable_partition( partition, n_keys_map, partition_id, graph_mapper, partitionable_graph): partitioned_edge = partition.edges[0] vertex_slice = graph_mapper.get_subvertex_slice( partitioned_edge.pre_subvertex) edge = graph_mapper.get_partitionable_edge_from_partitioned_edge( partitioned_edge) if not isinstance(edge.pre_vertex, AbstractProvidesNKeysForPartition): n_keys_map.set_n_keys_for_partition( partition, vertex_slice.n_atoms) else: n_keys_map.set_n_keys_for_partition( partition, edge.pre_vertex.get_n_keys_for_partition( partition, graph_mapper)) constraints = list() if isinstance(edge.pre_vertex, AbstractProvidesOutgoingPartitionConstraints): constraints.extend( edge.pre_vertex.get_outgoing_partition_constraints( partition, graph_mapper)) if isinstance(edge.post_vertex, AbstractProvidesIncomingPartitionConstraints): constraints.extend( edge.post_vertex.get_incoming_partition_constraints( partition, graph_mapper)) constraints.extend( partitionable_graph.partition_from_vertex( edge.pre_vertex, partition_id).constraints) return constraints @staticmethod def _process_partitioned_partition( partition, n_keys_map, partition_id, partitioned_graph): edge = partition.edges[0] if not isinstance(edge.pre_subvertex, AbstractProvidesNKeysForPartition): n_keys_map.set_n_keys_for_partition(partition, 1) else: n_keys_map.set_n_keys_for_partition( partition, edge.pre_subvertex.get_n_keys_for_partition( partition, None)) constraints = list() if isinstance(edge.pre_subvertex, AbstractProvidesOutgoingPartitionConstraints): constraints.extend( edge.pre_subvertex.get_outgoing_partition_constraints( partition, None)) if isinstance(edge.post_subvertex, AbstractProvidesIncomingPartitionConstraints): constraints.extend( edge.post_subvertex.get_incoming_partition_constraints( partition, None)) constraints.extend( partitioned_graph.partition_from_vertex( edge.pre_subvertex, partition_id).constraints) return constraints
n_keys_map = DictBasedPartitionedPartitionNKeysMap() # generate progress bar progress_bar = ProgressBar( len(partitioned_graph.subvertices), "Deducing edge to number of keys map") # contains a partitionable vertex if partitionable_graph is not None and graph_mapper is not None: # iterate over each partition in the partitioned graph for vertex in partitioned_graph.subvertices: partitions = \ partitioned_graph.outgoing_edges_partitions_from_vertex( vertex) for partition_id in partitions: partition = partitions[partition_id] added_constraints = False constraints = self._process_partitionable_partition( partition, n_keys_map, partition_id, graph_mapper, partitionable_graph) if not added_constraints: partition.add_constraints(constraints) else: self._check_constraints_equal( constraints, partition.constraints) progress_bar.update() progress_bar.end() else: for vertex in partitioned_graph.subvertices: partitions = \ partitioned_graph.outgoing_edges_partitions_from_vertex( vertex) for partition_id in partitions: partition = partitions[partition_id] added_constraints = False constraints = self._process_partitioned_partition( partition, n_keys_map, partition_id, partitioned_graph) if not added_constraints: partition.add_constraints(constraints) else: self._check_constraints_equal( constraints, partition.constraints) progress_bar.update() progress_bar.end() return {'n_keys_map': n_keys_map}
python_streaming_function.py
import base64 import json import logging import os import time import traceback from urllib.parse import urlparse, quote from botocore.auth import SigV4Auth from botocore.awsrequest import AWSRequest from botocore.credentials import get_credentials from botocore.endpoint import BotocoreHTTPSession from botocore.session import Session from boto3.dynamodb.types import TypeDeserializer # The following parameters are required to configure the ES cluster ES_ENDPOINT = os.environ['ES_ENDPOINT'] ES_REGION = os.environ['ES_REGION'] DEBUG = True if os.environ['DEBUG'] == "1" else False ES_USE_EXTERNAL_VERSIONING = True if os.environ['ES_USE_EXTERNAL_VERSIONING'] == "true" else False # ElasticSearch 6 deprecated having multiple mapping types in an index. Default to doc. DOC_TYPE = 'doc' ES_MAX_RETRIES = 3 # Max number of retries for exponential backoff logger = logging.getLogger() logger.setLevel(logging.DEBUG if DEBUG else logging.INFO) logger.info("Streaming to ElasticSearch") # custom encoder changes # - sets to lists class DDBTypesEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) return json.JSONEncoder.default(self, obj) # Subclass of boto's TypeDeserializer for DynamoDB to adjust for DynamoDB Stream format. class StreamTypeDeserializer(TypeDeserializer): def
(self, value): return float(value) def _deserialize_b(self, value): return value # Already in Base64 class ES_Exception(Exception): '''Capture status_code from request''' status_code = 0 payload = '' def __init__(self, status_code, payload): self.status_code = status_code self.payload = payload Exception.__init__( self, 'ES_Exception: status_code={}, payload={}'.format(status_code, payload)) # Low-level POST data to Amazon Elasticsearch Service generating a Sigv4 signed request def post_data_to_es(payload, region, creds, host, path, method='POST', proto='https://'): '''Post data to ES endpoint with SigV4 signed http headers''' req = AWSRequest(method=method, url=proto + host + quote(path), data=payload, headers={'Host': host, 'Content-Type': 'application/json'}) SigV4Auth(creds, 'es', region).add_auth(req) http_session = BotocoreHTTPSession() res = http_session.send(req.prepare()) if res.status_code >= 200 and res.status_code <= 299: return res._content else: raise ES_Exception(res.status_code, res._content) # High-level POST data to Amazon Elasticsearch Service with exponential backoff # according to suggested algorithm: http://docs.aws.amazon.com/general/latest/gr/api-retries.html def post_to_es(payload): '''Post data to ES cluster with exponential backoff''' # Get aws_region and credentials to post signed URL to ES es_region = ES_REGION or os.environ['AWS_REGION'] session = Session({'region': es_region}) creds = get_credentials(session) es_url = urlparse(ES_ENDPOINT) # Extract the domain name in ES_ENDPOINT es_endpoint = es_url.netloc or es_url.path # Post data with exponential backoff retries = 0 while retries < ES_MAX_RETRIES: if retries > 0: seconds = (2 ** retries) * .1 logger.debug('Waiting for %.1f seconds', seconds) time.sleep(seconds) try: es_ret_str = post_data_to_es( payload, es_region, creds, es_endpoint, '/_bulk') logger.debug('Return from ES: %s', es_ret_str) es_ret = json.loads(es_ret_str) if es_ret['errors']: logger.error( 'ES post unsuccessful, errors present, took=%sms', es_ret['took']) # Filter errors es_errors = [item for item in es_ret['items'] if item.get('index', {}).get('error')] logger.error('List of items with errors: %s', json.dumps(es_errors)) else: logger.info('ES post successful, took=%sms', es_ret['took']) break # Sending to ES was ok, break retry loop except ES_Exception as e: if (e.status_code >= 500) and (e.status_code <= 599): retries += 1 # Candidate for retry else: raise # Stop retrying, re-raise exception # Extracts the DynamoDB table from an ARN # ex: arn:aws:dynamodb:eu-west-1:123456789012:table/table-name/stream/2015-11-13T09:23:17.104 should return 'table-name' def get_table_name_from_arn(arn): return arn.split(':')[5].split('/')[1] # Compute a compound doc index from the key(s) of the object in lexicographic order: "k1=key_val1|k2=key_val2" def compute_doc_index(keys_raw, deserializer, formatIndex=False): index = [] for key in sorted(keys_raw): if formatIndex: index.append('{}={}'.format( key, deserializer.deserialize(keys_raw[key]))) else: index.append(deserializer.deserialize(keys_raw[key])) return '|'.join(map(str,index)) def _lambda_handler(event, context): logger.debug('Event: %s', event) records = event['Records'] ddb_deserializer = StreamTypeDeserializer() es_actions = [] # Items to be added/updated/removed from ES - for bulk API cnt_insert = cnt_modify = cnt_remove = 0 for record in records: # Handle both native DynamoDB Streams or Streams data from Kinesis (for manual replay) logger.debug('Record: %s', record) if 
record.get('eventSource') == 'aws:dynamodb': ddb = record['dynamodb'] ddb_table_name = get_table_name_from_arn(record['eventSourceARN']) doc_seq = ddb['SequenceNumber'] elif record.get('eventSource') == 'aws:kinesis': ddb = json.loads(base64.b64decode(record['kinesis']['data'])) ddb_table_name = ddb['SourceTable'] doc_seq = record['kinesis']['sequenceNumber'] else: logger.error('Ignoring non-DynamoDB event sources: %s', record.get('eventSource')) continue # Compute DynamoDB table, type and index for item doc_table = ddb_table_name.lower() doc_type = DOC_TYPE doc_table_parts = doc_table.split('-') doc_es_index_name = doc_table_parts[0] if len(doc_table_parts) > 0 else doc_table # Dispatch according to event TYPE event_name = record['eventName'].upper() # INSERT, MODIFY, REMOVE logger.debug('doc_table=%s, event_name=%s, seq=%s', doc_table, event_name, doc_seq) # Treat events from a Kinesis stream as INSERTs if event_name == 'AWS:KINESIS:RECORD': event_name = 'INSERT' is_ddb_insert_or_update = (event_name == 'INSERT') or (event_name == 'MODIFY') is_ddb_delete = event_name == 'REMOVE' image_name = 'NewImage' if is_ddb_insert_or_update else 'OldImage' if image_name not in ddb: logger.warning( 'Cannot process stream if it does not contain ' + image_name) continue logger.debug(image_name + ': %s', ddb[image_name]) # Deserialize DynamoDB type to Python types doc_fields = ddb_deserializer.deserialize({'M': ddb[image_name]}) # Sync enabled APIs do soft delete. We need to delete the record in ES if _deleted field is set if ES_USE_EXTERNAL_VERSIONING and event_name == 'MODIFY' and '_deleted' in doc_fields and doc_fields['_deleted']: is_ddb_insert_or_update = False is_ddb_delete = True # Update counters if event_name == 'INSERT': cnt_insert += 1 elif event_name == 'MODIFY': cnt_modify += 1 elif event_name == 'REMOVE': cnt_remove += 1 else: logger.warning('Unsupported event_name: %s', event_name) logger.debug('Deserialized doc_fields: %s', doc_fields) if ('Keys' in ddb): doc_id = compute_doc_index(ddb['Keys'], ddb_deserializer) else: logger.error('Cannot find keys in ddb record') # If DynamoDB INSERT or MODIFY, send 'index' to ES if is_ddb_insert_or_update: # Generate ES payload for item action = {'index': {'_index': doc_es_index_name, '_type': doc_type, '_id': doc_id}} # Add external versioning if necessary if ES_USE_EXTERNAL_VERSIONING and '_version' in doc_fields: action['index'].update([ ('version_type', 'external'), ('_version', doc_fields['_version']) ]) doc_fields.pop('_ttl', None) doc_fields.pop('_version', None) # Append ES Action line with 'index' directive es_actions.append(json.dumps(action)) # Append JSON payload es_actions.append(json.dumps(doc_fields, cls=DDBTypesEncoder)) # migration step remove old key if it exists if ('id' in doc_fields) and (event_name == 'MODIFY') : action = {'delete': {'_index': doc_es_index_name, '_type': doc_type, '_id': compute_doc_index(ddb['Keys'], ddb_deserializer, True)}} es_actions.append(json.dumps(action)) # If DynamoDB REMOVE, send 'delete' to ES elif is_ddb_delete: action = {'delete': {'_index': doc_es_index_name, '_type': doc_type, '_id': doc_id}} if ES_USE_EXTERNAL_VERSIONING and '_version' in doc_fields: action['delete'].update([ ('version_type', 'external'), ('_version', doc_fields['_version']) ]) # Action line with 'delete' directive es_actions.append(json.dumps(action)) # Prepare bulk payload es_actions.append('') # Add one empty line to force final \n es_payload = '\n'.join(es_actions) logger.info('Posting to ES: inserts=%s updates=%s deletes=%s, 
total_lines=%s, bytes_total=%s', cnt_insert, cnt_modify, cnt_remove, len(es_actions) - 1, len(es_payload)) post_to_es(es_payload) # Post to ES with exponential backoff # Global lambda handler - catches all exceptions to avoid dead letter in the DynamoDB Stream def lambda_handler(event, context): try: return _lambda_handler(event, context) except Exception: logger.error(traceback.format_exc())
_deserialize_n
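The helper comments in the streaming function above spell out two small string formats: the table name is the sixth `:`-separated ARN field before the `/stream/` suffix, and the compound document id joins the (sorted) key values with `|`, optionally as `k=v` pairs. The snippet below is a minimal, self-contained sketch of just those formats plus the retry back-off schedule; it re-implements the pure helpers with illustrative key names rather than importing the Lambda module or going through StreamTypeDeserializer.

# Standalone sketch of the formats described above; table and key names are made up for illustration.

def table_name_from_arn(arn):
    # 'arn:aws:dynamodb:<region>:<account>:table/<name>/stream/<ts>' -> '<name>'
    return arn.split(':')[5].split('/')[1]

def compound_doc_index(keys, format_index=False):
    # `keys` holds already-deserialized key values in this sketch.
    parts = ['{}={}'.format(k, keys[k]) if format_index else keys[k] for k in sorted(keys)]
    return '|'.join(map(str, parts))

arn = 'arn:aws:dynamodb:eu-west-1:123456789012:table/table-name/stream/2015-11-13T09:23:17.104'
assert table_name_from_arn(arn) == 'table-name'

keys = {'pk': 'user-42', 'sk': 'order-7'}
assert compound_doc_index(keys) == 'user-42|order-7'
assert compound_doc_index(keys, format_index=True) == 'pk=user-42|sk=order-7'

# post_to_es sleeps (2 ** retries) * 0.1 seconds before each retry,
# i.e. 0.2s then 0.4s with ES_MAX_RETRIES = 3.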
pid_saver.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import json import logging import os import signal from typing import List from idb.common.constants import IDB_PID_PATH def save_pid(pid: int) -> None: pids = _get_pids() pids.append(pid) _write_pids(pids=pids) logging.debug(f"saved daemon pid {pid}") def remove_pid(pid: int) -> None: pids = _get_pids() if pids.count(pid) > 0: pids.remove(pid) _write_pids(pids=pids) logging.debug(f"removed daemon pid {pid}") def _write_pids(pids: List[int]) -> None: with open(IDB_PID_PATH, "w") as pid_file: json.dump(pids, pid_file) pid_file.flush() def _has_saved_pids() -> bool: pids = _get_pids() logging.debug(f"has saved pids {pids}") return len(pids) > 0 def _get_pids() -> List[int]: try: with open(IDB_PID_PATH) as pid_file: return json.load(pid_file) except Exception: return [] def
() -> None:
    if os.path.exists(IDB_PID_PATH):
        # Empty the file
        with open(IDB_PID_PATH, "wb", buffering=0) as pid_file:
            pid_file.flush()


async def kill_saved_pids() -> None:
    if not _has_saved_pids():
        logging.debug("no daemon pid found")
        return
    for pid in _get_pids():
        try:
            os.kill(pid, signal.SIGTERM)
            logging.info(f"stopped daemon with pid {pid}")
        except (OSError, ProcessLookupError):
            pass
    _clear_saved_pids()
_clear_saved_pids
test_loader.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals, absolute_import from unittest.mock import patch, mock_open from nose.tools import assert_equals from django.test import TestCase from vaas.configuration.loader import YamlConfigLoader USER_HOME_PATH = '/user/path/.vaas' VAAS_APP_RESOURCES_PATH = '/vaas/app/resources' class YamlConfigLoaderTest(TestCase): def file_exists_side_effect(self, arg): return arg in self.file_existence and self.file_existence[arg] def setUp(self):
def test_should_init_search_paths_with_user_and_resources_paths_if_user_path_exists(self): self.file_existence = { USER_HOME_PATH: True } directories = YamlConfigLoader().config_directories assert_equals(2, len(directories)) assert_equals([USER_HOME_PATH, VAAS_APP_RESOURCES_PATH], directories) def test_should_determine_file_from_users_location_if_exists(self): expected_path = "{}/{}".format(USER_HOME_PATH, 'test.yaml') self.file_existence = { USER_HOME_PATH: True, expected_path: True } assert_equals(expected_path, YamlConfigLoader().determine_config_file('test.yaml')) def test_should_determine_file_from_resource_location_if_exists(self): expected_path = "{}/{}".format(VAAS_APP_RESOURCES_PATH, 'test.yaml') self.file_existence = { USER_HOME_PATH: False, expected_path: True } assert_equals(expected_path, YamlConfigLoader().determine_config_file('test.yaml')) def test_should_not_determine_file_if_not_exists_in_any_location(self): resource_path = "{}/{}".format(VAAS_APP_RESOURCES_PATH, 'test.yaml') self.file_existence = { USER_HOME_PATH: False, resource_path: False } assert_equals(None, YamlConfigLoader().determine_config_file('test.yaml')) @patch('builtins.open', mock_open(read_data="key1: value1\nkey2: value2")) def test_should_return_config_tree(self): expected_tree = {'key1': 'value1', 'key2': 'value2'} self.file_existence = { USER_HOME_PATH: False, "{}/{}".format(VAAS_APP_RESOURCES_PATH, 'test.yaml'): True } assert_equals(expected_tree, YamlConfigLoader().get_config_tree('test.yaml'))
self.file_existence = {} exists_patcher = patch('os.path.exists') file_exists_mock = exists_patcher.start() file_exists_mock.side_effect = self.file_exists_side_effect expand_patcher = patch('os.path.expanduser') expanduser_mock = expand_patcher.start() expanduser_mock.return_value = USER_HOME_PATH abspath_patcher = patch('os.path.abspath') abspath_mock = abspath_patcher.start() abspath_mock.return_value = VAAS_APP_RESOURCES_PATH self.addCleanup(exists_patcher.stop) self.addCleanup(expand_patcher.stop) self.addCleanup(abspath_patcher.stop)
csrf-demo.js
"use strict"; /* eslint-disable */ var baseUrl = "http://dev.mysite.com:3000"; var defaultUseLocalStorage = true; var useLocalStorage; var HEADER_TOKEN; // save header token if use localStorage is off var headerName = "x-csrf-jwt"; var firstPostHeaderName = "x-csrf-first-post"; var localHeaderSaveKey = "csrf-header-token"; var useLocalStorageFlagKey = "csrf-use-local-storage"; var initialHeaderTokenKey = "initial-csrf-header-token"; // // Save CSRF use localStorage flag to localStorage and allow user to toggle it // function saveUseLocalStorageFlag() { if (window.localStorage) { window.localStorage.setItem(useLocalStorageFlagKey, useLocalStorage ? "true" : ""); } } function initUseLocalStorageFlag() { if (useLocalStorage === undefined && window.localStorage) { var saveFlag = window.localStorage.getItem(useLocalStorageFlagKey); if (saveFlag !== null) { useLocalStorage = Boolean(saveFlag); } else { useLocalStorage = defaultUseLocalStorage; saveUseLocalStorageFlag(); } } } function
() { initUseLocalStorageFlag(); var statusElm = document.getElementById("useLocalStorage"); if (statusElm) { statusElm.innerText = useLocalStorage ? "on" : "off"; } } function toggleUseLocalStorage() { useLocalStorage = !useLocalStorage; setUseLocalStorageStatus(); saveUseLocalStorageFlag(); } // // Save header token into an internal state or to localStorage if it is turned on // function saveHeaderToken(token) { if (useLocalStorage && window.localStorage) { window.localStorage.setItem(localHeaderSaveKey, token); window.localStorage.removeItem(initialHeaderTokenKey); } else { HEADER_TOKEN = token; } } // // Retrieve header token from an internal state or the localStorage if it is turned on // function getHeaderToken() { if (!useLocalStorage || !window.localStorage) return { token: HEADER_TOKEN }; var initialHeaderToken = window.localStorage.getItem(initialHeaderTokenKey); if (initialHeaderToken !== null) { console.log("Using initial CSRF header token"); window.localStorage.removeItem(initialHeaderTokenKey); return { token: initialHeaderToken, firstPost: true }; } return { token: window.localStorage.getItem(localHeaderSaveKey) }; } // // Save header token from AJAX (fetch) response // function saveResponseHeader(res) { var h = res.headers.get(headerName); if (h) { console.log("csrf header token", h); saveHeaderToken(h); } else { console.log("no csrf header token received"); } } function testPost(error, retry) { console.log("testPost"); var headerToken = ""; var headers = { "content-type": "application/json" }; // simulate invalid CSRF token to trigger error response if (!error) { var header = getHeaderToken(); headers[headerName] = header.token; if (header.firstPost) { headers[firstPostHeaderName] = "1"; } } else { headers[headerName] = ""; } return fetch(baseUrl + "/api/1", { body: "{}", headers, // must set credentials for browser to send cookies // https://github.com/github/fetch#sending-cookies credentials: "same-origin", mode: "cors", method: "POST" }).then(res => { saveResponseHeader(res); // if response status is not 200, then retry once if (res.status !== 200 && !retry) { console.log("Response status is not 200, retrying."); return testPost(false, true); } return res.json().then(data => { console.log("testPost data", data); }); }); } function testGet() { console.log("testGet"); return fetch(baseUrl + "/api/2", { credentials: "same-origin" }).then(res => { saveResponseHeader(res); return res.json().then(data => { console.log("testGet data", data); }); }); }
setUseLocalStorageStatus
data-table-group-field-extractor-callback.model.ts
import { DataTableRow } from './data-table-row.model'; /** * Dynamic row span extractor callback * @param row Target data row
* @return Row span for current row */ export type DataTableDynamicRowSpanExtractorCallback<T> = (row: DataTableRow<T>) => number;
permissions.py
from collections import defaultdict, namedtuple

AclPermission = namedtuple('AclPermission', 'app, action')

# Null rule. Only useful in tests really as no access group should have this.
NONE = AclPermission('None', 'None')

# A special wildcard permission to use when checking if someone has access to
# any admin, or if an admin is accessible by someone with any Admin:<something>
# permission.
ANY_ADMIN = AclPermission('Admin', '%')

# Another special permission, that only few (2-3) admins have. This grants
# access to anything.
SUPERPOWERS = AclPermission('*', '*')

# Can access admin-specific tools.
ADMIN_TOOLS = AclPermission('Admin', 'Tools')

# Can modify editorial content on the site.
ADMIN_CURATION = AclPermission('Admin', 'Curation')

# Can edit the properties of any add-on (pseudo-admin).
ADDONS_EDIT = AclPermission('Addons', 'Edit')

# Can view deleted add-ons in the API.
ADDONS_VIEW_DELETED = AclPermission('Addons', 'ViewDeleted')

# Can view only the reviewer tools.
REVIEWER_TOOLS_VIEW = AclPermission('ReviewerTools', 'View')

# These users gain access to the accounts API to super-create users.
ACCOUNTS_SUPER_CREATE = AclPermission('Accounts', 'SuperCreate')

# Can review a listed add-on.
ADDONS_REVIEW = AclPermission('Addons', 'Review')

# Can review an unlisted add-on.
ADDONS_REVIEW_UNLISTED = AclPermission('Addons', 'ReviewUnlisted')

# Can access add-ons post-review information.
ADDONS_POST_REVIEW = AclPermission('Addons', 'PostReview')

# Can submit a content review for a listed add-on.
ADDONS_CONTENT_REVIEW = AclPermission('Addons', 'ContentReview')

# Can edit the message of the day in the reviewer tools.
ADDON_REVIEWER_MOTD_EDIT = AclPermission('AddonReviewerMOTD', 'Edit')

# Can review a background theme (persona).
THEMES_REVIEW = AclPermission('Personas', 'Review')

# Can review a static theme.
STATIC_THEMES_REVIEW = AclPermission('Addons', 'ThemeReview')

# Can edit all collections.
COLLECTIONS_EDIT = AclPermission('Collections', 'Edit')

# Can contribute to community managed collection: COLLECTION_FEATURED_THEMES_ID
COLLECTIONS_CONTRIBUTE = AclPermission('Collections', 'Contribute')

# Can view statistics for all addons, regardless of privacy settings.
STATS_VIEW = AclPermission('Stats', 'View')

# Can submit experiments.
EXPERIMENTS_SUBMIT = AclPermission('Experiments', 'submit')

# Can localize all locales.
LOCALIZER = AclPermission('Localizer', '%')

# Can edit user accounts.
USERS_EDIT = AclPermission('Users', 'Edit')

# Can moderate add-on ratings submitted by users.
RATINGS_MODERATE = AclPermission('Ratings', 'Moderate')

# Can access advanced reviewer features meant for admins, such as disabling an
# add-on or clearing needs admin review flags.
REVIEWS_ADMIN = AclPermission('Reviews', 'Admin')

# Can access advanced admin features, like deletion.
ADMIN_ADVANCED = AclPermission('Admin', 'Advanced')

# Can add/edit/delete DiscoveryItems.
DISCOVERY_EDIT = AclPermission('Discovery', 'Edit')

# Can list/access abuse reports
ABUSEREPORTS_EDIT = AclPermission('AbuseReports', 'Edit')

# All permissions, for easy introspection
PERMISSIONS_LIST = [
    x for x in vars().values() if isinstance(x, AclPermission)]

# Mapping between django-style object permissions and our own. By default,
# require superuser admins (which also have all other permissions anyway) to do
# something, and then add some custom ones.
DJANGO_PERMISSIONS_MAPPING = defaultdict(lambda: SUPERPOWERS) DJANGO_PERMISSIONS_MAPPING.update({ 'abuse.change_abusereport': ABUSEREPORTS_EDIT, 'abuse.delete_abusereport': ADMIN_ADVANCED, # Note that ActivityLog's ModelAdmin actually forbids deletion entirely. # This is just here to allow deletion of users, because django checks # foreign keys even though users are only soft-deleted and related objects # will be kept. 'activity.delete_activitylog': ADMIN_ADVANCED, 'addons.change_addon': ADDONS_EDIT,
# In addition, the modeladmin will also check for Addons:Edit and give them # read-only access to the changelist (obj=None passed to the # has_change_permission() method) 'addons.change_replacementaddon': ADMIN_CURATION, 'addons.add_replacementaddon': ADMIN_CURATION, 'addons.delete_replacementaddon': ADMIN_CURATION, 'bandwagon.change_collection': COLLECTIONS_EDIT, 'bandwagon.delete_collection': ADMIN_ADVANCED, 'discovery.add_discoveryitem': DISCOVERY_EDIT, 'discovery.change_discoveryitem': DISCOVERY_EDIT, 'discovery.delete_discoveryitem': DISCOVERY_EDIT, 'files.change_file': ADMIN_ADVANCED, 'reviewers.delete_reviewerscore': ADMIN_ADVANCED, 'users.change_userprofile': USERS_EDIT, 'users.delete_userprofile': ADMIN_ADVANCED, 'ratings.change_rating': RATINGS_MODERATE, 'ratings.delete_rating': ADMIN_ADVANCED, 'versions.change_version': ADMIN_ADVANCED, })
'addons.delete_addonuser': ADMIN_ADVANCED, # Users with Admin:Curation can do anything to ReplacementAddon.
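To make the defaultdict fallback above concrete: looking up any django-style permission that is not listed in DJANGO_PERMISSIONS_MAPPING yields SUPERPOWERS, so unmapped admin actions remain superuser-only while listed entries map to their custom rules. The following is a minimal, self-contained sketch of that behaviour; the 'foo.change_bar' key is hypothetical and only the shape of the real mapping is reproduced.

from collections import defaultdict, namedtuple

AclPermission = namedtuple('AclPermission', 'app, action')
SUPERPOWERS = AclPermission('*', '*')
USERS_EDIT = AclPermission('Users', 'Edit')

mapping = defaultdict(lambda: SUPERPOWERS)
mapping.update({'users.change_userprofile': USERS_EDIT})

# An explicitly mapped permission resolves to its custom rule...
assert mapping['users.change_userprofile'] == USERS_EDIT
# ...while anything unmapped (the hypothetical 'foo.change_bar') falls back to SUPERPOWERS.
assert mapping['foo.change_bar'] == SUPERPOWERS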
api_op_DescribeStream.go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package iot import ( "context" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/internal/awsutil" "github.com/aws/aws-sdk-go-v2/private/protocol" ) type DescribeStreamInput struct { _ struct{} `type:"structure"` // The stream ID. // // StreamId is a required field StreamId *string `location:"uri" locationName:"streamId" min:"1" type:"string" required:"true"` } // String returns the string representation func (s DescribeStreamInput) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeStreamInput) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "DescribeStreamInput"} if s.StreamId == nil { invalidParams.Add(aws.NewErrParamRequired("StreamId")) } if s.StreamId != nil && len(*s.StreamId) < 1 {
if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s DescribeStreamInput) MarshalFields(e protocol.FieldEncoder) error { if s.StreamId != nil { v := *s.StreamId metadata := protocol.Metadata{} e.SetValue(protocol.PathTarget, "streamId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } return nil } type DescribeStreamOutput struct { _ struct{} `type:"structure"` // Information about the stream. StreamInfo *StreamInfo `locationName:"streamInfo" type:"structure"` } // String returns the string representation func (s DescribeStreamOutput) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s DescribeStreamOutput) MarshalFields(e protocol.FieldEncoder) error { if s.StreamInfo != nil { v := s.StreamInfo metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "streamInfo", v, metadata) } return nil } const opDescribeStream = "DescribeStream" // DescribeStreamRequest returns a request value for making API operation for // AWS IoT. // // Gets information about a stream. // // // Example sending a request using DescribeStreamRequest. // req := client.DescribeStreamRequest(params) // resp, err := req.Send(context.TODO()) // if err == nil { // fmt.Println(resp) // } func (c *Client) DescribeStreamRequest(input *DescribeStreamInput) DescribeStreamRequest { op := &aws.Operation{ Name: opDescribeStream, HTTPMethod: "GET", HTTPPath: "/streams/{streamId}", } if input == nil { input = &DescribeStreamInput{} } req := c.newRequest(op, input, &DescribeStreamOutput{}) return DescribeStreamRequest{Request: req, Input: input, Copy: c.DescribeStreamRequest} } // DescribeStreamRequest is the request type for the // DescribeStream API operation. type DescribeStreamRequest struct { *aws.Request Input *DescribeStreamInput Copy func(*DescribeStreamInput) DescribeStreamRequest } // Send marshals and sends the DescribeStream API request. func (r DescribeStreamRequest) Send(ctx context.Context) (*DescribeStreamResponse, error) { r.Request.SetContext(ctx) err := r.Request.Send() if err != nil { return nil, err } resp := &DescribeStreamResponse{ DescribeStreamOutput: r.Request.Data.(*DescribeStreamOutput), response: &aws.Response{Request: r.Request}, } return resp, nil } // DescribeStreamResponse is the response type for the // DescribeStream API operation. type DescribeStreamResponse struct { *DescribeStreamOutput response *aws.Response } // SDKResponseMetdata returns the response metadata for the // DescribeStream request. func (r *DescribeStreamResponse) SDKResponseMetdata() *aws.Response { return r.response }
invalidParams.Add(aws.NewErrParamMinLen("StreamId", 1)) }
main.rs
mod solitaire; mod util; mod window_target; use robmikh_common::desktop::dispatcher_queue::DispatcherQueueControllerExtensions; use solitaire::Solitaire; use util::error::Result; use window_target::CompositionDesktopWindowTargetSource; use winit::{ event::{ElementState, Event, MouseButton, WindowEvent}, event_loop::{ControlFlow, EventLoop}, window::WindowBuilder, }; use windows::{Foundation::Numerics::Vector2, System::DispatcherQueueController, UI::Composition::Compositor, Win32::System::WinRT::{RoInitialize, RO_INIT_SINGLETHREADED}};
let event_loop = EventLoop::new(); let window = WindowBuilder::new().build(&event_loop).unwrap(); window.set_title("Solitaire"); let compositor = Compositor::new()?; let target = window.create_window_target(&compositor, false)?; let root = compositor.CreateContainerVisual()?; root.SetRelativeSizeAdjustment(Vector2::new(1.0, 1.0))?; target.SetRoot(&root)?; let window_size = window.inner_size(); let window_size = Vector2::new(window_size.width as f32, window_size.height as f32); let mut game = Solitaire::new(&root, window_size)?; event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Wait; match event { Event::WindowEvent { event: WindowEvent::CloseRequested, window_id, } if window_id == window.id() => *control_flow = ControlFlow::Exit, Event::WindowEvent { event: WindowEvent::Resized(size), .. } => { let size = Vector2::new(size.width as f32, size.height as f32); //game.on_parent_size_changed(&size).unwrap(); } Event::WindowEvent { event: WindowEvent::CursorMoved { position, .. }, .. } => { let point = Vector2::new(position.x as f32, position.y as f32); //game.on_pointer_moved(&point).unwrap(); } Event::WindowEvent { event: WindowEvent::MouseInput { state, button, .. }, .. } => { if state == ElementState::Pressed { //game.on_pointer_pressed(button == MouseButton::Right, false) // .unwrap(); } } _ => (), } }); }
fn main() -> Result<()> { unsafe { RoInitialize(RO_INIT_SINGLETHREADED)? }; let _controller = DispatcherQueueController::create_dispatcher_queue_controller_for_current_thread()?;
termios.h.ts
# Copyright 2002-2014, The TenDRA Project. # Copyright 1997, United Kingdom Secretary of State for Defence. # # See doc/copyright/ for the full copyright terms. +IMPLEMENT "cae/xpg3", "termios.h.ts" ; /* * TODO: unsure of the type for these. posix/termios.h.ts has them as int: * * +CONST tcflag_t BRKINT, ICRNL, IGNBRK, IGNCR, IGNPAR, INLCR ; * +CONST tcflag_t INPCK, ISTRIP, IUCLC, IXANY, IXOFF, IXON, PARMRK ; * * +CONST tcflag_t OPOST, OLCUC, ONLCR, OCRNL, ONOCR, ONLRET ; * +CONST tcflag_t OFILL, NLDLY, CRDLY, TABDLY, BSDLY, VTDLY, FFDLY ; */
* XXX: tcsetattr() and <termios.h> pages disagree on 'const', we align with POSIX * +FUNC int tcsetattr ( int, int, struct termios * ) ; */
/*
storage.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package storage import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/genericapiserver/registry/generic" genericregistry "k8s.io/kubernetes/pkg/genericapiserver/registry/generic/registry" "k8s.io/kubernetes/pkg/genericapiserver/registry/rest" "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/persistentvolume" ) type REST struct { *genericregistry.Store } // NewREST returns a RESTStorage object that will work against persistent volumes. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST)
// StatusREST implements the REST endpoint for changing the status of a persistentvolume. type StatusREST struct { store *genericregistry.Store } func (r *StatusREST) New() runtime.Object { return &api.PersistentVolume{} } // Get retrieves the object from the storage. It is required to support Patch. func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { return r.store.Get(ctx, name, options) } // Update alters the status subset of an object. func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { return r.store.Update(ctx, name, objInfo) }
{ store := &genericregistry.Store{ Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.PersistentVolume{} }, NewListFunc: func() runtime.Object { return &api.PersistentVolumeList{} }, ObjectNameFunc: func(obj runtime.Object) (string, error) { return obj.(*api.PersistentVolume).Name, nil }, PredicateFunc: persistentvolume.MatchPersistentVolumes, QualifiedResource: api.Resource("persistentvolumes"), WatchCacheSize: cachesize.GetWatchCacheSizeByResource("persistentvolumes"), CreateStrategy: persistentvolume.Strategy, UpdateStrategy: persistentvolume.Strategy, DeleteStrategy: persistentvolume.Strategy, ReturnDeletedObject: true, } options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: persistentvolume.GetAttrs} if err := store.CompleteWithOptions(options); err != nil { panic(err) // TODO: Propagate error up } statusStore := *store statusStore.UpdateStrategy = persistentvolume.StatusStrategy return &REST{store}, &StatusREST{store: &statusStore} }
LogicalCores.android-emscripten-fuschia-linux-uclibc.rs
// This file is part of cpu-affinity. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/cpu-affinity/master/COPYRIGHT. No part of cpu-affinity, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file. // Copyright © 2018 The developers of cpu-affinity. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/cpu-affinity/master/COPYRIGHT. impl LogicalCores { const _IsSettingProcessAffinitySupported: bool = true; #[cfg(any(target_os = "android"))] const _IsSettingThreadAffinitySupported: bool = false; #[cfg(any(target_os = "emscripten", target_os = "fuschia", target_os = "linux", target_env = "uclibc"))] const _IsSettingThreadAffinitySupported: bool = false; #[inline(always)] fn _set_process_affinity(&self, process_identifier: ProcessIdentifier) -> io::Result<()> {
#[inline(always)] fn _set_thread_affinity(&self, thread_identifier: ThreadIdentifier) -> io::Result<()> { self.to_cpu_set().set_thread_affinity(thread_identifier) } #[inline(always)] fn to_cpu_set(&self) -> CpuSet { let mut cpu_set = CpuSet::default(); for logical_core in self.0.iter() { cpu_set.set_hyper_thread(HyperThread::from(*logical_core)); } cpu_set } }
self.to_cpu_set().set_process_affinity(process_identifier) }
unix.rs
//! Unix-specific types for signal handling.
//!
//! This module is only defined on Unix platforms and contains the primary
//! `Signal` type for receiving notifications of signals.

#![cfg(unix)]

use std::cell::UnsafeCell;
use std::io;
use std::mem;
use std::ops::Deref;
use std::sync::Once;

use may::sync::mpsc::{self, Receiver, Sender};
use may::sync::Mutex;

use libc::c_int;
pub use libc::{SIGALRM, SIGHUP, SIGPIPE, SIGQUIT, SIGTRAP};
pub use libc::{SIGINT, SIGTERM, SIGUSR1, SIGUSR2};

// Number of different unix signals
const SIGNUM: usize = 32;

struct SignalInfo {
    // The ones interested in this signal
    recipients: Mutex<Vec<Box<Sender<()>>>>,

    init: Once,
    initialized: UnsafeCell<bool>,
    prev: UnsafeCell<libc::sigaction>,
}

impl Default for SignalInfo {
    fn default() -> SignalInfo {
        SignalInfo {
            init: Once::new(),
            initialized: UnsafeCell::new(false),
            recipients: Mutex::new(Vec::new()),
            prev: UnsafeCell::new(unsafe { mem::zeroed() }),
        }
    }
}

struct Globals {
    signals: [SignalInfo; SIGNUM],
}

static mut GLOBALS: *mut Globals = 0 as *mut Globals;

fn globals() -> &'static Globals {
    static INIT: Once = Once::new();

    unsafe {
        INIT.call_once(|| {
            let globals = Globals {
                signals: Default::default(),
            };
            GLOBALS = Box::into_raw(Box::new(globals));
        });
        &*GLOBALS
    }
}

/// Our global signal handler for all signals registered by this module.
///
/// The purpose of this signal handler is to primarily:
///
/// 1. Flag that our specific signal was received (e.g. store an atomic flag)
/// 2. Wake up driver tasks by writing a byte to a pipe
///
/// Those two operations should both be async-signal safe. After that's done we
/// just try to call a previous signal handler, if any, to be "good denizens of
/// the internet"
extern "C" fn handler(signum: c_int, info: *mut libc::siginfo_t, ptr: *mut libc::c_void) {
    type FnSigaction = extern "C" fn(c_int, *mut libc::siginfo_t, *mut libc::c_void);
    type FnHandler = extern "C" fn(c_int);

    unsafe {
        let slot = match (*GLOBALS).signals.get(signum as usize) {
            Some(slot) => slot,
            None => return,
        };

        // broadcast the signal
        for tx in slot.recipients.lock().unwrap().iter() {
            tx.send(()).unwrap();
        }

        let fnptr = (*slot.prev.get()).sa_sigaction;
        if fnptr == 0 || fnptr == libc::SIG_DFL || fnptr == libc::SIG_IGN
if (*slot.prev.get()).sa_flags & libc::SA_SIGINFO == 0 { let action = mem::transmute::<usize, FnHandler>(fnptr); action(signum) } else { let action = mem::transmute::<usize, FnSigaction>(fnptr); action(signum, info, ptr) } } } /// Enable this module to receive signal notifications for the `signal` /// provided. /// /// This will register the signal handler if it hasn't already been registered, /// returning any error along the way if that fails. fn signal_enable(signal: c_int) -> io::Result<()> { let siginfo = match globals().signals.get(signal as usize) { Some(slot) => slot, None => return Err(io::Error::new(io::ErrorKind::Other, "signal too large")), }; unsafe { let mut err = None; siginfo.init.call_once(|| { let mut new: libc::sigaction = mem::zeroed(); new.sa_sigaction = handler as usize; new.sa_flags = libc::SA_RESTART | libc::SA_SIGINFO | libc::SA_NOCLDSTOP; if libc::sigaction(signal, &new, &mut *siginfo.prev.get()) != 0 { err = Some(io::Error::last_os_error()); } else { *siginfo.initialized.get() = true; } }); if let Some(err) = err { return Err(err); } if *siginfo.initialized.get() { Ok(()) } else { Err(io::Error::new( io::ErrorKind::Other, "failed to register signal handler", )) } } } /// An implementation of `Stream` for receiving a particular type of signal. /// /// This structure deref to mpsc::Receiver<()> and represents notifications /// of the current process receiving a particular signal. The signal being /// listened for is passed to `Signal::new`, and every signal is then /// yielded as each element for the stream. /// pub struct Signal { signal: c_int, // Used only as an identifier. We place the real sender into a Box, so it // stays on the same address forever. That gives us a unique pointer, so we // can use this to identify the sender in a Vec and delete it when we are // dropped. id: *const Sender<()>, rx: Receiver<()>, } // The raw pointer prevents the compiler from determining it as Send // automatically. But the only thing we use the raw pointer for is to identify // the correct Box to delete, not manipulate any data through that. unsafe impl Send for Signal {} unsafe impl Sync for Signal {} impl Signal { /// Creates a new stream which will receive notifications when the current /// process receives the signal `signal`. /// /// The `Signal` stream is an infinite stream which will receive /// notifications whenever a signal is received. More documentation can be /// found on `Signal` itself, but to reiterate: /// /// * Once a signal handler is registered with the process the underlying /// libc signal handler is never unregistered. /// /// A `Signal` stream can be created for a particular signal number /// multiple times. When a signal is received then all the associated /// channels will receive the signal notification. pub fn new(signal: c_int) -> io::Result<Signal> { // Turn the signal delivery on once we are ready for it signal_enable(signal)?; // One wakeup in a queue is enough, no need for us to buffer up any // more. let (tx, rx) = mpsc::channel(); let tx = Box::new(tx); let id: *const _ = &*tx; let idx = signal as usize; globals().signals[idx].recipients.lock().unwrap().push(tx); Ok(Signal { rx, id, signal }) } } impl Deref for Signal { type Target = mpsc::Receiver<()>; fn deref(&self) -> &mpsc::Receiver<()> { &self.rx } } impl Drop for Signal { fn drop(&mut self) { let idx = self.signal as usize; let mut list = globals().signals[idx].recipients.lock().unwrap(); list.retain(|sender| &**sender as *const _ != self.id); } }
{ return; }
role_profile.py
# -*- coding: utf-8 -*- # Copyright (c) 2017, Frappe Technologies and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document class RoleProfile(Document): def autoname(self): """set name as Role Profile name""" self.name = self.role_profile def
(self):
		"""Changes in role_profile are reflected across all of its users"""
		users = frappe.get_all("User", filters={"role_profile_name": self.name})
		roles = [role.role for role in self.roles]
		for d in users:
			user = frappe.get_doc("User", d)
			user.set("roles", [])
			user.add_roles(*roles)
on_update
0003_set_site_domain_and_name.py
""" To understand why this file is here, please read: http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django """ from django.conf import settings from django.db import migrations def update_site_forward(apps, schema_editor): """Set site domain and name.""" Site = apps.get_model("sites", "Site") Site.objects.update_or_create( id=settings.SITE_ID, defaults={ "domain": "example.com", "name": "django-react-hybrid", }, ) def
(apps, schema_editor): """Revert site domain and name to default.""" Site = apps.get_model("sites", "Site") Site.objects.update_or_create( id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"} ) class Migration(migrations.Migration): dependencies = [("sites", "0002_alter_domain_unique")] operations = [migrations.RunPython(update_site_forward, update_site_backward)]
update_site_backward
styled-components.js
import styled from "styled-components" import Image from "gatsby-image" //Assets import variables from "../../assets/styles/variables" import { breakpoints } from "../../assets/styles/breakpoints" // spring: import { animated as a } from "react-spring" const Container = styled.section` background: whitesmoke; position: relative; z-index: 0; padding: 0 0 100px; ` const Title = styled(a.h2)` color: black; text-align: center; width: 100%; margin: 100px auto 60px; position: relative; z-index: 0; line-height: 39px; @media screen and (min-width: ${breakpoints.large}px) { margin: 180px auto 80px; } ` const Description = styled(a.div)` margin: 0 0 60px; a { color: ${variables.primary}; text-decoration: none; position: relative; :after { content: "";
bottom: 0; right: 0; background: ${variables.primary}; width: 100%; -webkit-transition: width 0.4s; transition: width 0.4s; } &:hover:after { width: 0; } } ` const DecorationLayer = styled(a.div)` display: flex; margin-bottom: 20px; position: relative; overflow: hidden; width: 100%; box-shadow: 0px 3px 4px rgba(0, 0, 0, 0.5); border-radius: 5px; margin: 20px auto; user-select: none; max-width: 500px; ::before { content: " "; background: ${`radial-gradient(${variables.primary}, transparent)`}; z-index: 0; height: 180px; position: absolute; -webkit-transform: rotate(-3deg); -ms-transform: rotate(-3deg); transform: rotate(-3deg); width: 1000px; bottom: -110px; right: -210px; } ::after { content: " "; z-index: 1; height: 250px; position: absolute; -webkit-transform: rotate(106deg); -ms-transform: rotate(106deg); transform: rotate(106deg); width: 1000px; bottom: 200px; right: -630px; opacity: 0.7; background: ${`radial-gradient(${variables.primaryLight}, transparent)`}; } ` const ProjectCardContainer = styled.div` display: flex; flex-direction: column; background: white; text-align: center; flex-basis: 100%; width: 100%; margin: 0 auto; padding: 30px 20px 100px; overflow: hidden; position: relative; ::before { content: " "; background: ${`radial-gradient(${variables.primary}, transparent)`}; z-index: 0; height: 180px; position: absolute; -webkit-transform: rotate(-6deg); -ms-transform: rotate(-6deg); transform: rotate(-6deg); width: 700px; top: -90px; right: -90px; } ::after { content: " "; right: 0; z-index: 1; height: 180px; position: absolute; -webkit-transform: rotate(99deg); -ms-transform: rotate(99deg); transform: rotate(99deg); width: 1500px; top: -210px; left: -764px; opacity: 0.7; background: ${`radial-gradient(${variables.primaryLight}, transparent)`}; } a { margin: auto auto 0; text-decoration: none; color: royalblue; font-size: 16px; font-weight: 700; line-height: 24px; padding: 12px 30px; border-radius: 30px; user-select: none; outline: 0; width: fit-content; background: white; color: royalblue; border: 1px solid royalblue; transition: 0.4s all ease; &:hover { background: royalblue; color: white; } } ` const CardHeader = styled.div` width: inherit; display: flex; flex-direction: column; justify-content: center; align-items: center; z-index: 10; ` const ProjectTitle = styled.h3` color: white; text-shadow: 1px 1px 1px gray; font-size: 24px; ` const ProjectSubTitle = styled.h4` color: ${variables.primaryLight}; font-size: 20px; ` const ProjectImageContainer = styled.div` width: 80%; height: 200px; margin: 15px auto 30px; box-shadow: 0 3px 4px 0 rgba(0, 0, 0, 0.3); ` const ProjectImage = styled(Image)` min-height: 170px; height: 100%; img { object-position: top !important; } ` const Divider = styled.hr` margin: 15px 0; background: #d8d8d8; border: none; height: 1px; ` const ProjectDescription = styled.div` font-weight: light; padding: 20px auto; margin: 30px auto 45px; ul { margin: 0; } ` const DescriptionPoint = styled.li` text-align: left; ` export { Container, Title, Description, DecorationLayer, ProjectCardContainer, CardHeader, ProjectTitle, ProjectSubTitle, ProjectImageContainer, ProjectImage, Divider, ProjectDescription, DescriptionPoint, }
height: 1px; position: absolute;
splunkenv.py
# Copyright 2016 Splunk, Inc. # SPDX-FileCopyrightText: 2020 2020 # # SPDX-License-Identifier: Apache-2.0 """ Splunk platform related utilities. """ import os import os.path as op import subprocess import socket try: from ConfigParser import ConfigParser CONF_PARSER_KWARGS = {} except ImportError: from configparser import ConfigParser CONF_PARSER_KWARGS = {"strict": False} from io import StringIO from . import utils __all__ = [ "make_splunkhome_path", "get_splunk_host_info", "get_splunk_bin", "get_splunkd_access_info", "get_splunkd_uri", "get_conf_key_value", "get_conf_stanza", "get_conf_stanzas", ] ETC_LEAF = "etc" # See validateSearchHeadPooling() in src/libbundle/ConfSettings.cpp on_shared_storage = [ os.path.join(ETC_LEAF, "apps"), os.path.join(ETC_LEAF, "users"), os.path.join("var", "run", "splunk", "dispatch"), os.path.join("var", "run", "splunk", "srtemp"), os.path.join("var", "run", "splunk", "rss"), os.path.join("var", "run", "splunk", "scheduler"), os.path.join("var", "run", "splunk", "lookup_tmp"), ] def _splunk_home(): return os.path.normpath(os.environ["SPLUNK_HOME"]) def _splunk_etc(): try: result = os.environ["SPLUNK_ETC"] except KeyError:
result = op.join(_splunk_home(), ETC_LEAF) return os.path.normpath(result) def _get_shared_storage(): """Get splunk shared storage name. :returns: Splunk shared storage name. :rtype: ``string`` """ try: state = get_conf_key_value("server", "pooling", "state") storage = get_conf_key_value("server", "pooling", "storage") except KeyError: state = "disabled" storage = None if state == "enabled" and storage: return storage return None # Verify path prefix and return true if both paths have drives def _verify_path_prefix(path, start): path_drive = os.path.splitdrive(path)[0] start_drive = os.path.splitdrive(start)[0] return len(path_drive) == len(start_drive) def make_splunkhome_path(parts): """Construct absolute path by $SPLUNK_HOME and `parts`. Concatenate $SPLUNK_HOME and `parts` to an absolute path. For example, `parts` is ['etc', 'apps', 'Splunk_TA_test'], the return path will be $SPLUNK_HOME/etc/apps/Splunk_TA_test. Note: this function assumed SPLUNK_HOME is in environment varialbes. :param parts: Path parts. :type parts: ``list, tuple`` :returns: Absolute path. :rtype: ``string`` :raises ValueError: Escape from intended parent directories. """ relpath = os.path.normpath(os.path.join(*parts)) basepath = None shared_storage = _get_shared_storage() if shared_storage: for candidate in on_shared_storage: # SPL-100508 On windows if the path is missing the drive letter, # construct fullpath manually and call relpath if os.name == "nt" and not _verify_path_prefix(relpath, candidate): break if os.path.relpath(relpath, candidate)[0:2] != "..": basepath = shared_storage break if basepath is None: etc_with_trailing_sep = os.path.join(ETC_LEAF, "") if relpath == ETC_LEAF or relpath.startswith(etc_with_trailing_sep): # Redirect $SPLUNK_HOME/etc to $SPLUNK_ETC. basepath = _splunk_etc() # Remove leading etc (and path separator, if present). Note: when # emitting $SPLUNK_ETC exactly, with no additional path parts, we # set <relpath> to the empty string. relpath = relpath[4:] else: basepath = _splunk_home() fullpath = os.path.normpath(os.path.join(basepath, relpath)) # Check that we haven't escaped from intended parent directories. if os.path.relpath(fullpath, basepath)[0:2] == "..": raise ValueError( 'Illegal escape from parent directory "%s": %s' % (basepath, fullpath) ) return fullpath def get_splunk_host_info(): """Get splunk host info. :returns: Tuple of (server_name, host_name). :rtype: ``tuple`` """ server_name = get_conf_key_value("server", "general", "serverName") host_name = socket.gethostname() return (server_name, host_name) def get_splunk_bin(): """Get absolute path of splunk CLI. :returns: absolute path of splunk CLI :rtype: ``string`` """ if os.name == "nt": splunk_bin = "splunk.exe" else: splunk_bin = "splunk" return make_splunkhome_path(("bin", splunk_bin)) def get_splunkd_access_info(): """Get splunkd server access info. :returns: Tuple of (scheme, host, port). :rtype: ``tuple`` """ if utils.is_true(get_conf_key_value("server", "sslConfig", "enableSplunkdSSL")): scheme = "https" else: scheme = "http" host_port = get_conf_key_value("web", "settings", "mgmtHostPort") host_port = host_port.strip() host = host_port.split(":")[0] port = int(host_port.split(":")[1]) if "SPLUNK_BINDIP" in os.environ: bindip = os.environ["SPLUNK_BINDIP"] port_idx = bindip.rfind(":") host = bindip[:port_idx] if port_idx > 0 else bindip return (scheme, host, port) def get_splunkd_uri(): """Get splunkd uri. :returns: Splunkd uri. 
:rtype: ``string`` """ if os.environ.get("SPLUNKD_URI"): return os.environ["SPLUNKD_URI"] scheme, host, port = get_splunkd_access_info() return "{scheme}://{host}:{port}".format(scheme=scheme, host=host, port=port) def get_conf_key_value(conf_name, stanza, key): """Get value of `key` of `stanza` in `conf_name`. :param conf_name: Config file. :type conf_name: ``string`` :param stanza: Stanza name. :type stanza: ``string`` :param key: Key name. :type key: ``string`` :returns: Config value. :rtype: ``(string, list, dict)`` :raises KeyError: If `stanza` or `key` doesn't exist. """ stanzas = get_conf_stanzas(conf_name) return stanzas[stanza][key] def get_conf_stanza(conf_name, stanza): """Get `stanza` in `conf_name`. :param conf_name: Config file. :type conf_name: ``string`` :param stanza: Stanza name. :type stanza: ``string`` :returns: Config stanza. :rtype: ``dict`` :raises KeyError: If stanza doesn't exist. """ stanzas = get_conf_stanzas(conf_name) return stanzas[stanza] def get_conf_stanzas(conf_name): """Get stanzas of `conf_name` :param conf_name: Config file. :type conf_name: ``string`` :returns: Config stanzas. :rtype: ``dict`` Usage:: >>> stanzas = get_conf_stanzas('server') >>> return: {'serverName': 'testServer', 'sessionTimeout': '1h', ...} """ if conf_name.endswith(".conf"): conf_name = conf_name[:-5] # TODO: dynamically caculate SPLUNK_HOME btool_cli = [ op.join(os.environ["SPLUNK_HOME"], "bin", "splunk"), "cmd", "btool", conf_name, "list", ] p = subprocess.Popen(btool_cli, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, _ = p.communicate() if isinstance(out, bytes): out = out.decode() parser = ConfigParser(**CONF_PARSER_KWARGS) parser.optionxform = str parser.readfp(StringIO(out)) out = {} for section in parser.sections(): out[section] = {item[0]: item[1] for item in parser.items(section, raw=True)} return out
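A minimal usage sketch of the helpers defined above, assuming a Splunk environment where SPLUNK_HOME is set and the `splunk cmd btool` CLI is available; the import path, app name, and stanza/key names here are illustrative only and are not taken from this module:

# Illustrative only: requires SPLUNK_HOME in the environment and a working `splunk cmd btool`.
import splunkenv  # assumed import; in practice this module ships inside a larger package

# Build a path under $SPLUNK_HOME (etc/ paths are redirected to $SPLUNK_ETC).
app_dir = splunkenv.make_splunkhome_path(["etc", "apps", "Splunk_TA_example"])

# Resolve the management URI from server.conf / web.conf via btool.
splunkd_uri = splunkenv.get_splunkd_uri()  # e.g. "https://127.0.0.1:8089"

# Read a single key; raises KeyError if the stanza or key does not exist.
server_name = splunkenv.get_conf_key_value("server", "general", "serverName")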
cell_renderer.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use cairo; use gdk; use glib::object::Cast; use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::StaticType; use glib::Value; use glib_sys; use gobject_sys; use gtk_sys; use libc; use std::boxed::Box as Box_; use std::fmt; use std::mem; use std::mem::transmute; use CellEditable; use CellRendererMode; use CellRendererState; use Requisition; use SizeRequestMode; use StateFlags; use TreePath; use Widget; glib_wrapper! { pub struct CellRenderer(Object<gtk_sys::GtkCellRenderer, gtk_sys::GtkCellRendererClass, CellRendererClass>); match fn { get_type => || gtk_sys::gtk_cell_renderer_get_type(), } } pub const NONE_CELL_RENDERER: Option<&CellRenderer> = None; pub trait CellRendererExt: 'static { fn activate<P: IsA<Widget>>( &self, event: &gdk::Event, widget: &P, path: &str, background_area: &gdk::Rectangle, cell_area: &gdk::Rectangle, flags: CellRendererState, ) -> bool; fn get_aligned_area<P: IsA<Widget>>( &self, widget: &P, flags: CellRendererState, cell_area: &gdk::Rectangle, ) -> gdk::Rectangle; fn get_alignment(&self) -> (f32, f32); fn get_fixed_size(&self) -> (i32, i32); fn get_padding(&self) -> (i32, i32); fn get_preferred_height<P: IsA<Widget>>(&self, widget: &P) -> (i32, i32); fn get_preferred_height_for_width<P: IsA<Widget>>(&self, widget: &P, width: i32) -> (i32, i32); fn get_preferred_size<P: IsA<Widget>>(&self, widget: &P) -> (Requisition, Requisition); fn get_preferred_width<P: IsA<Widget>>(&self, widget: &P) -> (i32, i32); fn get_preferred_width_for_height<P: IsA<Widget>>(&self, widget: &P, height: i32) -> (i32, i32); fn get_request_mode(&self) -> SizeRequestMode; fn get_sensitive(&self) -> bool; fn get_state<P: IsA<Widget>>( &self, widget: Option<&P>, cell_state: CellRendererState, ) -> StateFlags; fn get_visible(&self) -> bool; fn is_activatable(&self) -> bool; fn render<P: IsA<Widget>>( &self, cr: &cairo::Context, widget: &P, background_area: &gdk::Rectangle, cell_area: &gdk::Rectangle, flags: CellRendererState, ); fn set_alignment(&self, xalign: f32, yalign: f32); fn set_fixed_size(&self, width: i32, height: i32); fn set_padding(&self, xpad: i32, ypad: i32); fn set_sensitive(&self, sensitive: bool); fn set_visible(&self, visible: bool); fn start_editing<P: IsA<Widget>>( &self, event: Option<&gdk::Event>, widget: &P, path: &str, background_area: &gdk::Rectangle, cell_area: &gdk::Rectangle, flags: CellRendererState, ) -> Option<CellEditable>; fn stop_editing(&self, canceled: bool); fn set_property_cell_background(&self, cell_background: Option<&str>); fn get_property_cell_background_rgba(&self) -> Option<gdk::RGBA>; fn set_property_cell_background_rgba(&self, cell_background_rgba: Option<&gdk::RGBA>); fn get_property_cell_background_set(&self) -> bool; fn set_property_cell_background_set(&self, cell_background_set: bool); fn get_property_editing(&self) -> bool; fn get_property_height(&self) -> i32; fn set_property_height(&self, height: i32); fn get_property_is_expanded(&self) -> bool; fn set_property_is_expanded(&self, is_expanded: bool); fn get_property_is_expander(&self) -> bool; fn set_property_is_expander(&self, is_expander: bool); fn get_property_mode(&self) -> CellRendererMode; fn set_property_mode(&self, mode: CellRendererMode); fn get_property_width(&self) -> i32; fn set_property_width(&self, width: i32); fn get_property_xalign(&self) -> f32; fn 
set_property_xalign(&self, xalign: f32); fn get_property_xpad(&self) -> u32; fn set_property_xpad(&self, xpad: u32); fn get_property_yalign(&self) -> f32; fn set_property_yalign(&self, yalign: f32); fn get_property_ypad(&self) -> u32; fn set_property_ypad(&self, ypad: u32); fn connect_editing_canceled<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_editing_started<F: Fn(&Self, &CellEditable, TreePath) + 'static>( &self, f: F, ) -> SignalHandlerId; fn connect_property_cell_background_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId; fn connect_property_cell_background_rgba_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId; fn connect_property_cell_background_set_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId; fn connect_property_editing_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_is_expanded_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_is_expander_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_mode_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_sensitive_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_visible_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_xalign_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_xpad_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_yalign_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_ypad_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; } impl<O: IsA<CellRenderer>> CellRendererExt for O { fn activate<P: IsA<Widget>>( &self, event: &gdk::Event, widget: &P, path: &str, background_area: &gdk::Rectangle, cell_area: &gdk::Rectangle, flags: CellRendererState, ) -> bool { unsafe { from_glib(gtk_sys::gtk_cell_renderer_activate( self.as_ref().to_glib_none().0, mut_override(event.to_glib_none().0), widget.as_ref().to_glib_none().0, path.to_glib_none().0, background_area.to_glib_none().0, cell_area.to_glib_none().0, flags.to_glib(), )) } } fn get_aligned_area<P: IsA<Widget>>( &self, widget: &P, flags: CellRendererState, cell_area: &gdk::Rectangle, ) -> gdk::Rectangle { unsafe { let mut aligned_area = gdk::Rectangle::uninitialized(); gtk_sys::gtk_cell_renderer_get_aligned_area( self.as_ref().to_glib_none().0, widget.as_ref().to_glib_none().0, flags.to_glib(), cell_area.to_glib_none().0, aligned_area.to_glib_none_mut().0, ); aligned_area } } fn get_alignment(&self) -> (f32, f32) { unsafe { let mut xalign = mem::MaybeUninit::uninit(); let mut yalign = mem::MaybeUninit::uninit(); gtk_sys::gtk_cell_renderer_get_alignment( self.as_ref().to_glib_none().0, xalign.as_mut_ptr(), yalign.as_mut_ptr(), ); let xalign = xalign.assume_init(); let yalign = yalign.assume_init(); (xalign, yalign) } } fn get_fixed_size(&self) -> (i32, i32) { unsafe { let mut width = mem::MaybeUninit::uninit(); let mut height = mem::MaybeUninit::uninit(); gtk_sys::gtk_cell_renderer_get_fixed_size( self.as_ref().to_glib_none().0, width.as_mut_ptr(), height.as_mut_ptr(), ); let width = width.assume_init(); let height = height.assume_init(); (width, height) } } fn get_padding(&self) -> (i32, i32) { unsafe { let mut xpad 
= mem::MaybeUninit::uninit(); let mut ypad = mem::MaybeUninit::uninit(); gtk_sys::gtk_cell_renderer_get_padding( self.as_ref().to_glib_none().0, xpad.as_mut_ptr(), ypad.as_mut_ptr(), ); let xpad = xpad.assume_init(); let ypad = ypad.assume_init(); (xpad, ypad) } } fn get_preferred_height<P: IsA<Widget>>(&self, widget: &P) -> (i32, i32) { unsafe { let mut minimum_size = mem::MaybeUninit::uninit(); let mut natural_size = mem::MaybeUninit::uninit(); gtk_sys::gtk_cell_renderer_get_preferred_height( self.as_ref().to_glib_none().0, widget.as_ref().to_glib_none().0, minimum_size.as_mut_ptr(), natural_size.as_mut_ptr(), ); let minimum_size = minimum_size.assume_init(); let natural_size = natural_size.assume_init(); (minimum_size, natural_size) } } fn get_preferred_height_for_width<P: IsA<Widget>>(&self, widget: &P, width: i32) -> (i32, i32) { unsafe { let mut minimum_height = mem::MaybeUninit::uninit(); let mut natural_height = mem::MaybeUninit::uninit(); gtk_sys::gtk_cell_renderer_get_preferred_height_for_width( self.as_ref().to_glib_none().0, widget.as_ref().to_glib_none().0, width, minimum_height.as_mut_ptr(), natural_height.as_mut_ptr(), ); let minimum_height = minimum_height.assume_init(); let natural_height = natural_height.assume_init(); (minimum_height, natural_height) } } fn get_preferred_size<P: IsA<Widget>>(&self, widget: &P) -> (Requisition, Requisition) { unsafe { let mut minimum_size = Requisition::uninitialized(); let mut natural_size = Requisition::uninitialized(); gtk_sys::gtk_cell_renderer_get_preferred_size( self.as_ref().to_glib_none().0, widget.as_ref().to_glib_none().0, minimum_size.to_glib_none_mut().0, natural_size.to_glib_none_mut().0, ); (minimum_size, natural_size) } } fn get_preferred_width<P: IsA<Widget>>(&self, widget: &P) -> (i32, i32) { unsafe { let mut minimum_size = mem::MaybeUninit::uninit(); let mut natural_size = mem::MaybeUninit::uninit(); gtk_sys::gtk_cell_renderer_get_preferred_width( self.as_ref().to_glib_none().0, widget.as_ref().to_glib_none().0, minimum_size.as_mut_ptr(), natural_size.as_mut_ptr(), ); let minimum_size = minimum_size.assume_init(); let natural_size = natural_size.assume_init(); (minimum_size, natural_size) } } fn get_preferred_width_for_height<P: IsA<Widget>>( &self, widget: &P, height: i32, ) -> (i32, i32) { unsafe { let mut minimum_width = mem::MaybeUninit::uninit(); let mut natural_width = mem::MaybeUninit::uninit(); gtk_sys::gtk_cell_renderer_get_preferred_width_for_height( self.as_ref().to_glib_none().0, widget.as_ref().to_glib_none().0, height, minimum_width.as_mut_ptr(), natural_width.as_mut_ptr(), ); let minimum_width = minimum_width.assume_init(); let natural_width = natural_width.assume_init(); (minimum_width, natural_width) } } fn get_request_mode(&self) -> SizeRequestMode { unsafe { from_glib(gtk_sys::gtk_cell_renderer_get_request_mode( self.as_ref().to_glib_none().0, )) } } fn get_sensitive(&self) -> bool { unsafe { from_glib(gtk_sys::gtk_cell_renderer_get_sensitive( self.as_ref().to_glib_none().0, )) } } fn get_state<P: IsA<Widget>>( &self, widget: Option<&P>, cell_state: CellRendererState, ) -> StateFlags { unsafe { from_glib(gtk_sys::gtk_cell_renderer_get_state( self.as_ref().to_glib_none().0, widget.map(|p| p.as_ref()).to_glib_none().0, cell_state.to_glib(), )) } } fn get_visible(&self) -> bool { unsafe { from_glib(gtk_sys::gtk_cell_renderer_get_visible( self.as_ref().to_glib_none().0, )) } } fn is_activatable(&self) -> bool { unsafe { from_glib(gtk_sys::gtk_cell_renderer_is_activatable( self.as_ref().to_glib_none().0, )) 
} } fn render<P: IsA<Widget>>( &self, cr: &cairo::Context, widget: &P, background_area: &gdk::Rectangle, cell_area: &gdk::Rectangle, flags: CellRendererState, ) { unsafe { gtk_sys::gtk_cell_renderer_render( self.as_ref().to_glib_none().0, mut_override(cr.to_glib_none().0), widget.as_ref().to_glib_none().0, background_area.to_glib_none().0, cell_area.to_glib_none().0, flags.to_glib(), ); } } fn set_alignment(&self, xalign: f32, yalign: f32) { unsafe { gtk_sys::gtk_cell_renderer_set_alignment( self.as_ref().to_glib_none().0, xalign, yalign, ); } } fn set_fixed_size(&self, width: i32, height: i32) { unsafe { gtk_sys::gtk_cell_renderer_set_fixed_size( self.as_ref().to_glib_none().0, width, height, ); } } fn set_padding(&self, xpad: i32, ypad: i32) { unsafe { gtk_sys::gtk_cell_renderer_set_padding(self.as_ref().to_glib_none().0, xpad, ypad); } } fn set_sensitive(&self, sensitive: bool) { unsafe { gtk_sys::gtk_cell_renderer_set_sensitive( self.as_ref().to_glib_none().0, sensitive.to_glib(), ); } } fn set_visible(&self, visible: bool) { unsafe { gtk_sys::gtk_cell_renderer_set_visible( self.as_ref().to_glib_none().0, visible.to_glib(), ); } } fn start_editing<P: IsA<Widget>>( &self, event: Option<&gdk::Event>, widget: &P, path: &str, background_area: &gdk::Rectangle, cell_area: &gdk::Rectangle, flags: CellRendererState, ) -> Option<CellEditable> { unsafe { from_glib_none(gtk_sys::gtk_cell_renderer_start_editing( self.as_ref().to_glib_none().0, mut_override(event.to_glib_none().0), widget.as_ref().to_glib_none().0, path.to_glib_none().0, background_area.to_glib_none().0, cell_area.to_glib_none().0, flags.to_glib(), )) } } fn stop_editing(&self, canceled: bool) { unsafe { gtk_sys::gtk_cell_renderer_stop_editing( self.as_ref().to_glib_none().0, canceled.to_glib(), ); } } fn set_property_cell_background(&self, cell_background: Option<&str>) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"cell-background\0".as_ptr() as *const _, Value::from(cell_background).to_glib_none().0, ); } } fn get_property_cell_background_rgba(&self) -> Option<gdk::RGBA> { unsafe { let mut value = Value::from_type(<gdk::RGBA as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"cell-background-rgba\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `cell-background-rgba` getter") } } fn set_property_cell_background_rgba(&self, cell_background_rgba: Option<&gdk::RGBA>) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"cell-background-rgba\0".as_ptr() as *const _, Value::from(cell_background_rgba).to_glib_none().0, ); } } fn get_property_cell_background_set(&self) -> bool { unsafe { let mut value = Value::from_type(<bool as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"cell-background-set\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `cell-background-set` getter") .unwrap() } } fn set_property_cell_background_set(&self, cell_background_set: bool) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"cell-background-set\0".as_ptr() as *const _, Value::from(&cell_background_set).to_glib_none().0, ); } } fn get_property_editing(&self) -> bool { unsafe { let mut value = Value::from_type(<bool as StaticType>::static_type()); 
gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"editing\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `editing` getter") .unwrap() } } fn get_property_height(&self) -> i32 { unsafe { let mut value = Value::from_type(<i32 as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"height\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `height` getter") .unwrap() } } fn set_property_height(&self, height: i32)
fn get_property_is_expanded(&self) -> bool { unsafe { let mut value = Value::from_type(<bool as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"is-expanded\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `is-expanded` getter") .unwrap() } } fn set_property_is_expanded(&self, is_expanded: bool) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"is-expanded\0".as_ptr() as *const _, Value::from(&is_expanded).to_glib_none().0, ); } } fn get_property_is_expander(&self) -> bool { unsafe { let mut value = Value::from_type(<bool as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"is-expander\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `is-expander` getter") .unwrap() } } fn set_property_is_expander(&self, is_expander: bool) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"is-expander\0".as_ptr() as *const _, Value::from(&is_expander).to_glib_none().0, ); } } fn get_property_mode(&self) -> CellRendererMode { unsafe { let mut value = Value::from_type(<CellRendererMode as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"mode\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `mode` getter") .unwrap() } } fn set_property_mode(&self, mode: CellRendererMode) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"mode\0".as_ptr() as *const _, Value::from(&mode).to_glib_none().0, ); } } fn get_property_width(&self) -> i32 { unsafe { let mut value = Value::from_type(<i32 as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"width\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `width` getter") .unwrap() } } fn set_property_width(&self, width: i32) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"width\0".as_ptr() as *const _, Value::from(&width).to_glib_none().0, ); } } fn get_property_xalign(&self) -> f32 { unsafe { let mut value = Value::from_type(<f32 as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"xalign\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `xalign` getter") .unwrap() } } fn set_property_xalign(&self, xalign: f32) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"xalign\0".as_ptr() as *const _, Value::from(&xalign).to_glib_none().0, ); } } fn get_property_xpad(&self) -> u32 { unsafe { let mut value = Value::from_type(<u32 as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"xpad\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `xpad` getter") .unwrap() } } fn set_property_xpad(&self, xpad: u32) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"xpad\0".as_ptr() as *const _, Value::from(&xpad).to_glib_none().0, ); } } fn get_property_yalign(&self) -> f32 
{ unsafe { let mut value = Value::from_type(<f32 as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"yalign\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `yalign` getter") .unwrap() } } fn set_property_yalign(&self, yalign: f32) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"yalign\0".as_ptr() as *const _, Value::from(&yalign).to_glib_none().0, ); } } fn get_property_ypad(&self) -> u32 { unsafe { let mut value = Value::from_type(<u32 as StaticType>::static_type()); gobject_sys::g_object_get_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"ypad\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `ypad` getter") .unwrap() } } fn set_property_ypad(&self, ypad: u32) { unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"ypad\0".as_ptr() as *const _, Value::from(&ypad).to_glib_none().0, ); } } fn connect_editing_canceled<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn editing_canceled_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"editing-canceled\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( editing_canceled_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_editing_started<F: Fn(&Self, &CellEditable, TreePath) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn editing_started_trampoline< P, F: Fn(&P, &CellEditable, TreePath) + 'static, >( this: *mut gtk_sys::GtkCellRenderer, editable: *mut gtk_sys::GtkCellEditable, path: *mut libc::c_char, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); let path = from_glib_full(gtk_sys::gtk_tree_path_new_from_string(path)); f( &CellRenderer::from_glib_borrow(this).unsafe_cast_ref(), &from_glib_borrow(editable), path, ) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"editing-started\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( editing_started_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_cell_background_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_cell_background_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::cell-background\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_cell_background_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_cell_background_rgba_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_cell_background_rgba_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); 
f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::cell-background-rgba\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_cell_background_rgba_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_cell_background_set_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_cell_background_set_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::cell-background-set\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_cell_background_set_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_editing_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_editing_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::editing\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_editing_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_height_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::height\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_height_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_is_expanded_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_is_expanded_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::is-expanded\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_is_expanded_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_is_expander_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_is_expander_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::is-expander\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_is_expander_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn 
connect_property_mode_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_mode_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::mode\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_mode_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_sensitive_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_sensitive_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::sensitive\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_sensitive_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_visible_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_visible_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::visible\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_visible_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_width_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::width\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_width_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_xalign_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_xalign_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::xalign\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_xalign_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_xpad_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_xpad_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( 
self.as_ptr() as *mut _, b"notify::xpad\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_xpad_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_yalign_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_yalign_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::yalign\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_yalign_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_property_ypad_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_ypad_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkCellRenderer, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<CellRenderer>, { let f: &F = &*(f as *const F); f(&CellRenderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::ypad\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_ypad_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } } impl fmt::Display for CellRenderer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "CellRenderer") } }
{ unsafe { gobject_sys::g_object_set_property( self.to_glib_none().0 as *mut gobject_sys::GObject, b"height\0".as_ptr() as *const _, Value::from(&height).to_glib_none().0, ); } }
recipe.py
# -*- coding: utf-8 -*- from typing import List, Dict, AnyStr from retry import retry from ratelimit import limits, RateLimitException import dataiku from dataiku.customrecipe import get_recipe_config, get_input_names_for_role, get_output_names_for_role from plugin_io_utils import ErrorHandlingEnum, validate_column_input from dku_io_utils import set_column_description from amazon_comprehend_api_client import API_EXCEPTIONS, batch_api_response_parser, get_client from api_parallelizer import api_parallelizer from amazon_comprehend_api_formatting import LanguageDetectionAPIFormatter # ============================================================================== # SETUP # ============================================================================== api_configuration_preset = get_recipe_config().get("api_configuration_preset") api_quota_rate_limit = api_configuration_preset.get("api_quota_rate_limit") api_quota_period = api_configuration_preset.get("api_quota_period") parallel_workers = api_configuration_preset.get("parallel_workers") batch_size = api_configuration_preset.get("batch_size") text_column = get_recipe_config().get("text_column") error_handling = ErrorHandlingEnum[get_recipe_config().get("error_handling")] input_dataset_name = get_input_names_for_role("input_dataset")[0] input_dataset = dataiku.Dataset(input_dataset_name) input_schema = input_dataset.read_schema() input_columns_names = [col["name"] for col in input_schema] output_dataset_name = get_output_names_for_role("output_dataset")[0] output_dataset = dataiku.Dataset(output_dataset_name) validate_column_input(text_column, input_columns_names) input_df = input_dataset.get_dataframe() client = get_client(api_configuration_preset) column_prefix = "lang_detect_api" batch_kwargs = { "api_support_batch": True, "batch_size": batch_size, "batch_api_response_parser": batch_api_response_parser, } # ============================================================================== # RUN # ============================================================================== @retry((RateLimitException, OSError), delay=api_quota_period, tries=5) @limits(calls=api_quota_rate_limit, period=api_quota_period) def call_api_language_detection(batch: List[Dict], text_column: AnyStr) -> List[Dict]:
df = api_parallelizer( input_df=input_df, api_call_function=call_api_language_detection, api_exceptions=API_EXCEPTIONS, column_prefix=column_prefix, text_column=text_column, parallel_workers=parallel_workers, error_handling=error_handling, **batch_kwargs ) api_formatter = LanguageDetectionAPIFormatter( input_df=input_df, column_prefix=column_prefix, error_handling=error_handling, ) output_df = api_formatter.format_df(df) output_dataset.write_with_schema(output_df) set_column_description( input_dataset=input_dataset, output_dataset=output_dataset, column_description_dict=api_formatter.column_description_dict, )
text_list = [str(r.get(text_column, "")).strip() for r in batch] responses = client.batch_detect_dominant_language(TextList=text_list) return responses
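The decorator stack used on call_api_language_detection above (retry on RateLimitException/OSError around a ratelimit quota) is the throttling pattern the recipe relies on. Below is a self-contained sketch of that same pattern with a stand-in API call; the function name, quota values, and returned payload are hypothetical and not part of the plugin:

from ratelimit import limits, RateLimitException
from retry import retry

QUOTA_CALLS = 10   # illustrative quota: 10 calls...
QUOTA_PERIOD = 1   # ...per 1-second window

@retry(RateLimitException, delay=QUOTA_PERIOD, tries=5)
@limits(calls=QUOTA_CALLS, period=QUOTA_PERIOD)
def call_stub_api(batch):
    # Stand-in for a real batched call such as batch_detect_dominant_language.
    return [{"index": i, "text": str(row)} for i, row in enumerate(batch)]

# limits() raises RateLimitException once the quota for the current window is
# exhausted; retry() then sleeps `delay` seconds and re-invokes the call, so the
# loop below completes even though it exceeds the per-second quota.
results = [call_stub_api([row]) for row in range(25)]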
content_artifacts_creator.py
# -*- coding: utf-8 -*- import logging import os import re import sys import time from concurrent.futures import as_completed from contextlib import contextmanager from shutil import make_archive, rmtree from typing import Callable, Dict, List, Optional, Union from packaging.version import parse from pebble import ProcessFuture, ProcessPool from wcmatch.pathlib import BRACE, EXTMATCH, NEGATE, NODIR, SPLIT, Path from demisto_sdk.commands.common.constants import ( BASE_PACK, CLASSIFIERS_DIR, CONTENT_ITEMS_DISPLAY_FOLDERS, DASHBOARDS_DIR, DOCUMENTATION_DIR, GENERIC_DEFINITIONS_DIR, GENERIC_FIELDS_DIR, GENERIC_MODULES_DIR, GENERIC_TYPES_DIR, INCIDENT_FIELDS_DIR, INCIDENT_TYPES_DIR, INDICATOR_FIELDS_DIR, INDICATOR_TYPES_DIR, INTEGRATIONS_DIR, LAYOUTS_DIR, PACKS_DIR, PLAYBOOKS_DIR, PRE_PROCESS_RULES_DIR, RELEASE_NOTES_DIR, REPORTS_DIR, SCRIPTS_DIR, TEST_PLAYBOOKS_DIR, TOOLS_DIR, WIDGETS_DIR, ContentItems) from demisto_sdk.commands.common.content import (Content, ContentError, ContentFactoryError, Pack) from demisto_sdk.commands.common.content.objects.pack_objects import ( JSONContentObject, Script, TextObject, YAMLContentObject, YAMLContentUnifiedObject) from demisto_sdk.commands.common.tools import arg_to_list from .artifacts_report import ArtifactsReport, ObjectReport #################### # Global variables # #################### FIRST_MARKETPLACE_VERSION = parse('6.0.0') IGNORED_PACKS = ['ApiModules'] IGNORED_TEST_PLAYBOOKS_DIR = 'Deprecated' ContentObject = Union[YAMLContentUnifiedObject, YAMLContentObject, JSONContentObject, TextObject] logger = logging.getLogger('demisto-sdk') EX_SUCCESS = 0 EX_FAIL = 1 ############## # Main logic # ############## class ArtifactsManager: def __init__(self, artifacts_path: str, zip: bool, packs: bool, content_version: str, suffix: str, cpus: int, id_set_path: str = '', pack_names: str = 'all', signature_key: str = '', sign_directory: Path = None, remove_test_playbooks: bool = True): """ Content artifacts configuration Args: artifacts_path: existing destination directory for creating artifacts. zip: True for zip all content artifacts to 3 different zip files in same structure else False. packs: create only content_packs artifacts if True. content_version: release content version. suffix: suffix to add all file we creates. cpus: available cpus in the computer. id_set_path: the full path of id_set.json. pack_names: Packs to create artifacts for. signature_key: Base64 encoded signature key used for signing packs. sign_directory: Path to the signDirectory executable file. remove_test_playbooks: Should remove test playbooks from content packs or not. 
""" # options arguments self.artifacts_path = Path(artifacts_path) self.zip_artifacts = zip self.only_content_packs = packs self.content_version = content_version self.suffix = suffix self.cpus = cpus self.id_set_path = id_set_path self.pack_names = arg_to_list(pack_names) self.signature_key = signature_key self.signDirectory = sign_directory self.remove_test_playbooks = remove_test_playbooks # run related arguments self.content_new_path = self.artifacts_path / 'content_new' self.content_test_path = self.artifacts_path / 'content_test' self.content_packs_path = self.artifacts_path / 'content_packs' self.content_all_path = self.artifacts_path / 'all_content' self.content_uploadable_zips_path = self.artifacts_path / 'uploadable_packs' # inits self.content = Content.from_cwd() self.execution_start = time.time() self.packs = self.content.packs self.exit_code = EX_SUCCESS def create_content_artifacts(self) -> int: with ArtifactsDirsHandler(self), ProcessPoolHandler(self) as pool: futures: List[ProcessFuture] = [] # content/Packs futures.extend(dump_packs(self, pool)) # content/TestPlaybooks if not self.remove_test_playbooks: futures.append(pool.schedule(dump_tests_conditionally, args=(self,))) # content/content-descriptor.json futures.append(pool.schedule(dump_content_descriptor, args=(self,))) # content/Documentation/doc-*.json futures.append(pool.schedule(dump_content_documentations, args=(self,))) # Wait for all futures to be finished wait_futures_complete(futures, self) # Add suffix suffix_handler(self) if os.path.exists('keyfile'): os.remove('keyfile') logger.info(f"\nExecution time: {time.time() - self.execution_start} seconds") return self.exit_code def get_relative_pack_path(self, content_object: ContentObject): """ Args: content_object: the object to get the relative path for Returns: the path of the given object relative from the pack directory, for example HelloWorld/Scripts/some_script """ return content_object.path.relative_to(self.content.path / PACKS_DIR) def get_base_path(self) -> Path: """ Returns: the path that all artifacts are relative to """ return self.content.path def get_dir_to_delete(self): """ Returns: list of directories to delete after artifacts was created """ return [self.content_test_path, self.content_new_path, self.content_packs_path, self.content_all_path] class ContentItemsHandler: def __init__(self): self.server_min_version = parse('1.0.0') self.content_items: Dict[ContentItems, List] = { ContentItems.SCRIPTS: [], ContentItems.PLAYBOOKS: [], ContentItems.INTEGRATIONS: [], ContentItems.INCIDENT_FIELDS: [], ContentItems.INCIDENT_TYPES: [], ContentItems.DASHBOARDS: [], ContentItems.INDICATOR_FIELDS: [], ContentItems.REPORTS: [], ContentItems.INDICATOR_TYPES: [], ContentItems.LAYOUTS: [], ContentItems.PRE_PROCESS_RULES: [], ContentItems.CLASSIFIERS: [], ContentItems.WIDGETS: [], ContentItems.GENERIC_FIELDS: [], ContentItems.GENERIC_TYPES: [], ContentItems.GENERIC_MODULES: [], ContentItems.GENERIC_DEFINITIONS: [] } self.content_folder_name_to_func: Dict[str, Callable] = { SCRIPTS_DIR: self.add_script_as_content_item, PLAYBOOKS_DIR: self.add_playbook_as_content_item, INTEGRATIONS_DIR: self.add_integration_as_content_item, INCIDENT_FIELDS_DIR: self.add_incident_field_as_content_item, INCIDENT_TYPES_DIR: self.add_incident_type_as_content_item, DASHBOARDS_DIR: self.add_dashboard_as_content_item, INDICATOR_FIELDS_DIR: self.add_indicator_field_as_content_item, INDICATOR_TYPES_DIR: self.add_indicator_type_as_content_item, REPORTS_DIR: self.add_report_as_content_item, 
LAYOUTS_DIR: self.add_layout_as_content_item, PRE_PROCESS_RULES_DIR: self.add_pre_process_rules_as_content_item, CLASSIFIERS_DIR: self.add_classifier_as_content_item, WIDGETS_DIR: self.add_widget_as_content_item, GENERIC_TYPES_DIR: self.add_generic_type_as_content_item, GENERIC_FIELDS_DIR: self.add_generic_field_as_content_item, GENERIC_MODULES_DIR: self.add_generic_module_as_content_item, GENERIC_DEFINITIONS_DIR: self.add_generic_definition_as_content_item } def handle_content_item(self, content_object: ContentObject): """Verifies the validity of the content object and parses it to the correct entities list. Args: content_object (ContentObject): The object to add to entities list. """ content_object_directory = content_object.path.parts[-3] if content_object_directory not in self.content_folder_name_to_func.keys(): # In the case where the content object is nested directly in the entities directory (Playbooks for example). content_object_directory = content_object.path.parts[-2] if content_object.to_version < FIRST_MARKETPLACE_VERSION: return # reputation in old format aren't supported in 6.0.0 server version if content_object_directory == INDICATOR_TYPES_DIR and not re.match(content_object.path.name, 'reputation-.*.json'): return # skip content items that are not displayed in contentItems if content_object_directory not in CONTENT_ITEMS_DISPLAY_FOLDERS: return self.server_min_version = max(self.server_min_version, content_object.from_version) self.content_folder_name_to_func[content_object_directory](content_object) def add_script_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.SCRIPTS].append({ 'name': content_object.get('name', ''), 'description': content_object.get('comment', ''), 'tags': content_object.get('tags', []) }) def add_playbook_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.PLAYBOOKS].append({ 'name': content_object.get('name', ''), 'description': content_object.get('description', ''), }) def add_integration_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.INTEGRATIONS].append({ 'name': content_object.get('display', ""), 'description': content_object.get('description', ''), 'category': content_object.get('category', ''), 'commands': [ { 'name': command.get('name', ''), 'description': command.get('description', '') } for command in content_object.script.get('commands', [])] }) def add_incident_field_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.INCIDENT_FIELDS].append({ 'name': content_object.get('name', ''), 'type': content_object.get('type', ''), 'description': content_object.get('description', '') }) def add_incident_type_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.INCIDENT_TYPES].append({ 'name': content_object.get('name', ''), 'playbook': content_object.get('playbookId', ''), 'closureScript': content_object.get('closureScript', ''), 'hours': int(content_object.get('hours', 0)), 'days': int(content_object.get('days', 0)), 'weeks': int(content_object.get('weeks', 0)) }) def add_dashboard_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.DASHBOARDS].append({ 'name': content_object.get('name', '') }) def add_indicator_field_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.INDICATOR_FIELDS].append({ 'name': content_object.get('name', ''), 'type': content_object.get('type', ''), 'description': 
content_object.get('description', '') }) def add_indicator_type_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.INDICATOR_TYPES].append({ 'details': content_object.get('details', ''), 'reputationScriptName': content_object.get('reputationScriptName', ''), 'enhancementScriptNames': content_object.get('enhancementScriptNames', []) }) def add_report_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.REPORTS].append({ 'name': content_object.get('name', ''), 'description': content_object.get('description', '') }) def add_layout_as_content_item(self, content_object: ContentObject): if content_object.get('description') is not None: self.content_items[ContentItems.LAYOUTS].append({ 'name': content_object.get('name', ''), 'description': content_object.get('description') }) else: self.content_items[ContentItems.LAYOUTS].append({ 'name': content_object.get('name', '') }) def add_pre_process_rules_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.PRE_PROCESS_RULES].append({ 'name': content_object.get('name') or content_object.get('id', ''), 'description': content_object.get('description', ''), }) def add_classifier_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.CLASSIFIERS].append({ 'name': content_object.get('name') or content_object.get('id', ''), 'description': content_object.get('description', '') }) def add_widget_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.WIDGETS].append({ 'name': content_object.get('name', ''), 'dataType': content_object.get('dataType', ''), 'widgetType': content_object.get('widgetType', '') }) def add_generic_field_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.GENERIC_FIELDS].append({ 'name': content_object.get('name', ''), 'type': content_object.get('type', ''), 'description': content_object.get('description', '') }) def add_generic_type_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.GENERIC_TYPES].append({ 'name': content_object.get('name', ''), 'details': content_object.get('details', ''), }) def add_generic_definition_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.GENERIC_DEFINITIONS].append({ 'name': content_object.get('name', ''), 'description': content_object.get('description', '') }) def add_generic_module_as_content_item(self, content_object: ContentObject): self.content_items[ContentItems.GENERIC_MODULES].append({ 'name': content_object.get('name', ''), 'description': content_object.get('description', '') }) @contextmanager def ProcessPoolHandler(artifact_manager: ArtifactsManager) -> ProcessPool: """ Process pool Handler which terminate all processes in case of Exception. Args: artifact_manager: Artifacts manager object. Yields: ProcessPool: Pebble process pool. 
""" global logger with ProcessPool(max_workers=artifact_manager.cpus, initializer=child_mute) as pool: try: yield pool except KeyboardInterrupt: logger.info("\nCTRL+C Pressed!\nGracefully release all resources due to keyboard interrupt...") pool.stop() pool.join() raise except Exception as e: logger.exception(e) logger.error("Gracefully release all resources due to Error...") pool.stop() pool.join() raise else: pool.close() pool.join() finally: if os.path.exists('keyfile'): os.remove('keyfile') def wait_futures_complete(futures: List[ProcessFuture], artifact_manager: ArtifactsManager): """Wait for all futures to complete, Raise exception if occured. Args: artifact_manager: Artifacts manager object. futures: futures to wait for. Raises: Exception: Raise caught exception for further cleanups. """ global logger for future in as_completed(futures): try: result = future.result() if isinstance(result, ArtifactsReport): logger.info(result.to_str(artifact_manager.get_base_path())) except (ContentError, DuplicateFiles, ContentFactoryError) as e: logger.error(e.msg) raise except Exception as e: logger.exception(e) raise ##################################################### # Files include rules functions (Version, Type etc) # ##################################################### def is_in_content_packs(content_object: ContentObject) -> bool: """ Rules content_packs: 1. to_version >= First marketplace version. Args: content_object: Content object as specified in global variable - ContentObject. Returns: bool: True if object should be included in content_packs artifacts else False. """ return content_object.to_version >= FIRST_MARKETPLACE_VERSION def is_in_content_test(artifact_manager: ArtifactsManager, content_object: ContentObject) -> bool: """Rules content_test: 1. flag of only packs is off. 2. Object located in TestPlaybooks directory (*/TestPlaybooks/*). 3. from_version < First marketplace version. 4. Path of object is not including global variable - IGNORED_TEST_PLAYBOOKS_DIR Args: artifact_manager: Artifacts manager object. content_object: Content object as specified in global variable - ContentObject. Returns: bool: True if object should be included in content_test artifacts else False. """ return (not artifact_manager.only_content_packs and TEST_PLAYBOOKS_DIR in content_object.path.parts and content_object.from_version < FIRST_MARKETPLACE_VERSION and IGNORED_TEST_PLAYBOOKS_DIR not in content_object.path.parts) def is_in_content_new(artifact_manager: ArtifactsManager, content_object: ContentObject) -> bool: """ Rules content_new: 1. flag of only packs is off. 2. Object not located in TestPlaybooks directory (*/TestPlaybooks/*). 3. from_version < First marketplace version Args: artifact_manager: Artifacts manager object. content_object: Content object as specified in global variable - ContentObject. Returns: bool: True if object should be included in content_new artifacts else False. """ return (not artifact_manager.only_content_packs and TEST_PLAYBOOKS_DIR not in content_object.path.parts and content_object.from_version < FIRST_MARKETPLACE_VERSION) def is_in_content_all(artifact_manager: ArtifactsManager, content_object: ContentObject) -> bool: """ Rules content_all: 1. If in content_new or content_test. Args: artifact_manager: Artifacts manager object. content_object: Content object as specified in global variable - ContentObject. Returns: bool: True if object should be included in content_all artifacts else False. 
""" return is_in_content_new(artifact_manager, content_object) or is_in_content_test(artifact_manager, content_object) ############################ # Documentations functions # ############################ def dump_content_documentations(artifact_manager: ArtifactsManager) -> ArtifactsReport: """ Dumping Documentation/doc-*.json into: 1. content_new 2. content_all Args: artifact_manager: Artifacts manager object. Returns: ArtifactsReport: ArtifactsReport object. """ report = ArtifactsReport("Documentations:") for documentation in artifact_manager.content.documentations: object_report = ObjectReport(documentation, content_packs=True) created_files = documentation.dump(artifact_manager.content_packs_path / BASE_PACK / DOCUMENTATION_DIR) if not artifact_manager.only_content_packs: object_report.set_content_new() object_report.set_content_all() for dest in [artifact_manager.content_new_path, artifact_manager.content_all_path]: created_files = dump_link_files(artifact_manager, documentation, dest, created_files) report.append(object_report) return report ######################## # Descriptor functions # ######################## def dump_content_descriptor(artifact_manager: ArtifactsManager) -> ArtifactsReport: """ Dumping content/content_descriptor.json into: 1. content_test 2. content_new 3. content_all Args: artifact_manager: Artifacts manager object. Returns: ArtifactsReport: ArtifactsReport object. Notes: 1. content_descriptor.json created during build run time. """ report = ArtifactsReport("Content descriptor:") if not artifact_manager.only_content_packs and artifact_manager.content.content_descriptor: descriptor = artifact_manager.content.content_descriptor object_report = ObjectReport(descriptor, content_test=True, content_new=True, content_all=True) created_files: List[Path] = [] for dest in [artifact_manager.content_test_path, artifact_manager.content_new_path, artifact_manager.content_all_path]: created_files = dump_link_files(artifact_manager, descriptor, dest, created_files) report.append(object_report) return report ################################## # Content Testplaybook functions # ################################## def dump_tests_conditionally(artifact_manager: ArtifactsManager) -> ArtifactsReport: """ Dump test scripts/playbooks conditionally into: 1. content_test Args: artifact_manager: Artifacts manager object. Returns: ArtifactsReport: ArtifactsReport object. """ report = ArtifactsReport("TestPlaybooks:") for test in artifact_manager.content.test_playbooks: object_report = ObjectReport(test) if is_in_content_test(artifact_manager, test): object_report.set_content_test() test_created_files = dump_link_files(artifact_manager, test, artifact_manager.content_test_path) dump_link_files(artifact_manager, test, artifact_manager.content_all_path, test_created_files) report += object_report return report ########################### # Content packs functions # ########################### def dump_packs(artifact_manager: ArtifactsManager, pool: ProcessPool) -> List[ProcessFuture]:
def dump_pack(artifact_manager: ArtifactsManager, pack: Pack) -> ArtifactsReport: # noqa: C901 """ Dumping content/Packs/<pack_id>/ into: 1. content_test 2. content_new 3. content_all 4. content_packs 5. uploadable_packs Args: artifact_manager: Artifacts manager object. pack: Pack object. Notes: 1. Include all file object, excluding: a. Change_log files (Deprecated). b. Integration/Script/Playbook readme (Used for website documentation deployment). c. .pack-ignore (Internal only). d. .secrets-ignore (Internal only). Returns: ArtifactsReport: ArtifactsReport object. """ global logger pack_report = ArtifactsReport(f"Pack {pack.id}:") pack.metadata.load_user_metadata(pack.id, pack.path.name, pack.path, logger) content_items_handler = ContentItemsHandler() is_feed_pack = False for integration in pack.integrations: content_items_handler.handle_content_item(integration) is_feed_pack = is_feed_pack or integration.is_feed pack_report += dump_pack_conditionally(artifact_manager, integration) for script in pack.scripts: content_items_handler.handle_content_item(script) pack_report += dump_pack_conditionally(artifact_manager, script) for playbook in pack.playbooks: content_items_handler.handle_content_item(playbook) is_feed_pack = is_feed_pack or playbook.get('name', '').startswith('TIM') pack_report += dump_pack_conditionally(artifact_manager, playbook) for test_playbook in pack.test_playbooks: pack_report += dump_pack_conditionally(artifact_manager, test_playbook) for report in pack.reports: content_items_handler.handle_content_item(report) pack_report += dump_pack_conditionally(artifact_manager, report) for layout in pack.layouts: content_items_handler.handle_content_item(layout) pack_report += dump_pack_conditionally(artifact_manager, layout) for pre_process_rule in pack.pre_process_rules: content_items_handler.handle_content_item(pre_process_rule) pack_report += dump_pack_conditionally(artifact_manager, pre_process_rule) for dashboard in pack.dashboards: content_items_handler.handle_content_item(dashboard) pack_report += dump_pack_conditionally(artifact_manager, dashboard) for incident_field in pack.incident_fields: content_items_handler.handle_content_item(incident_field) pack_report += dump_pack_conditionally(artifact_manager, incident_field) for incident_type in pack.incident_types: content_items_handler.handle_content_item(incident_type) pack_report += dump_pack_conditionally(artifact_manager, incident_type) for indicator_field in pack.indicator_fields: content_items_handler.handle_content_item(indicator_field) pack_report += dump_pack_conditionally(artifact_manager, indicator_field) for indicator_type in pack.indicator_types: content_items_handler.handle_content_item(indicator_type) pack_report += dump_pack_conditionally(artifact_manager, indicator_type) for connection in pack.connections: pack_report += dump_pack_conditionally(artifact_manager, connection) for classifier in pack.classifiers: content_items_handler.handle_content_item(classifier) pack_report += dump_pack_conditionally(artifact_manager, classifier) for widget in pack.widgets: content_items_handler.handle_content_item(widget) pack_report += dump_pack_conditionally(artifact_manager, widget) for generic_definition in pack.generic_definitions: content_items_handler.handle_content_item(generic_definition) pack_report += dump_pack_conditionally(artifact_manager, generic_definition) for generic_module in pack.generic_modules: content_items_handler.handle_content_item(generic_module) pack_report += 
dump_pack_conditionally(artifact_manager, generic_module) for generic_type in pack.generic_types: content_items_handler.handle_content_item(generic_type) pack_report += dump_pack_conditionally(artifact_manager, generic_type) for generic_field in pack.generic_fields: content_items_handler.handle_content_item(generic_field) pack_report += dump_pack_conditionally(artifact_manager, generic_field) for release_note in pack.release_notes: pack_report += ObjectReport(release_note, content_packs=True) release_note.dump(artifact_manager.content_packs_path / pack.id / RELEASE_NOTES_DIR) for release_note_config in pack.release_notes_config: pack_report += ObjectReport(release_note_config, content_packs=True) release_note_config.dump(artifact_manager.content_packs_path / pack.id / RELEASE_NOTES_DIR) for tool in pack.tools: object_report = ObjectReport(tool, content_packs=True) created_files = tool.dump(artifact_manager.content_packs_path / pack.id / TOOLS_DIR) if not artifact_manager.only_content_packs: object_report.set_content_new() dump_link_files(artifact_manager, tool, artifact_manager.content_new_path, created_files) object_report.set_content_all() dump_link_files(artifact_manager, tool, artifact_manager.content_all_path, created_files) pack_report += object_report if pack.pack_metadata: pack_report += ObjectReport(pack.pack_metadata, content_packs=True) pack.pack_metadata.dump(artifact_manager.content_packs_path / pack.id) if pack.metadata: pack_report += ObjectReport(pack.metadata, content_packs=True) pack.metadata.content_items = content_items_handler.content_items pack.metadata.server_min_version = pack.metadata.server_min_version or content_items_handler.server_min_version if artifact_manager.id_set_path: # Dependencies can only be done when id_set file is given. pack.metadata.handle_dependencies(pack.path.name, artifact_manager.id_set_path, logger) else: logger.warning('Skipping dependencies extraction since no id_set file was provided.') if is_feed_pack and 'TIM' not in pack.metadata.tags: pack.metadata.tags.append('TIM') pack.metadata.dump_metadata_file(artifact_manager.content_packs_path / pack.id) if pack.readme or pack.contributors: if not pack.readme: readme_file = os.path.join(pack.path, 'README.md') open(readme_file, 'a+').close() readme_obj = pack.readme readme_obj.contributors = pack.contributors pack_report += ObjectReport(readme_obj, content_packs=True) readme_obj.dump(artifact_manager.content_packs_path / pack.id) if pack.author_image: pack_report += ObjectReport(pack.author_image, content_packs=True) pack.author_image.dump(artifact_manager.content_packs_path / pack.id) return pack_report def dump_pack_conditionally(artifact_manager: ArtifactsManager, content_object: ContentObject) -> ObjectReport: """ Dump pack object by the following logic Args: artifact_manager: Artifacts manager object. content_object: content_object (e.g. Integration/Script/Layout etc) Returns: ObjectReport: ObjectReport object. 
""" object_report = ObjectReport(content_object) pack_created_files: List[Path] = [] test_new_created_files: List[Path] = [] with content_files_handler(artifact_manager, content_object) as files_to_remove: # Content packs filter - When unify also _45.yml created which should be deleted after copy it if needed if is_in_content_packs(content_object): object_report.set_content_packs() # Unify will create *_45.yml files which shouldn't be in content_packs pack_created_files.extend(dump_link_files(artifact_manager, content_object, artifact_manager.content_packs_path / calc_relative_packs_dir(artifact_manager, content_object))) # Collecting files *_45.yml which created and need to be removed after execution. files_to_remove.extend( [created_file for created_file in pack_created_files if created_file.name.endswith('_45.yml')]) # Content test filter if is_in_content_test(artifact_manager, content_object): object_report.set_content_test() test_new_created_files = dump_link_files(artifact_manager, content_object, artifact_manager.content_test_path, pack_created_files) # Content new filter if is_in_content_new(artifact_manager, content_object): object_report.set_content_new() test_new_created_files = dump_link_files(artifact_manager, content_object, artifact_manager.content_new_path, pack_created_files) # Content all filter if is_in_content_all(artifact_manager, content_object): object_report.set_content_all() dump_link_files(artifact_manager, content_object, artifact_manager.content_all_path, test_new_created_files) return object_report @contextmanager def content_files_handler(artifact_manager: ArtifactsManager, content_object: ContentObject): """ Pre-processing pack, perform the following: 1. Change content/Packs/Base/Scripts/CommonServerPython.py global variables: a. CONTENT_RELEASE_VERSION to given content version flag. b. CONTENT_BRANCH_NAME to active branch Post-processing pack, perform the following: 1. Change content/Packs/Base/Scripts/CommonServerPython.py to original state. 2. Unifier creates *_45.yml files in content_pack by default which is not support due to_version lower than NEWEST_SUPPORTED_VERSION, Therefor after copy it to content_new, delete it. Args: artifact_manager: Command line configuration. content_object: content_object (e.g. Integration/Script/Layout etc) Yields: List[Path]: List of file to be removed after execution. """ files_to_remove: List[Path] = [] try: if (BASE_PACK in content_object.path.parts) and isinstance(content_object, Script) and \ content_object.code_path and content_object.code_path.name == 'CommonServerPython.py': # Modify CommonServerPython.py global variables repo = artifact_manager.content.git() modify_common_server_constants(content_object.code_path, artifact_manager.content_version, 'master' if not repo else repo.active_branch) yield files_to_remove finally: if (BASE_PACK in content_object.path.parts) and isinstance(content_object, Script) and \ content_object.code_path and content_object.code_path.name == 'CommonServerPython.py': # Modify CommonServerPython.py global variables modify_common_server_constants(content_object.code_path, '0.0.0', 'master') # Delete yaml which created by Unifier in packs and to_version/toVersion lower than NEWEST_SUPPORTED_VERSION for file_path in files_to_remove: file_path.unlink() def modify_common_server_constants(code_path: Path, content_version: str, branch_name: Optional[str] = None): """ Modify content/Packs/Base/Scripts/CommonServerPython.py global variables: a. 
CONTENT_RELEASE_VERSION to given content version flag. b. CONTENT_BRANCH_NAME to active branch Args: code_path: Packs/Base/Scripts/CommonServerPython.py full code path. branch_name: branch name to update in CONTENT_BRANCH_NAME content_version: content version to update in CONTENT_RELEASE_VERSION """ file_content_new = re.sub(r"CONTENT_RELEASE_VERSION = '\d.\d.\d'", f"CONTENT_RELEASE_VERSION = '{content_version}'", code_path.read_text()) file_content_new = re.sub(r"CONTENT_BRANCH_NAME = '\w+'", f"CONTENT_BRANCH_NAME = '{branch_name}'", file_content_new) code_path.write_text(file_content_new) ######################## # Suffix add functions # ######################## def suffix_handler(artifact_manager: ArtifactsManager): """ Add suffix to file names exclude: 1. pack_metadata.json 2. README. 3. content_descriptor.json 3. ReleaseNotes/** Include: 1. *.json 2. *.(yaml|yml) Args: artifact_manager: Artifacts manager object. """ files_pattern_to_add_suffix = "!reputations.json|!pack_metadata.json|" \ "!doc-*.json|!content-descriptor.json|*.{json,yml,yaml}" if artifact_manager.suffix: files_content_packs = artifact_manager.content_packs_path.rglob( files_pattern_to_add_suffix, flags=BRACE | SPLIT | EXTMATCH | NODIR | NEGATE) files_content_test = artifact_manager.content_test_path.rglob(files_pattern_to_add_suffix, flags=BRACE | SPLIT | EXTMATCH | NODIR | NEGATE) files_content_new = artifact_manager.content_new_path.rglob(files_pattern_to_add_suffix, flags=BRACE | SPLIT | EXTMATCH | NODIR | NEGATE) files_content_all = artifact_manager.content_all_path.rglob(files_pattern_to_add_suffix, flags=BRACE | SPLIT | EXTMATCH | NODIR | NEGATE) for files in [files_content_new, files_content_packs, files_content_test, files_content_all]: for file in files: file_name_split = file.name.split('.') file_real_stem = ".".join(file_name_split[:-1]) suffix = file_name_split[-1] file.rename(file.with_name(f'{file_real_stem}{artifact_manager.suffix}.{suffix}')) ########### # Helpers # ########### class DuplicateFiles(Exception): def __init__(self, exiting_file: Path, src: Path): """ Exception raised when 2 files with the same name existing in same directory when creating artifacts Args: exiting_file: File allready exists in artifacts. src: File source which copy or link to same directory. """ self.exiting_file = exiting_file self.src = src self.msg = f"\nFound duplicate files\n1. {src}\n2. {exiting_file}" def dump_link_files(artifact_manager: ArtifactsManager, content_object: ContentObject, dest_dir: Path, created_files: Optional[List[Path]] = None) -> List[Path]: """ Dump content object to requested destination dir. Due to performance issue if known files already created and dump is done for the same object, This function will link files instead of creating the files from scratch (Reduce unify, split etc.) Args: artifact_manager: Artifacts manager object. content_object: Content object. dest_dir: Destination dir. created_files: Pre-created file (Not mandatory). Returns: List[Path]: List of new created files. Raises: DuplicateFiles: Exception occurred if duplicate files exists in the same dir (Protect from override). """ new_created_files = [] # Handle case where files already created if created_files: for file in created_files: new_file = dest_dir / file.name if new_file.exists() and new_file.stat().st_mtime >= artifact_manager.execution_start: raise DuplicateFiles(new_file, content_object.path) else: os.link(file, new_file) new_created_files.append(new_file) # Handle case where object first time dump. 
else: target = dest_dir / content_object.normalize_file_name() if target.exists() and target.stat().st_mtime >= artifact_manager.execution_start: raise DuplicateFiles(target, content_object.path) else: new_created_files.extend(content_object.dump(dest_dir=dest_dir)) return new_created_files def calc_relative_packs_dir(artifact_manager: ArtifactsManager, content_object: ContentObject) -> Path: relative_pack_path = artifact_manager.get_relative_pack_path(content_object) if ((INTEGRATIONS_DIR in relative_pack_path.parts and relative_pack_path.parts[-2] != INTEGRATIONS_DIR) or (SCRIPTS_DIR in relative_pack_path.parts and relative_pack_path.parts[-2] != SCRIPTS_DIR)): relative_pack_path = relative_pack_path.parent.parent else: relative_pack_path = relative_pack_path.parent return relative_pack_path def child_mute(): """Mute child process inorder to keep log clean""" sys.stdout = open(os.devnull, 'w') ################################### # Artifacts Directories functions # ################################### @contextmanager def ArtifactsDirsHandler(artifact_manager: ArtifactsManager): """ Artifacts Directories handler. Logic by time line: 1. Delete artifacts directories if exists. 2. Create directories. 3. If any error occurred -> Delete artifacts directories -> Exit. 4. If finish successfully: a. If zip: 1. Sign packs if needed. 2. Zip artifacts zip. 3. Zip packs for uploading. 4. Delete artifacts directories. 5. log report. Args: artifact_manager: Artifacts manager object. """ try: delete_dirs(artifact_manager) create_dirs(artifact_manager) yield except (Exception, KeyboardInterrupt): delete_dirs(artifact_manager) artifact_manager.exit_code = EX_FAIL else: if artifact_manager.zip_artifacts: sign_packs(artifact_manager) zip_packs(artifact_manager) zip_dirs(artifact_manager) delete_dirs(artifact_manager) report_artifacts_paths(artifact_manager) def delete_dirs(artifact_manager: ArtifactsManager): """Delete artifacts directories""" for artifact_dir in artifact_manager.get_dir_to_delete(): if artifact_dir.exists(): rmtree(artifact_dir) def create_dirs(artifact_manager: ArtifactsManager): """Create artifacts directories""" if artifact_manager.only_content_packs: artifact_manager.content_packs_path.mkdir(parents=True) else: for artifact_dir in [artifact_manager.content_test_path, artifact_manager.content_new_path, artifact_manager.content_packs_path, artifact_manager.content_all_path]: artifact_dir.mkdir(parents=True) def zip_dirs(artifact_manager: ArtifactsManager): """Zip artifacts directories""" if artifact_manager.only_content_packs: make_archive(artifact_manager.content_packs_path, 'zip', artifact_manager.content_packs_path) else: with ProcessPoolHandler(artifact_manager) as pool: for artifact_dir in [artifact_manager.content_test_path, artifact_manager.content_new_path, artifact_manager.content_packs_path, artifact_manager.content_all_path]: pool.schedule(make_archive, args=(artifact_dir, 'zip', artifact_dir)) def zip_packs(artifact_manager: ArtifactsManager): """Zip packs directories""" with ProcessPoolHandler(artifact_manager) as pool: for pack_name, pack in artifact_manager.packs.items(): dumped_pack_dir = os.path.join(artifact_manager.content_packs_path, pack.id) zip_path = os.path.join(artifact_manager.content_uploadable_zips_path, pack.id) pool.schedule(make_archive, args=(zip_path, 'zip', dumped_pack_dir)) def report_artifacts_paths(artifact_manager: ArtifactsManager): """Report artifacts results destination""" global logger logger.info("\nArtifacts created:") if 
artifact_manager.zip_artifacts: template = "\n\t - {}.zip" else: template = "\n\t - {}" logger.info(template.format(artifact_manager.content_packs_path)) if not artifact_manager.only_content_packs: for artifact_dir in [artifact_manager.content_test_path, artifact_manager.content_new_path, artifact_manager.content_all_path]: logger.info(template.format(artifact_dir)) if artifact_manager.zip_artifacts: logger.info(f'\n\t - {artifact_manager.content_uploadable_zips_path}') def sign_packs(artifact_manager: ArtifactsManager): """Sign packs directories""" global logger if artifact_manager.signDirectory and artifact_manager.signature_key: with ProcessPoolHandler(artifact_manager) as pool: with open('keyfile', 'wb') as keyfile: keyfile.write(artifact_manager.signature_key.encode()) futures: List[ProcessFuture] = [] if 'all' in artifact_manager.pack_names: for pack_name, pack in artifact_manager.packs.items(): dumped_pack_dir = os.path.join(artifact_manager.content_packs_path, pack.id) futures.append(pool.schedule(pack.sign_pack, args=(logger, dumped_pack_dir, artifact_manager.signDirectory, ))) else: for pack_name in artifact_manager.pack_names: if pack_name in artifact_manager.packs: pack = artifact_manager.packs[pack_name] dumped_pack_dir = os.path.join(artifact_manager.content_packs_path, pack.id) futures.append(pool.schedule(pack.sign_pack, args=(logger, dumped_pack_dir, artifact_manager.signDirectory, ))) wait_futures_complete(futures, artifact_manager) elif artifact_manager.signDirectory or artifact_manager.signature_key: logger.error('Failed to sign packs. In order to do so, you need to provide both signature_key and ' 'sign_directory arguments.')
""" Create futures which dumps conditionally content/Packs. Args: artifact_manager: Artifacts manager object. pool: Process pool to schedule new processes. Returns: List[ProcessFuture]: List of pebble futures to wait for. """ futures = [] if 'all' in artifact_manager.pack_names: for pack_name, pack in artifact_manager.packs.items(): if pack_name not in IGNORED_PACKS: futures.append(pool.schedule(dump_pack, args=(artifact_manager, pack))) else: for pack_name in artifact_manager.pack_names: if pack_name not in IGNORED_PACKS and pack_name in artifact_manager.packs: futures.append(pool.schedule(dump_pack, args=(artifact_manager, artifact_manager.packs[pack_name]) )) return futures
vggish_train_demo.py
# Copyright 2017 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""A simple demonstration of running VGGish in training mode. This is intended as a toy example that demonstrates how to use the VGGish model definition within a larger model that adds more layers on top, and then train the larger model. If you let VGGish train as well, then this allows you to fine-tune the VGGish model parameters for your application. If you don't let VGGish train, then you use VGGish as a feature extractor for the layers above it. For this toy task, we are training a classifier to distinguish between three classes: sine waves, constant signals, and white noise. We generate synthetic waveforms from each of these classes, convert into shuffled batches of log mel spectrogram examples with associated labels, and feed the batches into a model that includes VGGish at the bottom and a couple of additional layers on top. We also plumb in labels that are associated with the examples, which feed a label loss used for training. Usage: # Run training for 100 steps using a model checkpoint in the default # location (vggish_model.ckpt in the current directory). Allow VGGish # to get fine-tuned. $ python vggish_train_demo.py --num_batches 100 # Same as before but run for fewer steps and don't change VGGish parameters # and use a checkpoint in a different location $ python vggish_train_demo.py --num_batches 50 \ --train_vggish=False \ --checkpoint /path/to/model/checkpoint """ from __future__ import print_function from random import shuffle import numpy as np import tensorflow as tf import vggish_input import vggish_params import vggish_slim flags = tf.app.flags slim = tf.contrib.slim flags.DEFINE_integer( 'num_batches', 30, 'Number of batches of examples to feed into the model. Each batch is of ' 'variable size and contains shuffled examples of each class of audio.') flags.DEFINE_boolean( 'train_vggish', True, 'If True, allow VGGish parameters to change during training, thus ' 'fine-tuning VGGish. If False, VGGish parameters are fixed, thus using ' 'VGGish as a fixed feature extractor.') flags.DEFINE_string( 'checkpoint', 'vggish_model.ckpt', 'Path to the VGGish checkpoint file.') FLAGS = flags.FLAGS _NUM_CLASSES = 3 def
(): """Returns a shuffled batch of examples of all audio classes. Note that this is just a toy function because this is a simple demo intended to illustrate how the training code might work. Returns: a tuple (features, labels) where features is a NumPy array of shape [batch_size, num_frames, num_bands] where the batch_size is variable and each row is a log mel spectrogram patch of shape [num_frames, num_bands] suitable for feeding VGGish, while labels is a NumPy array of shape [batch_size, num_classes] where each row is a multi-hot label vector that provides the labels for corresponding rows in features. """ # Make a waveform for each class. num_seconds = 5 sr = 44100 # Sampling rate. t = np.linspace(0, num_seconds, int(num_seconds * sr)) # Time axis. # Random sine wave. freq = np.random.uniform(100, 1000) sine = np.sin(2 * np.pi * freq * t) # Random constant signal. magnitude = np.random.uniform(-1, 1) const = magnitude * t # White noise. noise = np.random.normal(-1, 1, size=t.shape) # Make examples of each signal and corresponding labels. # Sine is class index 0, Const class index 1, Noise class index 2. sine_examples = vggish_input.waveform_to_examples(sine, sr) sine_labels = np.array([[1, 0, 0]] * sine_examples.shape[0]) const_examples = vggish_input.waveform_to_examples(const, sr) const_labels = np.array([[0, 1, 0]] * const_examples.shape[0]) noise_examples = vggish_input.waveform_to_examples(noise, sr) noise_labels = np.array([[0, 0, 1]] * noise_examples.shape[0]) # Shuffle (example, label) pairs across all classes. all_examples = np.concatenate((sine_examples, const_examples, noise_examples)) all_labels = np.concatenate((sine_labels, const_labels, noise_labels)) labeled_examples = list(zip(all_examples, all_labels)) shuffle(labeled_examples) # Separate and return the features and labels. features = [example for (example, _) in labeled_examples] labels = [label for (_, label) in labeled_examples] return (features, labels) def main(_): with tf.Graph().as_default(), tf.Session() as sess: # Define VGGish. embeddings = vggish_slim.define_vggish_slim(FLAGS.train_vggish) # Define a shallow classification model and associated training ops on top # of VGGish. with tf.variable_scope('mymodel'): # Add a fully connected layer with 100 units. num_units = 100 fc = slim.fully_connected(embeddings, num_units) # Add a classifier layer at the end, consisting of parallel logistic # classifiers, one per class. This allows for multi-class tasks. logits = slim.fully_connected( fc, _NUM_CLASSES, activation_fn=None, scope='logits') tf.sigmoid(logits, name='prediction') # Add training ops. with tf.variable_scope('train'): global_step = tf.Variable( 0, name='global_step', trainable=False, collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP]) # Labels are assumed to be fed as a batch multi-hot vectors, with # a 1 in the position of each positive class label, and 0 elsewhere. labels = tf.placeholder( tf.float32, shape=(None, _NUM_CLASSES), name='labels') # Cross-entropy label loss. xent = tf.nn.sigmoid_cross_entropy_with_logits( logits=logits, labels=labels, name='xent') loss = tf.reduce_mean(xent, name='loss_op') tf.summary.scalar('loss', loss) # We use the same optimizer and hyperparameters as used to train VGGish. optimizer = tf.train.AdamOptimizer( learning_rate=vggish_params.LEARNING_RATE, epsilon=vggish_params.ADAM_EPSILON) optimizer.minimize(loss, global_step=global_step, name='train_op') # Initialize all variables in the model, and then load the pre-trained # VGGish checkpoint. 
sess.run(tf.global_variables_initializer()) vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint) # Locate all the tensors and ops we need for the training loop. features_tensor = sess.graph.get_tensor_by_name( vggish_params.INPUT_TENSOR_NAME) labels_tensor = sess.graph.get_tensor_by_name('mymodel/train/labels:0') global_step_tensor = sess.graph.get_tensor_by_name( 'mymodel/train/global_step:0') loss_tensor = sess.graph.get_tensor_by_name('mymodel/train/loss_op:0') train_op = sess.graph.get_operation_by_name('mymodel/train/train_op') # The training loop. for _ in range(FLAGS.num_batches): (features, labels) = _get_examples_batch() [num_steps, loss, _] = sess.run( [global_step_tensor, loss_tensor, train_op], feed_dict={features_tensor: features, labels_tensor: labels}) print('Step %d: loss %g' % (num_steps, loss)) if __name__ == '__main__': tf.app.run()
_get_examples_batch
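The demo above trains parallel logistic classifiers over multi-hot labels, so the label loss is a per-class sigmoid cross-entropy that loss_op then averages. A small NumPy sketch of that computation for a single example; the logits values below are made up purely for illustration.

import numpy as np

logits = np.array([2.0, -1.0, 0.5])   # one row of the 3-class `logits` tensor
labels = np.array([1.0, 0.0, 0.0])    # multi-hot label row: a "sine" example

sigmoid = 1.0 / (1.0 + np.exp(-logits))
xent = -(labels * np.log(sigmoid) + (1.0 - labels) * np.log(1.0 - sigmoid))
print(xent, xent.mean())              # per-class terms and their mean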
amserialwithdwnld.js
import React, { Component } from 'react'; import Rx from '@reactivex/rxjs'; import AmCharts from "@amcharts/amcharts3-react"; //import mockdata from '../../json/AmchartData.json'; //import apiData from '../../json/ApiData.json'; import { connect } from "react-redux" // var env = process.env.NODE_ENV || 'dev'; // const envVar = require('../../uiconfig/config.' + env); //const ProviderCountAPI = envVar.uri + envVar.port + envVar.registry + envVar.project + apiData.apis.getGraphCount; //const ProviderCountAPI = 'http://127.0.0.1:9001/registry/pegasus/provider/graphCount'; // const datatest = { // year: '2017' // } // const InitParamProv = { // method:'POST', // headers : {'Content-Type': 'application/json'}, // body: JSON.stringify(datatest) // }; // const InitParamProvRxjs = { // year: '2017' // }; class
extends React.Component { // constructor(props) { // super(props); // const initialWidth = window.innerWidth > 0 ? window.innerWidth : 500; // this.state = { // windowWidth: initialWidth - 100, // componentWidth: 300, // providerArray: [], // messages: [] // }; // } // getFromProviderApiPromise() { // fetch(ProviderCountAPI, InitParamProv). // then((response) => response.json()) // .then((responseJson) => { // this.setState({ providerArray: responseJson.result }); // console.log('Promise Call:', this.state.providerArray); // }); // } // getFromProviderApiRedu() { // const fetchData = Rx.Observable // .ajax // .post(ProviderCountAPI, InitParamProvRxjs) // .scan((messages, message) => [message].concat(messages), []) // .subscribe(data => { // //console.log(data[0].response.result); // this.setState({ messages: data[0].response.result }) // } // ); // return fetchData; // } /** * * Rxjs with POST * @returns * @memberof AmSerialAreaChart */ // getFromProviderApiRxjs() { // const fetchData = Rx.Observable // .ajax // .post(ProviderCountAPI, InitParamProvRxjs) // .scan((messages, message) => [message].concat(messages), []) // .subscribe(data => { // //console.log(data[0].response.result); // this.setState({ messages: data[0].response.result }) // } // ); // return fetchData; // } //componentDidMount() { //this.getFromProviderApiPromise(); //this.getFromProviderApiRedu(); //this.getFromProviderApiRxjs(); //} render() { //console.log(this.props.providerReducer.providerData.result); //console.log(this.props); //const {tweets,this.props.providerReducer.providerData.result } = this.props; //console.log("from amchart", this.props.providerReducer.providerData); //console.log("from 2 amchart", this.state.messages); //var config = ; return ( <div className="areaChartAreawithdownload"> {this.props.providerReducer.providerData.result ? ( <AmCharts.React style={{ width: "100%", height: "300px" }} options={{ "hideCredits": true, "type": "serial", "theme": "light", "marginRight": 80, "dataProvider": this.props.providerReducer.providerData.result, "valueAxes": [{ "position": "left", "title": "Provider Count" }], "graphs": [{ "id": "g1", "fillAlphas": 0.4, "valueField": "Active", "balloonText": "<div style='margin:2px; font-size:10px;'>Active:<b>[[value]]</b></div>" }, { "id": "g2", "lineColor": "#800080", "fillAlphas": 0.4, "valueField": "Inactive", "balloonText": "<div style='margin:2px; font-size:10px;'>Inactive:<b>[[value]]</b></div>" }], "chartScrollbar": { "graph": "g1", "scrollbarHeight": 40, "backgroundAlpha": 0, "selectedBackgroundAlpha": 0.1, "selectedBackgroundColor": "#888888", "graphFillAlpha": 0, "graphLineAlpha": 0.5, "selectedGraphFillAlpha": 0, "selectedGraphLineAlpha": 1, "autoGridCount": true, "color": "#AAAAAA" }, "chartCursor": { "categoryBalloonDateFormat": "JJ:NN, DD MMMM", "cursorPosition": "mouse" }, "categoryField": "Date", "categoryAxis": { "minPeriod": "mm", "parseDates": true }, "legend": { "data": [{ title: "Active", color: "rgb(114,188,247)", }, { title: "InActive", color: "#800080", }], "position": "top" }, "export": { "enabled": true, "dateFormat": "YYYY-MM-DD HH:NN:SS" } }} /> ) : ( <div> <div className="ui active inverted dimmer"> <div className="ui medium text loader">Loading</div> </div> </div> )} </div> ) } } const mapStateToProps = (state) => { return state; } const DefaultApp = connect(mapStateToProps)(AmSerialAreaChartWithDwnld); export default DefaultApp;
AmSerialAreaChartWithDwnld
state_flags.py
# Copyright 2021 Google # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Executable flags used for learn_states_*** modules.""" from absl import flags FLAGS = flags.FLAGS
"n_paulis", 20, "Number of random Pauli string circuits to generate." ) # Note this experiment is far less efficient than learn_dynamics_***. # Here batch_size shots are drawn via run_sweep until n_shots measurements # are reached for each circuit. flags.DEFINE_integer( "n_sweeps", 500, "Number of sweeps to send over the wire per circuit (value does not affect results).", ) flags.DEFINE_integer( "n_shots", 500, "Number of measurements to draw from each individual circuit." ) flags.DEFINE_string( "save_dir", "./recirq/qml_lfe/data", "Path to save experiment data (must already exist).", ) flags.DEFINE_bool("use_engine", False, "Whether or not to use quantum engine.")
flags.DEFINE_integer("n", None, "Number of qubits to use.") flags.DEFINE_integer(
display_hdf5.py
from utils.misc import read_hdf5
from utils.misc import extract_deps_from_weights_file
import sys
import numpy as np

wf = sys.argv[1]
deps = extract_deps_from_weights_file(wf)
di = read_hdf5(wf)

num_kernel_params = 0
conv_kernel_cnt = 0
matrix_param_cnt = 0
vec_param_cnt = 0
bias_cnt = 0
beta_cnt = 0
gamma_cnt = 0
mu_cnt = 0
var_cnt = 0

for name, array in di.items():
    if array.ndim in [2, 4]:
        num_kernel_params += array.size
    if 'base_mask' in name:
        print(name, array)
    print(name, array.shape, np.mean(array), np.std(array),
          ' positive {}, negative {}, zeros {}, near-zero {}'.format(
              np.sum(array > 0), np.sum(array < 0), np.sum(array == 0), np.sum(np.abs(array) <= 1e-5)))
    if array.ndim == 2:
        matrix_param_cnt += array.size
    elif array.ndim == 1:
        vec_param_cnt += array.size
    elif array.ndim == 4:
        conv_kernel_cnt += array.size
    if 'running_mean' in name or 'moving_mean' in name:
        mu_cnt += array.size
    elif 'running_var' in name or 'moving_var' in name:
        var_cnt += array.size
    elif ('weight' in name and 'bn' in name.lower()) or 'gamma' in name:
        gamma_cnt += array.size
    elif ('bias' in name and 'bn' in name.lower()) or 'beta' in name:
        beta_cnt += array.size
    elif 'bias' in name:
        bias_cnt += array.size
    elif 'spatial_mask' in name:
        print(array)
        print(np.sum(array))

print('number of kernel params: ', num_kernel_params)
print('vec {}, matrix {}, conv {}, total {}'.format(vec_param_cnt, matrix_param_cnt, conv_kernel_cnt,
                                                    vec_param_cnt + matrix_param_cnt + conv_kernel_cnt))
print('mu {}, var {}, gamma {}, beta {}, bias {}'.format(mu_cnt, var_cnt, gamma_cnt, beta_cnt, bias_cnt))
print('Model deps: {}'.format(deps))
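display_hdf5.py assumes utils.misc.read_hdf5 returns a flat {name: ndarray} mapping over every dataset in the weights file. A minimal h5py-based sketch of a reader with that behaviour; this is an assumption about the helper, not its actual implementation.

import h5py
import numpy as np


def read_hdf5_flat(path):
    """Collect every dataset in the file into a flat name -> ndarray dict."""
    arrays = {}
    with h5py.File(path, 'r') as f:
        def collect(name, obj):
            if isinstance(obj, h5py.Dataset):
                arrays[name] = np.asarray(obj)
        f.visititems(collect)
    return arrays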
app.py
from calblog import app

if __name__ == '__main__':
app.run(port=34630, debug=True)
main_de.js.uncompressed.js
define({
'builder/nls/builder':{"common":{"ok":"OK","cancel":"Abbrechen","save":"Speichern","doNotSave":"Nicht speichern","saved":"Gespeichert","saving":"Wird gespeichert","widgetName":"Widget-Name","emptyMessage":"Bitte wählen Sie ein Widget aus.","chooseWidget":"Widget auswählen","deleting":"Löschen","duplicating":"Duplizieren","uploading":"Hochladen","updating":"Wird aktualisiert"},"apps":{"welcomeMessage":"Willkommen bei Web AppBuilder for ArcGIS!","appCreate":"Neu erstellen","appCreateTitle":"Neue App erstellen","appUpload":"Hochladen","appImport":"Importieren","appName":"App-Name","appDesc":"Beschreibung","appTime":"Änderungsdatum","appModified":"Geändert von","appCreator":"Erstellt von","importTips":"Eine App aus dem ArcGIS-Portal importieren","importApp":"App importieren","upgradeApp":"App aktualisieren","upgradeBtn":"Aktualisieren","checkVersionError":"Die App ${APP_NAME} konnte nicht importiert werden! Weitere Informationen finden Sie im Protokoll.","importing":"Importieren","upgrading":"Aktualisieren","upgradeContent":"Diese App wurde mit einer älteren Version ${OLD_VERSION} von Web AppBuilder erstellt. Sie müssen Sie auf Version ${LATEST_VERSION} aktualisieren, bevor sie in diesem Web AppBuilder verwendet werden kann.","upgradeContentFooter":"Klicken Sie zum Aktualisieren und Importieren auf \"Aktualisieren\". Klicken Sie auf \"Abbrechen\", um den Vorgang zu beenden.","importUpperVersion":"Die App ${APP_NAME} konnte nicht importiert werden. Diese App wurde mit einer neueren Version ${VERSION_NUMBER} von Web AppBuilder erstellt.","importSuccess":"Die App ${APP_NAME} wurde erfolgreich aktualisiert und importiert!","upgradeFailure":"Die App ${APP_NAME} konnte nicht aktualisiert werden! Weitere Informationen finden Sie im Protokoll.","importFailure":"Die App ${APP_NAME} konnte nicht importiert werden! Weitere Informationen finden Sie im Protokoll.","edit":"Bearbeiten","launch":"Starten","createFromHere":"Von hier aus erstellen","remove":"Entfernen","deleteMenu":"Löschen","duplicate":"Duplizieren","editAppInfo":"App-Info bearbeiten","download":"Herunterladen","agolTemp":"Als Vorlage exportieren","createAppFailedMeg":"App konnte nicht erstellt werden.","noAppName":"App-Name muss angegeben werden.","confirmDeleteApp":"Soll diese App wirklich gelöscht werden?","deleteAppTitle":"App löschen","downloadFailedTip":"App konnte nicht heruntergeladen werden. Versuchen Sie es erneut.","savePublish":"Geben Sie die Informationen unten ein, um Ihre Webanwendung zu speichern und zu veröffentlichen. Zeigen Sie die Elementdetails dieser Webanwendung an, um die URL zur Anwendung zu ermitteln.","appBuilderMsg":"Hilfe bei den ersten Schritten","appBuilderTitle":"Web AppBuilder for ArcGIS","templateTitle":"Sie sind im Begriff die aktuellen Parametereinstellungen zu exportieren und ein neues Web-App-Vorlagenelement in Ihrem Portal zu erstellen.","explain1":"Das exportierte Vorlagenelement verwendet die Standard-App-URL aus Web AppBuilder. Wenn Sie diese App für einen anderen Webserver bereitstellen, müssen Sie diese URL auf der Detailseite des Elements ändern.","titleLabel":"Titel:","tagsLabel":"Tags:","summaryLabel":"Beschreibung:","saveFolderLabel":"Speichern in Ordner:","shareSameLevel":"Diese App auf die gleiche Weise freigeben wie die Karte","createAppUrlError":"App erfolgreich erstellt, aber 'App freigeben' fehlgeschlagen.","saveAsFaied":"Speichern unter fehlgeschlagen.","deleteFalse":"App konnte nicht entfernt werden. 
Die Antwort von removeApp lautet 'nicht erfolgreich'!","deleteError":"Die App konnte nicht entfernt werden.","duplicateFalse":"Die App konnte nicht dupliziert werden. Die Antwort von Duplizieren lautet 'nicht erfolgreich'!","duplicateError":"Die App konnte nicht dupliziert werden.","editAppFalse":"Die Antwort von updateApp lautet 'nicht erfolgreich'.","editAppTrue":"App wurde erfolgreich aktualisiert, die App-ID lautet ","editError":"'App-Name' oder 'App-Beschreibung' konnte nicht aktualisiert werden!","addTags":"Tag(s) hinzufügen","_copy":"-copy","noTitleMessage":"Sie müssen einen Titel für die Vorlage sowie Tags angeben, damit die Karte in Suchvorgängen gefunden werden kann.","exportTemplateFailed1":"Export beim Hinzufügen von templateItem zum Portal fehlgeschlagen.","exportTemplateFailed2":"Export beim Abrufen von templateGroup aus dem Portal fehlgeschlagen.","exportTemplateFailed3":"Export erfolgreich! Das exportierte Vorlagenelement wurde jedoch nicht für die Gruppe €œWeb-App-Vorlagen” freigegeben. Sie müssen es manuell für die Gruppe freigeben, damit es zur Erstellung von Apps verfügbar ist.","exportTemplateFailed4":"Export erfolgreich! Das Element konnte jedoch nicht für templateGroup freigegeben werden.","exportTemplateFailed5":"Export beim Abrufen des Portal-Benutzers fehlgeschlagen.","exportTemplateFailed31":"Export erfolgreich! Ihnen sind jedoch anscheinend keine Berechtigungen zum Freigeben der exportierten Vorlage","exportTemplateFailed32":"für die in Ihrer Organisation angegebene Vorlagengruppe zugewiesen. Sie müssen sie manuell zur Gruppe hinzufügen, sobald darauf zugegriffen werden kann.","descriptionTemplateUrl1":"Klicken Sie","descriptionTemplateUrl2":"hier","descriptionTemplateUrl21":"Element","descriptionTemplateUrl3":", um die Details der exportierten Vorlage anzuzeigen.","exportSuccessful":"Erfolgreich als Web-App-Vorlage exportiert.","exportMessageTitle":"Als Web-App-Vorlage exportieren","templateDefaultSnippet":"Eine von Web AppBuilder erstellte konfigurierbare Anwendungsvorlage.","exportSuccessfulUpdate":"Erfolgreich als Web-App-Vorlage aktualisiert.","exportMessageTitleUpdate":"Web-App-Vorlage aktualisieren","exportTemplateFailed6":"Aktualisierung konnte beim Abrufen des Portal-Benutzers nicht ausgeführt werden.","exportTemplateFailed7":"Vorlagenelement konnte nicht aktualisiert werden."},"header":{"appDefaultAttributes":"Standard App-Attribute","help":"Hilfe","settings":"Einstellungen","signIn":"Anmelden","signOut":"Abmelden","saveSuccessfully":"Speichern erfolgreich!","saveFailed":"App konnte nicht gespeichert werden.","home":"Startseite","gallery":"Galerie","map":"Karte","groups":"Gruppen","content":"Eigene Inhalte","organization":"Eigene Organisation","logoTitle":"Web AppBuilder for ArcGIS","profile":"Profil","backToHomePage":"Zurück zur Startseite"},"settings":{"settings":"Standard App-Attribute festlegen","lSettings":"App-Attribute festlegen","showAdSetting":"+ Erweiterte Einstellungen anzeigen","hideAdSetting":"Erweiterte Einstellungen ausblenden...","bingMapKey":"Bing Maps-Schlüssel","bingMapId":"Bing Maps-ID","defaultPortalUrl":"Standard-Portal-URL","portalUrl":"Portal-URL","geometryServiceUrl":"Geometrie-Service-URL","geometryServiceError":"Die eingegebene URL des Geometrie-Service ist ungültig oder es kann nicht darauf zugegriffen 
werden.","routeServiceUrl":"Routen-Service-URL","geocodeServiceUrl":"Geokodierungs-Service-URL","printTaskUrl":"Druck-Task-URL","httpProxyUrl":"HTTP-Proxy-URL","appId":"App-ID","locale":"Gebietsschema","save":"Speichern","themeRepo":"Design-Repository","widgetRepo":"Widget-Repository","loadHelperServices":"Hilfs-Services laden","loadServiceError":"Die Portal-URL ist ungültig. Die Portal-URL hat normalerweise folgendes Muster: http(s)://www.arcgis.com/ oder http(s)://&lt;portal_server&gt;/&lt;instance_name&gt;","webMapError":"Standard-Webkarte kann nicht über portalUrl abgerufen werden","helpText":{"bingMapKey":"Für den Zugang zu Bing Maps und Bing Geocoder ist der Bing Maps-Schlüssel erforderlich.","defaultPortalUrl":"Die URL von ArcGIS Online oder Ihres lokalen Portal for ArcGIS.","geometryServiceUrl":"Der Geometrie-Service, der von bestimmten Widgets für geometrische Funktionen verwendet wird.","geocodeServiceUrl":"Der Geokodierungs-Service, der von bestimmten Widgets für Geokodierungsvorgänge verwendet wird.","routeServiceUrl":"Der Routen-Service, der von bestimmten Widgets für Routingvorgänge verwendet wird.","printTaskUrl":"Der Druck-Task, der von bestimmten Widgets für Druckvorgänge verwendet wird.","httpProxyUrl":"Für den Zugang zu geschütztem Inhalt oder Inhalt, der auf einem anderen Server gehostet wird als Ihre Anwendung (cross-domain Problem), ist ein HTTP-Proxy erforderlich.","locale":"Gebietsschema, das für Ihre Web-App verwendet wird.","appId":"Die ID, die in der App für die Anmeldung bei ArcGIS Online mit der OAuth2-Autorisierung verwendet wird.","themeRepo":"Design-Repository, das in Web AppBuilder verwendet wird.","widgetRepo":"Widgets-Repository, das in Web AppBuilder verwendet wird."}},"leftPane":{"themes":"Designs","map":"Karte","widgets":"Widgets","attributes":"Attribute","width":"Breite","height":"Höhe","selectDevice":"Wählen Sie oben ein Gerät für die Vorschau oder zum Anpassen aus","share":"Freigeben","previewMore":"Vorschau","back2Configure":"Konfigurieren","unSaveMegBegin":"Möchten Sie die Änderungen speichern ","unSaveMegEnd":"?","canNotSaveMeg":"Dies ist eine Probesversion von Web AppBuilder for ArcGIS. Damit Ihre Anwendung von Web AppBuilder for ArcGIS gespeichert werden kann, müssen Sie sich in Ihrer Organisation anmelden.","saveSuccessMeg":"Speichern erfolgreich","saveFailedMeg":"Speichern der App fehlgeschlagen","toHomeTitle":"Web AppBuilder for ArcGIS","saveAs":"Speichern unter","launch":"Starten","unloadMessage":", an der Sie arbeiten, enthält nicht gespeicherte Änderungen.","theApp":"Die Anwendung","qrcodeDesc":"QR-Code scannen, um die Anwendung auf Ihrem Mobilgerät anzuzeigen"},"themes":{"themeTitle":"Designs","styleTitle":"Style","layoutTitle":"Layout","appLayout":"Layout der App","themeChangeMessage":"Es werden nur im Controller konfigurierte Widgets von dem neuen Design übernommen. Die Bildschirm-Widgets müssen in dem neuen Design überprüft und neu konfiguriert werden. Möchten Sie den Vorgang fortsetzen?"},"mapConfig":{"map":"Karte","selectWebMap":"Webkarte auswählen","searchMapName":"Nach Kartenname suchen...","searchNone":"Die gesuchten Informationen konnten nicht gefunden werden. Versuchen Sie es mit anderen Informationen.","groups":"Gruppen","noneGroups":"Keine Gruppen","signInTip":"Ihre Anmeldesitzung ist abgelaufen. 
Aktualisieren Sie Ihren Browser, um sich erneut bei Ihrem Portal anzumelden.","signIn":"Anmelden","publicMap":"Öffentlichkeit","myOrganization":"Eigene Organisation","myGroup":"Eigene Gruppen","myContent":"Eigene Inhalte","setExtentTip":"Navigieren Sie über die Karte rechts zur richtigen Ausdehnung, und klicken Sie hier auf die Schaltfläche \"Anfangsausdehnung festlegen\", um die erste Kartenausdehnung der App festzulegen.","setExtent":"Anfangsausdehnung festlegen","count":"Anzahl","fromPortal":"aus Portal","fromOnline":"aus ArcGIS.com","noneThumbnail":"Miniaturansicht nicht verfügbar","changeMap":"Webkarte auswählen","owner":"Besitzer","signInTo":"Melden Sie sich an bei","lastModified":"Zuletzt geändert","moreDetails":"Mehr Details","originalExtentTip":"Ursprüngliche Ausdehnung der Karte wiederherstellen","setInitialExtent":"Aktuelle Kartenansicht verwenden","restoreInitialExtent":"Standardausdehnung der Webkarte verwenden","statement":"Geben Sie die anfängliche räumliche Ausdehnung der Karte beim Start der Anwendung an.","mapSwitchConfimation":"Kartenwechsel bestätigen","mapSwitchConfimationTip":"Sie wechseln zu einer neuen Karte. Widgets, die für die vorherige Karte konfiguriert wurden, sind möglicherweise ungültig. Wenn sie ungültig sind, wird die Standard-Widget-Konfiguration angewendet. Sie sollten diese Widgets überprüfen und bei Bedarf neu konfigurieren. Wenn Sie zur vorherigen Karte wechseln, werden die zugehörigen Widget-Konfigurationen wiederhergestellt.","theseWidgetsAre":"Diese Widgets sind","sureToSwitch":"Möchten Sie wirklich zu dieser Webkarte wechseln?","clickChooseWebMap":"Klicken, um Webkarte auszuwählen","viewItemDetails":"Elementdetails anzeigen"},"widgets":{"openAtStart":"Beim Start öffnen","jsonEditor":"JSON-Editor","back":"Zurück","widgetLabel":"Widget-Beschriftung","changeIcon":"Widget-Symbol ändern","more":"Weitere Informationen zu diesem Widget","dropWidgetMessage":"Möchten Sie dieses Widget wirklich löschen?","dropGroupMessage":"Möchten Sie diese Gruppe wirklich löschen?","setControlledWidgets":"Widgets in diesem Controller festlegen","setControlledWidgetsBy":"Widgets festlegen, die verwaltet werden von ","noConfig":"Keine Konfiguration mehr.","notFinished":"Noch nicht abgeschlossen!","labelExists":"Beschriftung ist vorhanden","configLabel":"Konfigurieren","labelRequired":"Beschriftung erforderlich","widgets":"Widgets","defaultWidgetLabel":"Widget","addWidget":"Hier klicken, um Widget hinzuzufügen","hideWidget":"Dieses Widget ausblenden","showWidget":"Dieses Widget anzeigen","removeWidget":"Dieses Widget entfernen","configureWidget":"Dieses Widget konfigurieren"},"groups":{"label":"Beschriftung","newGroup":"Neue Gruppe"},"attributes":{"headerTitle":"Branding","headerDesc":"Fügen Sie LOGO, Titel und Untertitel für die App hinzu.","addLogo":"Zum Hinzufügen eines LOGOS hier klicken","title":"Titel","subtitle":"Untertitel","appIdTitle":"App-ID","appIdAbout":"Info zu","linksTitle":"Links","addLink":"Neuen Link hinzufügen","proxyUrlPlaceholder":"Doppelklicken, um die Proxy-URL zu bearbeiten","prefixPlaceholder":"Doppelklicken, um das Präfix zu bearbeiten","prefixHeader":"Präfix","proxyUrlHeader":"Proxy-URL","labelPlaceholder":"Doppelklicken, um den Link-Namen zu bearbeiten","urlPlaceholder":"Doppelklicken, um die Link-URL zu bearbeiten","proxySetting":"Proxy-Einstellung","proxyRulesLabel":"Legen Sie Proxy-Regeln zum Routen von Anfragen Ihrer Anwendung fest: ","alwaysUseProxyLabel":"Verwenden Sie den folgenden Proxy für alle Anforderungen, die den oben genannten Regeln 
nicht entsprechen: ","addProxyRule":"Neue Proxy-Regel hinzufügen","useProxySetting":"Proxy verwenden","proxyUrl":"Proxy-URL","editLabel":"Bearbeiten","deleteLabel":"Löschen"},"serviceUtils":{"duplicateError":"App konnte nicht dupliziert werden.","duplicateUrlError":"App wurde erfolgreich dupliziert, URL konnte jedoch nicht aktualisiert werden.","addItemError":"Element konnte nicht hinzugefügt werden.","addItemUnsuccess":"AddItem-Antworten nicht erfolgreich","createUrlError":"App wurde erfolgreich erstellt, URL konnte jedoch nicht aktualisiert werden.","createError":"App konnte nicht erstellt werden.","createCodeAttachmentError":"App wurde erfolgreich erstellt, \"Code-Anlage\" konnte jedoch nicht hinzugefügt werden."},"setPortalUrl":{"tip":"Geben Sie die URL zu Ihrer Organisation oder zu Portal for ArcGIS an","continueBtn":"Weiter","example":"Beispiel","faq":"FAQs","errPrefix":"Kein Zugriff ","errRemind":"Es konnte kein Server mit dem angegebenen Hostnamen gefunden werden","errOrg":"Geben Sie eine vollständige URL Ihrer ArcGIS Online-Organisation ein, z. B. http://myorg.maps.arcgis.com"},"portalSignIn":{"errorMessage":"Falscher Benutzername oder falsches Kennwort","portalError":"Portal-Fehler","username":"Benutzername","password":"Kennwort","forgot":"Kennwort vergessen","remember":"Benutzername und Kennwort speichern","signin":"Anmelden","back":"Zurück","con":"Fortfahren","namedUserTip":"Web AppBuilder for ArcGIS unterstützt keine öffentlichen Konten. Melden Sie sich mit einem Organisationskonto an.","signingIn":"Anmelden","registeringAppID":"App-ID wird registriert","here":"hier","appIdTip1":"Da Web AppBuilder die angegebene Organisation bzw. das angegebene Portal zum ersten Mal verwendet, ist eine App-ID erforderlich, um den OAuth2-Anmeldeprozess zu unterstützen.","appIdTip2":"Geben Sie Ihren Benutzernamen und Ihr Kennwort für die Organisation oder das Portal an. Nachdem dieser Vorgang erfolgreich abgeschlossen wurde, registriert Web AppBuilder automatisch eine App-ID mit einer neuen Web Mapping-Anwendung namens 'Web AppBuilder for ArcGIS' unter 'Eigene Inhalte'. Das Element darf weder gelöscht noch geändert werden. Weitere Informationen zum Registrieren der App-ID finden Sie "},"agolTemplate":{"title":"Konfigurierbare Parameter festlegen","title2":"Als Web-App-Vorlage exportieren","rightPartHead":"Konfigurierbare Parameter für die Vorlage festlegen","rightPartSubhead":"Parameterkategorien","rightPartHead2":"JSON-Code für Vorlagen","viewJsonCode":"JSON-Code anzeigen","viewJsonBack":"Zurück zu \"Einstellungen\"","ok":"OK","save":"Speichern","download":"Exportieren","update":"Aktualisieren","unSaveMeg":"Möchten Sie die Änderungen speichern?","cancelPopupTitle":"Vorlage für Webkarten-Apps","saveSuccessMeg":"Speichern erfolgreich","appNameTitle":"App-Name: ","exportTitle":"Als Vorlage exportieren","fieldLabel":"Feldbeschriftung:","fieldType":"Feldtyp:","typeSelectString":"String","typeSelectBoolean":"Boolesch","typeSelectNumber":"Zahl","typeSelectOptions":"Optionen","tooltip":"QuickInfo:","placeholder":"Platzhalter:","textBoxType":"Textfeldtyp:","textBoxTypeRichtext":"richtext","textBoxTypeTextarea":"textarea","textBoxTypeTextbox":"textbox","minvalue":"Min-Wert:","maxvalue":"Max-Wert:","places":"Orte:","addNewOption":"Neue Option hinzufügen","label":"Beschriftung:","value":"Wert:"},"_localized":{}} });
LibConfig.ts
import { Tags } from "./Tags.ts"; import { Logger } from "./Logger.ts"; export interface LibConfig { /** * How to connect to a Server. If omitted, we'll try to connect to a UDP server on localhost:8125. */ server?: UDPConfig | TCPConfig | UnixConfig | LoggerConfig; /** * There are many different statistic products out there, that all speak the StatsD protocol, or at least some variant * of it. This property allows us to select different StatD dialects, which could unlock some new features, and may * also change some subtle features behind-the-scenes to work better with this server. (Like how tags get normalized, * for example.) * * Valid dialects: * - `"statsd"`: The official StatsD server, written by Etsy with Node.js. * - `"datadog"`: The version of StatsD supported by DogStatD, the Datadog stat server. * * @default "statsd" */ dialect?: "statsd" | "datadog"; /** * The sampling rate we'll use for metrics. This should be a value between 0 and 1, inclusive. * * For example, if this value is set to 0.1, then 1 out of 10 calls to .count() will actually result in a counter * being sent to the server. HOWEVER, the server will then increase the counter by 10x the amount it normally would * have been increased. This will result in less data being sent over the wire, but with mostly the same ending * values. (Albeit with a bit more error.) * * @default 1.0 (Don't use random sampling) */ sampleRate?: number; /** * StatsD occasionally has some erratic behaviors when dealing with sampleRates. For example, relative gauges don't * have any sampleRate corrections on the server-side, and so would result in the wrong number of adjustments being * made to the data. Same with sets: the wrong number of unique values will be reported if sampleRates are used. * * This setting, when true, will cause us to ignore the sampleRate in metrics that wouldn't handle it well. (Relative * gauges, and Sets.) * * @default true (Only use sampleRate when safe) */ safeSampleRate?: boolean; /** * When we get a metric to send, we'll wait (at most) this number of milliseconds before actually sending it. This * gives us some time for other metrics to be queued up, so we can send them all at once, in the same packet. We may * decide to send the packet sooner (like if it gets too big for the MTU) but in general, this is the maximum amount * of time that your metric will be delayed. * * @default 1000 (1 second) */ maxDelayMs?: number; /** * Tags are key-value pairs that are appended to each metric. * * @default {} */ globalTags?: Tags; /** * This library will not log anything by default. However, if you'd get a little more feedback about how the StatsD * connection is going, you can get that by providing a logger object here. The type of this parameter is pretty * vague, but should be compatible with most versions of the std library's logger: * * @example * import * as log from "https://deno.land/[email protected]/log/mod.ts"; * * await log.setup({ ... }) * * const c = new StatsDClient({ * ... * logger: log.getLogger("statsd"), * }); */ logger?: Logger; } /** * Information needed to connect to a UDP StatsD server. */ export interface UDPConfig { proto: "udp"; /** * The server that we'll send our stats to. *
* @default "localhost" */ host?: string; /** * The server port number that we'll connect to. * * @default 8125 */ port?: number; /** * The Maximum Transmission Unit for the network connection. * * We use this number when figuring out the maximum amount of data that we can send in a single network packet. A * smaller number means more packets will have to be sent, but if we set this value _TOO_ high, it might mean that * packets won't arrive. * * 1500 bytes is usually safe enough for most server networks. Fancy networks that have Jumbo Frames enabled might be * able to bump this value higher, like to 8932 bytes, but worse networks (like if these packets are routed through * the wider internet) might need to reduce the MTU to 512. It all depends on the routers that these packets get * routed through, and how they were configured. * * @default 1500 (Enough bytes for most server networks) */ mtu?: number; } /** * Information needed to connect to a TCP StatsD server. */ export interface TCPConfig { proto: "tcp"; /** * The server that we'll send our stats to. * * @default "localhost" */ host?: string; /** * The server port number that we'll connect to. * * @default 8125 */ port?: number; /** * Sent metrics are queued up for a short bit (see: maxDelayMs) before sending, to increase the number of metrics in * each TCP frame. However, if the backlog exceeds this number of items, we'll send the items sooner. * * @default 100 */ maxQueue?: number; } /** * Information needed to connect to a UDS (Unix Domain Socket) StatsD server. */ export interface UnixConfig { proto: "unix"; /** * The path to the socket file. */ path: string; /** * Sent metrics are queued up for a short bit (see: maxDelayMs) before sending, to decrease the number of filesystem * writes. However, if the backlog exceeds this number of items, we'll send the items sooner. * * @default 100 */ maxQueue?: number; } /** * If the server is set to a "logger" proto, then we'll only log metrics to the standard logger at INFO levels. Useful * for debugging, and if you don't want to actually send anything over the network. */ export interface LoggerConfig { proto: "logger"; }
transform_aggregator.rs
// Copyright 2022 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::sync::Arc; use common_datablocks::DataBlock; use common_datablocks::HashMethodKind; use common_exception::ErrorCode; use common_exception::Result; use crate::pipelines::new::processors::port::InputPort; use crate::pipelines::new::processors::port::OutputPort; use crate::pipelines::new::processors::processor::Event; use crate::pipelines::new::processors::processor::ProcessorPtr; use crate::pipelines::new::processors::transforms::aggregator::*; use crate::pipelines::new::processors::AggregatorTransformParams; use crate::pipelines::new::processors::Processor; use crate::sessions::QueryContext; pub struct TransformAggregator; impl TransformAggregator { pub fn try_create_final( input_port: Arc<InputPort>, output_port: Arc<OutputPort>, transform_params: AggregatorTransformParams, ctx: Arc<QueryContext>, ) -> Result<ProcessorPtr> { let aggregator_params = transform_params.aggregator_params; if aggregator_params.group_columns_name.is_empty() { return AggregatorTransform::create( input_port, output_port, FinalSingleStateAggregator::try_create(&aggregator_params)?, ); } match aggregator_params.aggregate_functions.is_empty() { true => match transform_params.method { HashMethodKind::KeysU8(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU8FinalAggregator::<false>::create(ctx, method, aggregator_params)?, ), HashMethodKind::KeysU16(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU16FinalAggregator::<false>::create(ctx, method, aggregator_params)?, ), HashMethodKind::KeysU32(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU32FinalAggregator::<false>::create(ctx, method, aggregator_params)?, ), HashMethodKind::KeysU64(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU64FinalAggregator::<false>::create(ctx, method, aggregator_params)?, ), HashMethodKind::SingleString(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, SingleStringFinalAggregator::<false>::create(ctx, method, aggregator_params)?, ), HashMethodKind::Serializer(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, SerializerFinalAggregator::<false>::create(ctx, method, aggregator_params)?, ), }, false => match transform_params.method { HashMethodKind::KeysU8(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU8FinalAggregator::<true>::create(ctx, method, aggregator_params)?, ), HashMethodKind::KeysU16(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU16FinalAggregator::<true>::create(ctx, 
method, aggregator_params)?, ), HashMethodKind::KeysU32(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU32FinalAggregator::<true>::create(ctx, method, aggregator_params)?, ), HashMethodKind::KeysU64(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU64FinalAggregator::<true>::create(ctx, method, aggregator_params)?, ), HashMethodKind::SingleString(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, SingleStringFinalAggregator::<true>::create(ctx, method, aggregator_params)?, ), HashMethodKind::Serializer(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, SerializerFinalAggregator::<true>::create(ctx, method, aggregator_params)?, ), }, } } pub fn try_create_partial( input_port: Arc<InputPort>, output_port: Arc<OutputPort>, transform_params: AggregatorTransformParams, ctx: Arc<QueryContext>, ) -> Result<ProcessorPtr> { let aggregator_params = transform_params.aggregator_params; if aggregator_params.group_columns_name.is_empty() { return AggregatorTransform::create( input_port, output_port, PartialSingleStateAggregator::try_create(&aggregator_params)?, ); } match aggregator_params.aggregate_functions.is_empty() { true => match transform_params.method { HashMethodKind::KeysU8(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU8PartialAggregator::<false>::create(ctx, method, aggregator_params), ), HashMethodKind::KeysU16(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU16PartialAggregator::<false>::create(ctx, method, aggregator_params), ), HashMethodKind::KeysU32(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU32PartialAggregator::<false>::create(ctx, method, aggregator_params), ), HashMethodKind::KeysU64(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU64PartialAggregator::<false>::create(ctx, method, aggregator_params), ), HashMethodKind::SingleString(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, SingleStringPartialAggregator::<false>::create(ctx, method, aggregator_params), ), HashMethodKind::Serializer(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, SerializerPartialAggregator::<false>::create(ctx, method, aggregator_params),
HashMethodKind::KeysU8(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU8PartialAggregator::<true>::create(ctx, method, aggregator_params), ), HashMethodKind::KeysU16(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU16PartialAggregator::<true>::create(ctx, method, aggregator_params), ), HashMethodKind::KeysU32(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU32PartialAggregator::<true>::create(ctx, method, aggregator_params), ), HashMethodKind::KeysU64(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, KeysU64PartialAggregator::<true>::create(ctx, method, aggregator_params), ), HashMethodKind::SingleString(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, SingleStringPartialAggregator::<true>::create(ctx, method, aggregator_params), ), HashMethodKind::Serializer(method) => AggregatorTransform::create( transform_params.transform_input_port, transform_params.transform_output_port, SerializerPartialAggregator::<true>::create(ctx, method, aggregator_params), ), }, } } } pub trait Aggregator: Sized + Send { const NAME: &'static str; fn consume(&mut self, data: DataBlock) -> Result<()>; fn generate(&mut self) -> Result<Option<DataBlock>>; } enum AggregatorTransform<TAggregator: Aggregator> { ConsumeData(ConsumeState<TAggregator>), Generate(GenerateState<TAggregator>), Finished, } impl<TAggregator: Aggregator + 'static> AggregatorTransform<TAggregator> { pub fn create( input_port: Arc<InputPort>, output_port: Arc<OutputPort>, inner: TAggregator, ) -> Result<ProcessorPtr> { Ok(ProcessorPtr::create(Box::new(AggregatorTransform::< TAggregator, >::ConsumeData( ConsumeState { inner, input_port, output_port, input_data_block: None, }, )))) } pub fn convert_to_generate(self) -> Result<Self> { match self { AggregatorTransform::ConsumeData(s) => { Ok(AggregatorTransform::Generate(GenerateState { inner: s.inner, is_finished: false, output_port: s.output_port, output_data_block: None, })) } _ => Err(ErrorCode::LogicalError("")), } } } impl<TAggregator: Aggregator + 'static> Processor for AggregatorTransform<TAggregator> { fn name(&self) -> &'static str { TAggregator::NAME } fn event(&mut self) -> Result<Event> { match self { AggregatorTransform::Finished => Ok(Event::Finished), AggregatorTransform::Generate(_) => self.generate_event(), AggregatorTransform::ConsumeData(_) => self.consume_event(), } } fn process(&mut self) -> Result<()> { match self { AggregatorTransform::Finished => Ok(()), AggregatorTransform::ConsumeData(state) => state.consume(), AggregatorTransform::Generate(state) => state.generate(), } } } impl<TAggregator: Aggregator + 'static> AggregatorTransform<TAggregator> { #[inline(always)] fn consume_event(&mut self) -> Result<Event> { if let AggregatorTransform::ConsumeData(state) = self { if state.input_data_block.is_some() { return Ok(Event::Sync); } if state.input_port.is_finished() { let mut temp_state = AggregatorTransform::Finished; std::mem::swap(self, &mut temp_state); temp_state = temp_state.convert_to_generate()?; std::mem::swap(self, &mut temp_state); debug_assert!(matches!(temp_state, AggregatorTransform::Finished)); return Ok(Event::Sync); } return match state.input_port.has_data() { true => { state.input_data_block = 
Some(state.input_port.pull_data().unwrap()?); Ok(Event::Sync) } false => { state.input_port.set_need_data(); Ok(Event::NeedData) } }; } Err(ErrorCode::LogicalError("It's a bug")) } #[inline(always)] fn generate_event(&mut self) -> Result<Event> { if let AggregatorTransform::Generate(state) = self { if state.output_port.is_finished() { let mut temp_state = AggregatorTransform::Finished; std::mem::swap(self, &mut temp_state); return Ok(Event::Finished); } if !state.output_port.can_push() { return Ok(Event::NeedConsume); } if let Some(block) = state.output_data_block.take() { state.output_port.push_data(Ok(block)); return Ok(Event::NeedConsume); } if state.is_finished { if !state.output_port.is_finished() { state.output_port.finish(); } let mut temp_state = AggregatorTransform::Finished; std::mem::swap(self, &mut temp_state); return Ok(Event::Finished); } return Ok(Event::Sync); } Err(ErrorCode::LogicalError("It's a bug")) } } struct ConsumeState<TAggregator: Aggregator> { inner: TAggregator, input_port: Arc<InputPort>, output_port: Arc<OutputPort>, input_data_block: Option<DataBlock>, } impl<TAggregator: Aggregator> ConsumeState<TAggregator> { pub fn consume(&mut self) -> Result<()> { if let Some(input_data) = self.input_data_block.take() { self.inner.consume(input_data)?; } Ok(()) } } struct GenerateState<TAggregator: Aggregator> { inner: TAggregator, is_finished: bool, output_port: Arc<OutputPort>, output_data_block: Option<DataBlock>, } impl<TAggregator: Aggregator> GenerateState<TAggregator> { pub fn generate(&mut self) -> Result<()> { let generate_data = self.inner.generate()?; if generate_data.is_none() { self.is_finished = true; } self.output_data_block = generate_data; Ok(()) } }
), }, false => match transform_params.method {
e2e_suite_test.go
// +build e2e package e2e import ( "fmt" "testing" "github.com/Azure/secrets-store-csi-driver-provider-azure/test/e2e/framework" "github.com/Azure/secrets-store-csi-driver-provider-azure/test/e2e/framework/deploy" "github.com/Azure/secrets-store-csi-driver-provider-azure/test/e2e/framework/exec" "github.com/Azure/secrets-store-csi-driver-provider-azure/test/e2e/framework/helm" "github.com/Azure/secrets-store-csi-driver-provider-azure/test/e2e/framework/keyvault" "github.com/Azure/secrets-store-csi-driver-provider-azure/test/e2e/framework/pod" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" e2eframework "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "sigs.k8s.io/controller-runtime/pkg/client" ) var ( clusterProxy framework.ClusterProxy config *framework.Config kubeClient client.Client clientSet *kubernetes.Clientset kvClient keyvault.Client kubeconfigPath string coreNamespaces = []string{ framework.NamespaceKubeSystem, } ) func TestE2E(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "sscdproviderazure") } var _ = BeforeSuite(func() { By("Parsing test configuration") var err error config, err = framework.ParseConfig() Expect(err).To(BeNil()) By("Creating a Cluster Proxy") clusterProxy = framework.NewClusterProxy(initScheme()) kubeClient = clusterProxy.GetClient() clientSet = clusterProxy.GetClientSet() kubeconfigPath = clusterProxy.GetKubeconfigPath() By("Creating a Keyvault Client") kvClient = keyvault.NewClient(config) if config.IsSoakTest { return } if !config.IsHelmTest { By("Installing Secrets Store CSI Driver and Azure Key Vault Provider via kubectl from deployment manifest.") deploy.InstallManifest(kubeconfigPath, config) return } // if helm release exists, it means either cluster upgrade test or backward compatibility test is underway if !helm.ReleaseExists() { By(fmt.Sprintf("Installing Secrets Store CSI Driver and Azure Key Vault Provider via Helm from - %s.", config.HelmChartDir)) helm.Install(helm.InstallInput{ Config: config, }) } else if config.IsBackwardCompatibilityTest { // upgrade helm charts only if running backward compatibility tests By(fmt.Sprintf("Upgrading Secrets Store CSI Driver and Azure Key Vault Provider via Helm to New Version from - %s.", config.HelmChartDir)) helm.Upgrade(helm.UpgradeInput{ Config: config, }) } // Ensure all pods are running and ready before starting tests podStartupTimeout := e2eframework.TestContext.SystemPodsStartupTimeout for _, namespace := range coreNamespaces { if err := e2epod.WaitForPodsRunningReady(clientSet, namespace, int32(e2eframework.TestContext.MinStartupPods), int32(e2eframework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil { e2eframework.DumpAllNamespaceInfo(clientSet, namespace) e2ekubectl.LogFailedContainers(clientSet, namespace, e2eframework.Logf) e2eframework.Failf("error waiting for all pods to be running and ready: %v", err) } } }) var _ = AfterSuite(func() { // cleanup defer func() { // uninstall if it's not Soak Test, not backward compatibility test and if cluster is already upgraded or it's not cluster upgrade test if !config.IsSoakTest && !config.IsBackwardCompatibilityTest && (!config.IsUpgradeTest || config.IsClusterUpgraded) { if helm.ReleaseExists() { By("Uninstalling Secrets Store CSI Driver and Azure Key Vault Provider via Helm") helm.Uninstall() } } }() dumpLogs() }) func initScheme() 
*runtime.Scheme { scheme := runtime.NewScheme() framework.TryAddDefaultSchemes(scheme) return scheme
func dumpLogs() { for component, containers := range map[string][]string{ "secrets-store-csi-driver": {"node-driver-registrar", "secrets-store", "liveness-probe"}, "csi-secrets-store-provider-azure": {"provider-azure-installer"}, } { podList := pod.List(pod.ListInput{ Lister: kubeClient, Namespace: framework.NamespaceKubeSystem, Labels: map[string]string{ "app": component, }, }) for _, p := range podList.Items { for _, containerName := range containers { By(fmt.Sprintf("Dumping logs for %s scheduled to %s, container %s", p.Name, p.Spec.NodeName, containerName)) out, err := exec.KubectlLogs(kubeconfigPath, p.Name, containerName, framework.NamespaceKubeSystem) Expect(err).To(BeNil()) fmt.Println(out + "\n") } } } } // getPodExecCommand returns the exec command to use for validating mount contents func getPodExecCommand(cmd string) string { if config.IsWindowsTest { return fmt.Sprintf("powershell.exe -Command %s", cmd) } return cmd }
}
zz_generated_restorepoints_client.go
//go:build go1.18 // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. package armsql import ( "context" "errors" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "net/http" "net/url" "strings" ) // RestorePointsClient contains the methods for the RestorePoints group. // Don't use this type directly, use NewRestorePointsClient() instead. type RestorePointsClient struct { host string subscriptionID string pl runtime.Pipeline } // NewRestorePointsClient creates a new instance of RestorePointsClient with the specified values. // subscriptionID - The subscription ID that identifies an Azure subscription. // credential - used to authorize requests. Usually a credential from azidentity. // options - pass nil to accept the default values. func
(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RestorePointsClient, error) { if options == nil { options = &arm.ClientOptions{} } ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { ep = c.Endpoint } pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) if err != nil { return nil, err } client := &RestorePointsClient{ subscriptionID: subscriptionID, host: ep, pl: pl, } return client, nil } // BeginCreate - Creates a restore point for a data warehouse. // If the operation fails it returns an *azcore.ResponseError type. // Generated from API version 2020-11-01-preview // resourceGroupName - The name of the resource group that contains the resource. You can obtain this value from the Azure // Resource Manager API or the portal. // serverName - The name of the server. // databaseName - The name of the database. // parameters - The definition for creating the restore point of this database. // options - RestorePointsClientBeginCreateOptions contains the optional parameters for the RestorePointsClient.BeginCreate // method. func (client *RestorePointsClient) BeginCreate(ctx context.Context, resourceGroupName string, serverName string, databaseName string, parameters CreateDatabaseRestorePointDefinition, options *RestorePointsClientBeginCreateOptions) (*runtime.Poller[RestorePointsClientCreateResponse], error) { if options == nil || options.ResumeToken == "" { resp, err := client.create(ctx, resourceGroupName, serverName, databaseName, parameters, options) if err != nil { return nil, err } return runtime.NewPoller[RestorePointsClientCreateResponse](resp, client.pl, nil) } else { return runtime.NewPollerFromResumeToken[RestorePointsClientCreateResponse](options.ResumeToken, client.pl, nil) } } // Create - Creates a restore point for a data warehouse. // If the operation fails it returns an *azcore.ResponseError type. // Generated from API version 2020-11-01-preview func (client *RestorePointsClient) create(ctx context.Context, resourceGroupName string, serverName string, databaseName string, parameters CreateDatabaseRestorePointDefinition, options *RestorePointsClientBeginCreateOptions) (*http.Response, error) { req, err := client.createCreateRequest(ctx, resourceGroupName, serverName, databaseName, parameters, options) if err != nil { return nil, err } resp, err := client.pl.Do(req) if err != nil { return nil, err } if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { return nil, runtime.NewResponseError(resp) } return resp, nil } // createCreateRequest creates the Create request. 
func (client *RestorePointsClient) createCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, parameters CreateDatabaseRestorePointDefinition, options *RestorePointsClientBeginCreateOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/restorePoints" if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) if serverName == "" { return nil, errors.New("parameter serverName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{serverName}", url.PathEscape(serverName)) if databaseName == "" { return nil, errors.New("parameter databaseName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{databaseName}", url.PathEscape(databaseName)) if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2020-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, parameters) } // Delete - Deletes a restore point. // If the operation fails it returns an *azcore.ResponseError type. // Generated from API version 2020-11-01-preview // resourceGroupName - The name of the resource group that contains the resource. You can obtain this value from the Azure // Resource Manager API or the portal. // serverName - The name of the server. // databaseName - The name of the database. // restorePointName - The name of the restore point. // options - RestorePointsClientDeleteOptions contains the optional parameters for the RestorePointsClient.Delete method. func (client *RestorePointsClient) Delete(ctx context.Context, resourceGroupName string, serverName string, databaseName string, restorePointName string, options *RestorePointsClientDeleteOptions) (RestorePointsClientDeleteResponse, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, serverName, databaseName, restorePointName, options) if err != nil { return RestorePointsClientDeleteResponse{}, err } resp, err := client.pl.Do(req) if err != nil { return RestorePointsClientDeleteResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { return RestorePointsClientDeleteResponse{}, runtime.NewResponseError(resp) } return RestorePointsClientDeleteResponse{}, nil } // deleteCreateRequest creates the Delete request. 
func (client *RestorePointsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, restorePointName string, options *RestorePointsClientDeleteOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/restorePoints/{restorePointName}" if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) if serverName == "" { return nil, errors.New("parameter serverName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{serverName}", url.PathEscape(serverName)) if databaseName == "" { return nil, errors.New("parameter databaseName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{databaseName}", url.PathEscape(databaseName)) if restorePointName == "" { return nil, errors.New("parameter restorePointName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{restorePointName}", url.PathEscape(restorePointName)) if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2020-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } // Get - Gets a restore point. // If the operation fails it returns an *azcore.ResponseError type. // Generated from API version 2020-11-01-preview // resourceGroupName - The name of the resource group that contains the resource. You can obtain this value from the Azure // Resource Manager API or the portal. // serverName - The name of the server. // databaseName - The name of the database. // restorePointName - The name of the restore point. // options - RestorePointsClientGetOptions contains the optional parameters for the RestorePointsClient.Get method. func (client *RestorePointsClient) Get(ctx context.Context, resourceGroupName string, serverName string, databaseName string, restorePointName string, options *RestorePointsClientGetOptions) (RestorePointsClientGetResponse, error) { req, err := client.getCreateRequest(ctx, resourceGroupName, serverName, databaseName, restorePointName, options) if err != nil { return RestorePointsClientGetResponse{}, err } resp, err := client.pl.Do(req) if err != nil { return RestorePointsClientGetResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { return RestorePointsClientGetResponse{}, runtime.NewResponseError(resp) } return client.getHandleResponse(resp) } // getCreateRequest creates the Get request. 
func (client *RestorePointsClient) getCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, restorePointName string, options *RestorePointsClientGetOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/restorePoints/{restorePointName}" if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) if serverName == "" { return nil, errors.New("parameter serverName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{serverName}", url.PathEscape(serverName)) if databaseName == "" { return nil, errors.New("parameter databaseName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{databaseName}", url.PathEscape(databaseName)) if restorePointName == "" { return nil, errors.New("parameter restorePointName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{restorePointName}", url.PathEscape(restorePointName)) if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2020-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil } // getHandleResponse handles the Get response. func (client *RestorePointsClient) getHandleResponse(resp *http.Response) (RestorePointsClientGetResponse, error) { result := RestorePointsClientGetResponse{} if err := runtime.UnmarshalAsJSON(resp, &result.RestorePoint); err != nil { return RestorePointsClientGetResponse{}, err } return result, nil } // NewListByDatabasePager - Gets a list of database restore points. // If the operation fails it returns an *azcore.ResponseError type. // Generated from API version 2020-11-01-preview // resourceGroupName - The name of the resource group that contains the resource. You can obtain this value from the Azure // Resource Manager API or the portal. // serverName - The name of the server. // databaseName - The name of the database. // options - RestorePointsClientListByDatabaseOptions contains the optional parameters for the RestorePointsClient.ListByDatabase // method. 
func (client *RestorePointsClient) NewListByDatabasePager(resourceGroupName string, serverName string, databaseName string, options *RestorePointsClientListByDatabaseOptions) *runtime.Pager[RestorePointsClientListByDatabaseResponse] { return runtime.NewPager(runtime.PagingHandler[RestorePointsClientListByDatabaseResponse]{ More: func(page RestorePointsClientListByDatabaseResponse) bool { return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *RestorePointsClientListByDatabaseResponse) (RestorePointsClientListByDatabaseResponse, error) { var req *policy.Request var err error if page == nil { req, err = client.listByDatabaseCreateRequest(ctx, resourceGroupName, serverName, databaseName, options) } else { req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) } if err != nil { return RestorePointsClientListByDatabaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { return RestorePointsClientListByDatabaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { return RestorePointsClientListByDatabaseResponse{}, runtime.NewResponseError(resp) } return client.listByDatabaseHandleResponse(resp) }, }) } // listByDatabaseCreateRequest creates the ListByDatabase request. func (client *RestorePointsClient) listByDatabaseCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, options *RestorePointsClientListByDatabaseOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/restorePoints" if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) if serverName == "" { return nil, errors.New("parameter serverName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{serverName}", url.PathEscape(serverName)) if databaseName == "" { return nil, errors.New("parameter databaseName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{databaseName}", url.PathEscape(databaseName)) if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2020-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil } // listByDatabaseHandleResponse handles the ListByDatabase response. func (client *RestorePointsClient) listByDatabaseHandleResponse(resp *http.Response) (RestorePointsClientListByDatabaseResponse, error) { result := RestorePointsClientListByDatabaseResponse{} if err := runtime.UnmarshalAsJSON(resp, &result.RestorePointListResult); err != nil { return RestorePointsClientListByDatabaseResponse{}, err } return result, nil }
NewRestorePointsClient
test_recording.py
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from tests import IntegrationTestCase from tests.holodeck import Request from twilio.base.exceptions import TwilioException from twilio.http.response import Response class RecordingTestCase(IntegrationTestCase): def test_update_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .conferences(sid="CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .recordings(sid="REXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(status="in-progress") values = {'Status': "in-progress", } self.holodeck.assert_has_request(Request( 'post', 'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Conferences/CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Recordings/REXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json', data=values, )) def test_update_response(self): self.holodeck.mock(Response( 200, ''' { "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "api_version": "2010-04-01", "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "conference_sid": "CFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "channels": 1, "date_created": "Fri, 14 Oct 2016 21:56:34 +0000", "date_updated": "Fri, 14 Oct 2016 21:56:39 +0000", "start_time": "Fri, 14 Oct 2016 21:56:34 +0000", "price": null, "price_unit": null, "duration": null, "sid": "REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "source": "StartConferenceRecordingAPI", "status": "paused", "error_code": null, "encryption_details": null, "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences/CFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json" } ''' )) actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .conferences(sid="CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .recordings(sid="REXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(status="in-progress") self.assertIsNotNone(actual) def test_fetch_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .conferences(sid="CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .recordings(sid="REXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch() self.holodeck.assert_has_request(Request( 'get', 'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Conferences/CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Recordings/REXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json', )) def test_fetch_response(self): self.holodeck.mock(Response( 200, ''' { "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "api_version": "2010-04-01", "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "conference_sid": "CFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "channels": "1", "date_created": "Fri, 14 Oct 2016 21:56:34 +0000", "date_updated": "Fri, 14 Oct 2016 21:56:38 +0000", "start_time": "Fri, 14 Oct 2016 21:56:34 +0000", "price": "-0.0025", "price_unit": "USD", "duration": "4", "sid": "REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "source": "StartConferenceRecordingAPI", "status": "completed", "error_code": null, "encryption_details": { "encryption_public_key_sid": "CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "encryption_cek": 
"OV4h6zrsxMIW7h0Zfqwfn6TI2GCNl54KALlg8wn8YB8KYZhXt6HlgvBWAmQTlfYVeLWydMiCewY0YkDDT1xmNe5huEo9vjuKBS5OmYK4CZkSx1NVv3XOGrZHpd2Pl/5WJHVhUK//AUO87uh5qnUP2E0KoLh1nyCLeGcEkXU0RfpPn/6nxjof/n6m6OzZOyeIRK4Oed5+rEtjqFDfqT0EVKjs6JAxv+f0DCc1xYRHl2yV8bahUPVKs+bHYdy4PVszFKa76M/Uae4jFA9Lv233JqWcxj+K2UoghuGhAFbV/JQIIswY2CBYI8JlVSifSqNEl9vvsTJ8bkVMm3MKbG2P7Q==", "encryption_iv": "8I2hhNIYNTrwxfHk" }, "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences/CFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json" } ''' )) actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .conferences(sid="CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .recordings(sid="REXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch() self.assertIsNotNone(actual) def test_delete_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .conferences(sid="CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .recordings(sid="REXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete() self.holodeck.assert_has_request(Request( 'delete', 'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Conferences/CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Recordings/REXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json', )) def test_delete_response(self): self.holodeck.mock(Response( 204, None, )) actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .conferences(sid="CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .recordings(sid="REXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete() self.assertTrue(actual) def test_list_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .conferences(sid="CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .recordings.list() self.holodeck.assert_has_request(Request( 'get', 'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Conferences/CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Recordings.json', )) def test_read_full_response(self): self.holodeck.mock(Response( 200, ''' { "end": 0, "first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences/CFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings.json?PageSize=50&Page=0", "next_page_uri": null, "page": 0, "page_size": 50, "previous_page_uri": null, "recordings": [ { "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "api_version": "2010-04-01", "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "conference_sid": "CFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "channels": "1", "date_created": "Fri, 14 Oct 2016 21:56:34 +0000", "date_updated": "Fri, 14 Oct 2016 21:56:38 +0000", "start_time": "Fri, 14 Oct 2016 21:56:34 +0000", "price": "-0.0025", "price_unit": "USD", "duration": "4", "sid": "REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "source": "StartConferenceRecordingAPI", "status": "completed", "error_code": null, "encryption_details": { "encryption_public_key_sid": "CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "encryption_cek": "OV4h6zrsxMIW7h0Zfqwfn6TI2GCNl54KALlg8wn8YB8KYZhXt6HlgvBWAmQTlfYVeLWydMiCewY0YkDDT1xmNe5huEo9vjuKBS5OmYK4CZkSx1NVv3XOGrZHpd2Pl/5WJHVhUK//AUO87uh5qnUP2E0KoLh1nyCLeGcEkXU0RfpPn/6nxjof/n6m6OzZOyeIRK4Oed5+rEtjqFDfqT0EVKjs6JAxv+f0DCc1xYRHl2yV8bahUPVKs+bHYdy4PVszFKa76M/Uae4jFA9Lv233JqWcxj+K2UoghuGhAFbV/JQIIswY2CBYI8JlVSifSqNEl9vvsTJ8bkVMm3MKbG2P7Q==", "encryption_iv": "8I2hhNIYNTrwxfHk" }, "uri": 
"/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences/CFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings.json" } ], "start": 0, "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences/CFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings.json?PageSize=50&Page=0" } ''' )) actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .conferences(sid="CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .recordings.list() self.assertIsNotNone(actual)
self.holodeck.mock(Response( 200, ''' { "end": 0, "first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences/CFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings.json?PageSize=50&Page=0", "next_page_uri": null, "page": 0, "page_size": 50, "previous_page_uri": null, "recordings": [], "start": 0, "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences/CFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings.json?PageSize=50&Page=0" } ''' )) actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .conferences(sid="CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .recordings.list() self.assertIsNotNone(actual)
def test_read_empty_response(self):
data.py
import os import torch class Dictionary(object): """Build word2idx and idx2word from Corpus(train/val/test)"""
def __init__(self): self.word2idx = {} # word: index self.idx2word = [] # position(index): word def add_word(self, word): """Create/Update word2idx and idx2word""" if word not in self.word2idx: self.idx2word.append(word) self.word2idx[word] = len(self.idx2word) - 1 return self.word2idx[word] def __len__(self): return len(self.idx2word) class Corpus(object): """Corpus Tokenizer""" def __init__(self, path): self.dictionary = Dictionary() self.train = self.tokenize(os.path.join(path, 'train.txt')) self.valid = self.tokenize(os.path.join(path, 'valid.txt')) self.test = self.tokenize(os.path.join(path, 'test.txt')) def tokenize(self, path): """Tokenizes a text file.""" assert os.path.exists(path) # Add words to the dictionary with open(path, 'r') as f: tokens = 0 for line in f: # line to list of token + eos words = line.split() + ['<eos>'] tokens += len(words) for word in words: self.dictionary.add_word(word) # Tokenize file content with open(path, 'r') as f: ids = torch.LongTensor(tokens) token = 0 for line in f: words = line.split() + ['<eos>'] for word in words: ids[token] = self.dictionary.word2idx[word] token += 1 return ids
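A minimal usage sketch for the Dictionary class above (an illustration, not part of the original data.py): add_word assigns each new word the next free index and returns the existing index for repeated words, which is exactly what Corpus.tokenize relies on when mapping tokens to ids.

# Hypothetical example, assuming the Dictionary class defined above is importable.
d = Dictionary()
assert d.add_word('hello') == 0   # first new word gets index 0
assert d.add_word('world') == 1   # next new word gets index 1
assert d.add_word('hello') == 0   # repeated word keeps its original index
assert len(d) == 2
assert d.idx2word[d.word2idx['world']] == 'world'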
listAllCameras.js
const HuddlyDeviceApiIp = require('@huddly/device-api-ip').default;
const HuddlySdk = require('@huddly/sdk').default; const { HUDDLY_L1_PID } = require('@huddly/sdk/lib/src/components/device/factory').default; const ipApi = new HuddlyDeviceApiIp(); const usbApi = new HuddlyDeviceApiIUsb(); // Create an instance of the SDK const sdk = new HuddlySdk([usbApi, ipApi], [usbApi, ipApi]); const applicationTeardown = () => { process.exit(); } if (!process.argv[2]) { console.log('Note: Default search time (30 seconds). Update it by adding an extra cmd argument.'); } const searchTime = process.argv[2] || 30; const cameraList = []; sdk.on('ATTACH', (huddlyDevice) => { console.log(`Found [${huddlyDevice.productName}] device!`) huddlyDevice.getInfo() .then(info => { cameraList.push(info) }) .catch((e) => { console.error(`Unable to get camera info from [${huddlyDevice.productName}]`) console.trace(e); }).finally(_ => { huddlyDevice.closeConnection(); }); }); setTimeout(() => { console.group('Camera List') console.log(cameraList) console.groupEnd(); applicationTeardown(); }, searchTime * 1000) // Call init() to trigger device discovery sdk.init();
const HuddlyDeviceApiIUsb = require('@huddly/device-api-usb').default;
343.py
""" link: https://leetcode.com/problems/integer-break problem: 将n拆分成若干个整数之和,求这堆整数的最大积,2 <= n <= 58
solution: DP. dp[i] is the optimal answer for n == i; for each i, try every split point j and keep the maximum product """ class Solution: def integerBreak(self, n: int) -> int: dp = [1 for _ in range(n + 1)] for i in range(2, n + 1): for j in range(1, i // 2 + 1): dp[i] = max(dp[i], max(dp[j], j) * max(dp[i - j], i - j)) return dp[n]
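To make the recurrence above concrete, here is a small, hypothetical check (not part of the original 343.py) that exercises the Solution class: for n = 4 the best split is 2 + 2 giving 4, and for n = 10 it is 3 + 3 + 4 giving 36.

# Hypothetical usage sketch, assuming the Solution class defined above.
if __name__ == "__main__":
    s = Solution()
    assert s.integerBreak(4) == 4    # 4 = 2 + 2, product 2 * 2 = 4
    assert s.integerBreak(10) == 36  # 10 = 3 + 3 + 4, product 3 * 3 * 4 = 36
    print("integer-break DP checks passed")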
index.tsx
import { Menu } from '@arco-design/web-react'; import React from 'react'; export function
(props: { onChange: (val: string) => void }) { const list = [ { value: 'H1', label: 'H1', }, { value: 'H2', label: 'H2', }, { value: 'H3', label: 'H3', }, { value: 'H4', label: 'H4', }, { value: 'H5', label: 'H5', }, { value: 'H6', label: 'H6', }, { value: 'P', label: 'Paragraph', }, ]; return ( <Menu onClickMenuItem={(item) => { props.onChange(item); }} selectedKeys={[]} style={{ width: 100, border: 'none' }} > {list.map((item) => ( <Menu.Item style={{ lineHeight: '30px', height: 30 }} key={item.value}> {item.label} </Menu.Item> ))} </Menu> ); }
Heading
dot.rs
//! Generating Graphviz `dot` files from our IR. use super::context::{BindgenContext, ItemId}; use super::traversal::Trace; use std::fs::File; use std::io::{self, Write}; use std::path::Path; /// A trait for anything that can write attributes as `<table>` rows to a dot /// file. pub trait DotAttributes { /// Write this thing's attributes to the given output. Each attribute must /// be its own `<tr>...</tr>`. fn dot_attributes<W>( &self, ctx: &BindgenContext, out: &mut W, ) -> io::Result<()> where W: io::Write; } /// Write a graphviz dot file containing our IR. pub fn write_dot_file<P>(ctx: &BindgenContext, path: P) -> io::Result<()> where P: AsRef<Path>,
{ let file = File::create(path)?; let mut dot_file = io::BufWriter::new(file); writeln!(&mut dot_file, "digraph {{")?; let mut err: Option<io::Result<_>> = None; for (id, item) in ctx.items() { let is_allowlisted = ctx.allowlisted_items().contains(&id); writeln!( &mut dot_file, r#"{} [fontname="courier", color={}, label=< <table border="0" align="left">"#, id.as_usize(), if is_allowlisted { "black" } else { "gray" } )?; item.dot_attributes(ctx, &mut dot_file)?; writeln!(&mut dot_file, r#"</table> >];"#)?; item.trace( ctx, &mut |sub_id: ItemId, edge_kind| { if err.is_some() { return; } match writeln!( &mut dot_file, "{} -> {} [label={:?}, color={}];", id.as_usize(), sub_id.as_usize(), edge_kind, if is_allowlisted { "black" } else { "gray" } ) { Ok(_) => {} Err(e) => err = Some(Err(e)), } }, &(), ); if let Some(err) = err { return err; } if let Some(module) = item.as_module() { for child in module.children() { writeln!( &mut dot_file, "{} -> {} [style=dotted, color=gray]", item.id().as_usize(), child.as_usize() )?; } } } writeln!(&mut dot_file, "}}")?; Ok(()) }
utils.rs
#![allow(clippy::or_fun_call, clippy::expect_fun_call, dead_code)] use std::{ collections::{BTreeMap, BTreeSet}, convert::{TryFrom, TryInto}, sync::{ atomic::{AtomicU64, Ordering::SeqCst}, Arc, Once, }, }; use js_int::uint; use maplit::btreemap; use ruma_common::MilliSecondsSinceUnixEpoch; use ruma_events::{ pdu::{EventHash, Pdu, RoomV3Pdu}, room::{ join_rules::JoinRule, member::{MemberEventContent, MembershipState}, }, EventType, }; use ruma_identifiers::{EventId, RoomId, RoomVersionId, UserId}; use ruma_state_res::{auth_types_for_event, Error, Event, Result, StateMap, StateResolution}; use serde_json::{json, Value as JsonValue}; use tracing::info; use tracing_subscriber as tracer; pub use event::StateEvent; pub static LOGGER: Once = Once::new(); static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); pub fn do_check( events: &[Arc<StateEvent>], edges: Vec<Vec<EventId>>, expected_state_ids: Vec<EventId>, ) { // to activate logging use `RUST_LOG=debug cargo t` let _ = LOGGER .call_once(|| tracer::fmt().with_env_filter(tracer::EnvFilter::from_default_env()).init()); let init_events = INITIAL_EVENTS(); let mut store = TestStore( init_events.values().chain(events).map(|ev| (ev.event_id().clone(), ev.clone())).collect(), ); // This will be lexi_topo_sorted for resolution let mut graph = BTreeMap::new(); // This is the same as in `resolve` event_id -> StateEvent let mut fake_event_map = BTreeMap::new(); // Create the DB of events that led up to this point // TODO maybe clean up some of these clones it is just tests but... for ev in init_events.values().chain(events) { graph.insert(ev.event_id().clone(), vec![]); fake_event_map.insert(ev.event_id().clone(), ev.clone()); } for pair in INITIAL_EDGES().windows(2) { if let [a, b] = &pair { graph.entry(a.clone()).or_insert(vec![]).push(b.clone()); } } for edge_list in edges { for pair in edge_list.windows(2) { if let [a, b] = &pair { graph.entry(a.clone()).or_insert(vec![]).push(b.clone()); } } } // event_id -> StateEvent let mut event_map: BTreeMap<EventId, Arc<StateEvent>> = BTreeMap::new(); // event_id -> StateMap<EventId> let mut state_at_event: BTreeMap<EventId, StateMap<EventId>> = BTreeMap::new(); // Resolve the current state and add it to the state_at_event map then continue // on in "time" for node in StateResolution::lexicographical_topological_sort(&graph, |id| { (0, MilliSecondsSinceUnixEpoch(uint!(0)), id.clone()) }) { let fake_event = fake_event_map.get(&node).unwrap(); let event_id = fake_event.event_id().clone(); let prev_events = graph.get(&node).unwrap(); let state_before: StateMap<EventId> = if prev_events.is_empty() { BTreeMap::new() } else if prev_events.len() == 1 { state_at_event.get(&prev_events[0]).unwrap().clone() } else { let state_sets = prev_events .iter() .filter_map(|k| state_at_event.get(k)) .cloned() .collect::<Vec<_>>(); info!( "{:#?}", state_sets .iter() .map(|map| map .iter() .map(|((ty, key), id)| format!("(({}{:?}), {})", ty, key, id)) .collect::<Vec<_>>()) .collect::<Vec<_>>() ); let resolved = StateResolution::resolve( &room_id(), &RoomVersionId::Version6, &state_sets, state_sets .iter() .map(|map| { store .auth_event_ids(&room_id(), &map.values().cloned().collect::<Vec<_>>()) .unwrap() }) .collect(), &mut event_map, ); match resolved { Ok(state) => state, Err(e) => panic!("resolution for {} failed: {}", node, e), } }; let mut state_after = state_before.clone(); let ty = fake_event.kind(); let key = fake_event.state_key(); state_after.insert((ty, key), event_id.clone()); let auth_types = auth_types_for_event( 
&fake_event.kind(), fake_event.sender(), Some(fake_event.state_key()), fake_event.content(), ); let mut auth_events = vec![]; for key in auth_types { if state_before.contains_key(&key) { auth_events.push(state_before[&key].clone()) } } // TODO The event is just remade, adding the auth_events and prev_events here // the `to_pdu_event` was split into `init` and the fn below, could be better let e = fake_event; let ev_id = e.event_id().clone(); let event = to_pdu_event( e.event_id().as_str(), e.sender().clone(), e.kind().clone(), Some(&e.state_key()), e.content(), &auth_events, prev_events, ); // We have to update our store, an actual user of this lib would // be giving us state from a DB. store.0.insert(ev_id.clone(), event.clone()); state_at_event.insert(node, state_after); event_map.insert(event_id.clone(), Arc::clone(store.0.get(&ev_id).unwrap())); } let mut expected_state = StateMap::new(); for node in expected_state_ids { let ev = event_map.get(&node).expect(&format!( "{} not found in {:?}", node.to_string(), event_map.keys().map(ToString::to_string).collect::<Vec<_>>(), )); let key = (ev.kind(), ev.state_key()); expected_state.insert(key, node); } let start_state = state_at_event.get(&event_id("$START:foo")).unwrap(); let end_state = state_at_event .get(&event_id("$END:foo")) .unwrap() .iter() .filter(|(k, v)| { expected_state.contains_key(k) || start_state.get(k) != Some(*v) // Filter out the dummy messages events. // These act as points in time where there should be a known state to // test against. && **k != (EventType::RoomMessage, "dummy".to_string()) }) .map(|(k, v)| (k.clone(), v.clone())) .collect::<StateMap<EventId>>(); assert_eq!(expected_state, end_state); } pub struct TestStore<E: Event>(pub BTreeMap<EventId, Arc<E>>); #[allow(unused)] impl<E: Event> TestStore<E> { pub fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result<Arc<E>> { self.0 .get(event_id) .map(Arc::clone) .ok_or_else(|| Error::NotFound(format!("{} not found", event_id.to_string()))) } /// Returns the events that correspond to the `event_ids` sorted in the same order. pub fn get_events(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<Vec<Arc<E>>> { let mut events = vec![]; for id in event_ids { events.push(self.get_event(room_id, id)?); } Ok(events) } /// Returns a Vec of the related auth events to the given `event`. pub fn auth_event_ids(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<Vec<EventId>> { let mut result = vec![]; let mut stack = event_ids.to_vec(); // DFS for auth event chain while !stack.is_empty() { let ev_id = stack.pop().unwrap(); if result.contains(&ev_id) { continue; } result.push(ev_id.clone()); let event = self.get_event(room_id, &ev_id)?; stack.extend(event.auth_events().clone()); } Ok(result) } /// Returns a Vec<EventId> representing the difference in auth chains of the given `events`. 
pub fn auth_chain_diff( &self, room_id: &RoomId, event_ids: Vec<Vec<EventId>>, ) -> Result<Vec<EventId>> { use itertools::Itertools; let mut chains = vec![]; for ids in event_ids { // TODO state store `auth_event_ids` returns self in the event ids list // when an event returns `auth_event_ids` self is not contained let chain = self.auth_event_ids(room_id, &ids)?.into_iter().collect::<BTreeSet<_>>(); chains.push(chain); } if let Some(chain) = chains.first().cloned() { let rest = chains.iter().skip(1).flatten().cloned().collect(); let common = chain.intersection(&rest).collect::<Vec<_>>(); Ok(chains.into_iter().flatten().filter(|id| !common.contains(&id)).dedup().collect()) } else { Ok(vec![]) } } } pub fn event_id(id: &str) -> EventId { if id.contains('$') { return EventId::try_from(id).unwrap(); } EventId::try_from(format!("${}:foo", id)).unwrap() } pub fn alice() -> UserId { UserId::try_from("@alice:foo").unwrap() } pub fn bob() -> UserId { UserId::try_from("@bob:foo").unwrap() } pub fn charlie() -> UserId { UserId::try_from("@charlie:foo").unwrap() } pub fn ella() -> UserId { UserId::try_from("@ella:foo").unwrap() } pub fn zara() -> UserId { UserId::try_from("@zara:foo").unwrap() } pub fn room_id() -> RoomId { RoomId::try_from("!test:foo").unwrap() } pub fn member_content_ban() -> JsonValue { serde_json::to_value(MemberEventContent::new(MembershipState::Ban)).unwrap() } pub fn member_content_join() -> JsonValue { serde_json::to_value(MemberEventContent::new(MembershipState::Join)).unwrap() } pub fn to_init_pdu_event( id: &str, sender: UserId, ev_type: EventType, state_key: Option<&str>, content: JsonValue, ) -> Arc<StateEvent> { let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); let id = if id.contains('$') { id.to_string() } else { format!("${}:foo", id) }; let state_key = state_key.map(ToString::to_string); Arc::new(StateEvent { event_id: EventId::try_from(id).unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id(), sender, origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), state_key, kind: ev_type, content, redacts: None, unsigned: btreemap! {}, #[cfg(not(feature = "unstable-pre-spec"))] origin: "foo".into(), auth_events: vec![], prev_events: vec![], depth: uint!(0), hashes: EventHash { sha256: "".into() }, signatures: btreemap! {}, }), }) } pub fn to_pdu_event<S>( id: &str, sender: UserId, ev_type: EventType, state_key: Option<&str>, content: JsonValue, auth_events: &[S], prev_events: &[S], ) -> Arc<StateEvent> where S: AsRef<str>, { let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); let id = if id.contains('$') { id.to_string() } else { format!("${}:foo", id) }; let auth_events = auth_events.iter().map(AsRef::as_ref).map(event_id).collect::<Vec<_>>(); let prev_events = prev_events.iter().map(AsRef::as_ref).map(event_id).collect::<Vec<_>>(); let state_key = state_key.map(ToString::to_string); Arc::new(StateEvent { event_id: EventId::try_from(id).unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id(), sender, origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), state_key, kind: ev_type, content, redacts: None, unsigned: btreemap! {}, #[cfg(not(feature = "unstable-pre-spec"))] origin: "foo".into(), auth_events, prev_events, depth: uint!(0), hashes: EventHash { sha256: "".into() }, signatures: btreemap! 
{}, }), }) } // all graphs start with these input events #[allow(non_snake_case)] pub fn INITIAL_EVENTS() -> BTreeMap<EventId, Arc<StateEvent>> { // this is always called so we can init the logger here let _ = LOGGER .call_once(|| tracer::fmt().with_env_filter(tracer::EnvFilter::from_default_env()).init()); vec![ to_pdu_event::<EventId>( "CREATE", alice(), EventType::RoomCreate, Some(""), json!({ "creator": alice() }), &[], &[], ), to_pdu_event( "IMA", alice(), EventType::RoomMember, Some(alice().to_string().as_str()), member_content_join(), &["CREATE"], &["CREATE"], ), to_pdu_event( "IPOWER", alice(), EventType::RoomPowerLevels, Some(""), json!({ "users": { alice().to_string(): 100 } }), &["CREATE", "IMA"], &["IMA"], ), to_pdu_event( "IJR", alice(), EventType::RoomJoinRules, Some(""), json!({ "join_rule": JoinRule::Public }), &["CREATE", "IMA", "IPOWER"], &["IPOWER"], ), to_pdu_event( "IMB", bob(), EventType::RoomMember, Some(bob().to_string().as_str()), member_content_join(), &["CREATE", "IJR", "IPOWER"], &["IJR"], ), to_pdu_event( "IMC", charlie(), EventType::RoomMember, Some(charlie().to_string().as_str()), member_content_join(), &["CREATE", "IJR", "IPOWER"], &["IMB"], ), to_pdu_event::<EventId>( "START", charlie(), EventType::RoomMessage, Some("dummy"), json!({}), &[], &[], ), to_pdu_event::<EventId>( "END", charlie(), EventType::RoomMessage, Some("dummy"), json!({}), &[], &[], ), ] .into_iter() .map(|ev| (ev.event_id().clone(), ev)) .collect() } #[allow(non_snake_case)] pub fn INITIAL_EDGES() -> Vec<EventId> { vec!["START", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"] .into_iter() .map(event_id) .collect::<Vec<_>>() } pub mod event { use std::collections::BTreeMap; use js_int::UInt; use ruma_events::{ exports::ruma_common::MilliSecondsSinceUnixEpoch, pdu::{EventHash, Pdu}, room::member::{MemberEventContent, MembershipState}, EventType, }; use ruma_identifiers::{ EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use ruma_serde::CanonicalJsonObject; use ruma_state_res::Event; use serde::{Deserialize, Serialize}; use serde_json::Value as JsonValue; impl Event for StateEvent { fn event_id(&self) -> &EventId { self.event_id() } fn room_id(&self) -> &RoomId { self.room_id() } fn sender(&self) -> &UserId { self.sender() } fn kind(&self) -> EventType { self.kind() } fn content(&self) -> serde_json::Value { self.content() } fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { *self.origin_server_ts() } fn state_key(&self) -> Option<String> { Some(self.state_key()) } fn prev_events(&self) -> Vec<EventId> { self.prev_event_ids() } fn depth(&self) -> &UInt { self.depth() } fn auth_events(&self) -> Vec<EventId> { self.auth_events() } fn redacts(&self) -> Option<&EventId> { self.redacts() } fn hashes(&self) -> &EventHash { self.hashes() } fn signatures(&self) -> BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, String>> { self.signatures() } fn unsigned(&self) -> &BTreeMap<String, JsonValue> { self.unsigned() } } #[derive(Clone, Debug, Deserialize, Serialize)] pub struct StateEvent { pub event_id: EventId, #[serde(flatten)] pub rest: Pdu, } impl StateEvent { pub fn from_id_value(id: EventId, json: serde_json::Value) -> serde_json::Result<Self> { Ok(Self { event_id: id, rest: Pdu::RoomV3Pdu(serde_json::from_value(json)?) }) } pub fn from_id_canon_obj( id: EventId, json: CanonicalJsonObject, ) -> serde_json::Result<Self> { Ok(Self { event_id: id, // TODO: this is unfortunate (from_value(to_value(json)))... 
rest: Pdu::RoomV3Pdu(serde_json::from_value(serde_json::to_value(json)?)?), }) } pub fn is_power_event(&self) -> bool { match &self.rest { Pdu::RoomV1Pdu(event) => match event.kind { EventType::RoomPowerLevels | EventType::RoomJoinRules | EventType::RoomCreate => event.state_key == Some("".into()), EventType::RoomMember => { // TODO fix clone if let Ok(content) = serde_json::from_value::<MemberEventContent>(event.content.clone()) { if [MembershipState::Leave, MembershipState::Ban] .contains(&content.membership) { return event.sender.as_str() // TODO is None here a failure != event.state_key.as_deref().unwrap_or("NOT A STATE KEY"); } } false } _ => false, }, Pdu::RoomV3Pdu(event) => event.state_key == Some("".into()), } } pub fn deserialize_content<C: serde::de::DeserializeOwned>(&self) -> serde_json::Result<C> { match &self.rest { Pdu::RoomV1Pdu(ev) => serde_json::from_value(ev.content.clone()), Pdu::RoomV3Pdu(ev) => serde_json::from_value(ev.content.clone()), } } pub fn origin_server_ts(&self) -> &MilliSecondsSinceUnixEpoch { match &self.rest { Pdu::RoomV1Pdu(ev) => &ev.origin_server_ts, Pdu::RoomV3Pdu(ev) => &ev.origin_server_ts, } } pub fn event_id(&self) -> &EventId { &self.event_id } pub fn sender(&self) -> &UserId { match &self.rest { Pdu::RoomV1Pdu(ev) => &ev.sender, Pdu::RoomV3Pdu(ev) => &ev.sender, } } pub fn redacts(&self) -> Option<&EventId> { match &self.rest { Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), } } pub fn room_id(&self) -> &RoomId { match &self.rest { Pdu::RoomV1Pdu(ev) => &ev.room_id, Pdu::RoomV3Pdu(ev) => &ev.room_id, } } pub fn kind(&self) -> EventType { match &self.rest { Pdu::RoomV1Pdu(ev) => ev.kind.clone(), Pdu::RoomV3Pdu(ev) => ev.kind.clone(), } } pub fn state_key(&self) -> String { match &self.rest { Pdu::RoomV1Pdu(ev) => ev.state_key.clone().unwrap(), Pdu::RoomV3Pdu(ev) => ev.state_key.clone().unwrap(), } } #[cfg(not(feature = "unstable-pre-spec"))] pub fn origin(&self) -> String { match &self.rest { Pdu::RoomV1Pdu(ev) => ev.origin.clone(), Pdu::RoomV3Pdu(ev) => ev.origin.clone(), } } pub fn prev_event_ids(&self) -> Vec<EventId> { match &self.rest { Pdu::RoomV1Pdu(ev) => ev.prev_events.iter().map(|(id, _)| id).cloned().collect(), Pdu::RoomV3Pdu(ev) => ev.prev_events.clone(), } } pub fn auth_events(&self) -> Vec<EventId> { match &self.rest { Pdu::RoomV1Pdu(ev) => ev.auth_events.iter().map(|(id, _)| id).cloned().collect(), Pdu::RoomV3Pdu(ev) => ev.auth_events.to_vec(), } } pub fn content(&self) -> serde_json::Value { match &self.rest { Pdu::RoomV1Pdu(ev) => ev.content.clone(), Pdu::RoomV3Pdu(ev) => ev.content.clone(), } } pub fn unsigned(&self) -> &BTreeMap<String, serde_json::Value> { match &self.rest { Pdu::RoomV1Pdu(ev) => &ev.unsigned, Pdu::RoomV3Pdu(ev) => &ev.unsigned, } } pub fn signatures( &self, ) -> BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, String>> { match &self.rest { Pdu::RoomV1Pdu(_) => maplit::btreemap! {}, Pdu::RoomV3Pdu(ev) => ev.signatures.clone(), } } pub fn hashes(&self) -> &EventHash { match &self.rest { Pdu::RoomV1Pdu(ev) => &ev.hashes, Pdu::RoomV3Pdu(ev) => &ev.hashes, } } pub fn depth(&self) -> &UInt { match &self.rest { Pdu::RoomV1Pdu(ev) => &ev.depth, Pdu::RoomV3Pdu(ev) => &ev.depth, } } pub fn
(&self, ev_type: EventType, state_key: &str) -> bool { match &self.rest { Pdu::RoomV1Pdu(ev) => { ev.kind == ev_type && ev.state_key.as_deref() == Some(state_key) } Pdu::RoomV3Pdu(ev) => { ev.kind == ev_type && ev.state_key.as_deref() == Some(state_key) } } } /// Returns the room version this event is formatted for. /// /// Currently either version 1 or 6 is returned, 6 represents /// version 3 and above. pub fn room_version(&self) -> RoomVersionId { // TODO: We have to know the actual room version this is not sufficient match self.rest { Pdu::RoomV1Pdu(_) => RoomVersionId::Version1, Pdu::RoomV3Pdu(_) => RoomVersionId::Version6, } } } }
is_type_and_key
ssh_key.rs
// id number This is a unique identification number for the // key. This can be used to reference a specific SSH key when you wish to embed // a key into a Droplet. // fingerprint string This attribute contains the fingerprint value // that is generated from the public key. This is a unique identifier that will // differentiate it from other keys using a format that SSH recognizes. // public_key string This attribute contains the entire public key // string that was uploaded. This is what is embedded into the root user's // authorized_keys file if you choose to include this SSH key during Droplet // creation. // name string This is the human-readable display name for the // given SSH key. This is used to easily identify the SSH keys when they are // displayed. use std::fmt; use std::borrow::Cow; use response::NamedResponse; use response; #[derive(Deserialize, Debug)] pub struct SshKey { pub id: f64, pub fingerprint: String, pub public_key: String, pub name: String, } impl response::NotArray for SshKey {} impl NamedResponse for SshKey { fn name<'a>() -> Cow<'a, str> { "ssh_key".into() } } impl fmt::Display for SshKey { fn
(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "ID: {:.0}\n\ Fingerprint: {}\n\ Public Key: {}\n\ Name: {}", self.id, self.fingerprint, self.public_key, self.name) } } pub type SshKeys = Vec<SshKey>;
fmt