file_name (stringlengths 3-137) | prefix (stringlengths 0-918k) | suffix (stringlengths 0-962k) | middle (stringlengths 0-812k) |
---|---|---|---|
hooks.test.tsx | import React from 'react';
import {mountWithApp, mount} from 'test-utilities';
import {
IndexRowContext,
IndexSelectionChangeContext,
IndexContextType,
IndexContext,
} from '../context';
import {
BulkSelectionDataOptions,
HandleBulkSelectionOptions,
SelectionType,
} from '../types';
import {
useIndexRow,
useIndexSelectionChange,
useIndexValue,
useBulkSelectionData,
useHandleBulkSelection,
} from '../hooks';
interface IndexSelectionChangeTypedChildProps {
onSelectionChange: ReturnType<typeof useIndexSelectionChange>;
}
interface IndexValueTypedChildProps extends IndexContextType {}
interface BulkSelectionDataTypedChildProps
extends ReturnType<typeof useBulkSelectionData> {}
interface HandleBulkSelectionTypedChildProps {
onSelectionChange: ReturnType<typeof useHandleBulkSelection>;
}
describe('useIndexRow', () => {
it('returns selectMode & condensed', () => {
const spy = jest.fn();
function MockComponent() {
const value = useIndexRow();
spy(value);
return null;
}
mountWithApp(
<IndexRowContext.Provider value={{selectMode: true, condensed: true}}>
<MockComponent />
</IndexRowContext.Provider>,
);
expect(spy).toHaveBeenCalledWith({selectMode: true, condensed: true});
});
it('throws when IndexProvider is not being used', () => {
const consoleErrorSpy = jest.spyOn(console, 'error');
consoleErrorSpy.mockImplementation(() => {});
function callback() {
function MockComponent() { | return null;
}
mount(<MockComponent />);
}
expect(callback).toThrow(`Missing IndexProvider context`);
consoleErrorSpy.mockRestore();
});
});
describe('useIndexSelectionChange', () => {
function TypedChild(_: IndexSelectionChangeTypedChildProps) {
return null;
}
function MockComponent() {
const onSelectionChange = useIndexSelectionChange();
return <TypedChild onSelectionChange={onSelectionChange} />;
}
let consoleErrorSpy: jest.SpyInstance;
beforeEach(() => {
consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
});
afterEach(() => {
consoleErrorSpy.mockRestore();
});
it('throws when IndexSelectionChangeContext is missing', () => {
function throwMissingContext() {
mount(<MockComponent />);
}
expect(throwMissingContext).toThrow('Missing IndexProvider context');
});
it('returns onSelectionChange', () => {
const contextSpy = jest.fn();
const mockComponent = mount(
<IndexSelectionChangeContext.Provider value={contextSpy}>
<MockComponent />
</IndexSelectionChangeContext.Provider>,
);
expect(mockComponent).toContainReactComponent(TypedChild, {
onSelectionChange: contextSpy,
});
});
});
describe('useIndexValue', () => {
function TypedChild(_: IndexValueTypedChildProps) {
return null;
}
function MockComponent() {
const contextValues = useIndexValue();
return <TypedChild {...contextValues} />;
}
let consoleErrorSpy: jest.SpyInstance;
beforeEach(() => {
consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
});
afterEach(() => {
consoleErrorSpy.mockRestore();
});
it('throws when IndexContext is missing', () => {
function throwMissingContext() {
mount(<MockComponent />);
}
expect(throwMissingContext).toThrow('Missing IndexProvider context');
});
it('returns index context values', () => {
const contextValues = {
resourceName: {singular: 'singular', plural: 'plural'},
selectedItemsCount: 0,
selectMode: false,
itemCount: 3,
};
const mockComponent = mount(
<IndexContext.Provider value={contextValues}>
<MockComponent />
</IndexContext.Provider>,
);
expect(mockComponent).toContainReactComponent(TypedChild, contextValues);
});
});
describe('useBulkSelectionData', () => {
function TypedChild(_: BulkSelectionDataTypedChildProps) {
return null;
}
function MockComponent(options: Partial<BulkSelectionDataOptions>) {
const contextValues = useBulkSelectionData({
selectedItemsCount: 0,
itemCount: 4,
hasMoreItems: true,
resourceName: {
singular: 'test',
plural: 'tests',
},
...options,
});
return <TypedChild {...contextValues} />;
}
it('returns paginated select all text when all resources are selected', () => {
const itemCount = 4;
const resourceName = {
singular: 'order',
plural: 'orders',
};
const paginatedSelectAllText = `All ${itemCount}+ ${resourceName.plural} in your store are selected.`;
const mockComponent = mountWithApp(
<MockComponent
selectedItemsCount="All"
hasMoreItems
itemCount={itemCount}
resourceName={resourceName}
/>,
);
expect(mockComponent).toContainReactComponent(TypedChild, {
paginatedSelectAllText,
});
});
});
describe('useHandleBulkSelection', () => {
function TypedChild(_: HandleBulkSelectionTypedChildProps) {
return null;
}
function MockComponent(options: HandleBulkSelectionOptions) {
const contextValue = useHandleBulkSelection(options);
return <TypedChild onSelectionChange={contextValue} />;
}
it('selects ranges', () => {
const onSelectionChangeSpy = jest.fn();
const mockComponent = mount(
<MockComponent onSelectionChange={onSelectionChangeSpy} />,
);
const typedChild = mockComponent.find(TypedChild)!;
// First selection cannot be multi
typedChild.trigger(
'onSelectionChange',
SelectionType.Multi,
true,
undefined,
3,
);
typedChild.trigger(
'onSelectionChange',
SelectionType.Multi,
true,
undefined,
4,
);
expect(onSelectionChangeSpy).toHaveBeenLastCalledWith(
SelectionType.Multi,
true,
[3, 4],
);
});
}); | useIndexRow(); |
maketiles_old.py | import skimage.io
import numpy as np
import pandas as pd
import sys
from pathlib import Path
import pickle
import argparse
import cv2
parser = argparse.ArgumentParser()
parser.add_argument("--base_dir", default='G:/Datasets/panda', required=False)
parser.add_argument("--out_dir", default='D:/Datasets/panda', required=False)
args = parser.parse_args()
BASE_PATH = Path(args.base_dir)
OUTPUT_BASE = Path(args.out_dir)
SIZE = 128
NUM = 16
LEVEL = 1
STRIDE = False
TRAIN_PATH = BASE_PATH/'train_images/'
MASKS_TRAIN_PATH = BASE_PATH/'train_label_masks/'
OUTPUT_IMG_PATH = OUTPUT_BASE/f'train_tiles_{SIZE}_{LEVEL}/imgs/'
OUTPUT_MASK_PATH = OUTPUT_BASE/f'train_tiles_{SIZE}_{LEVEL}/masks/'
PICKLE_NAME = OUTPUT_BASE/f'stats_{SIZE}_{LEVEL}.pkl'
CSV_PATH = BASE_PATH/'train.csv'
pen_marked_images = [
'fd6fe1a3985b17d067f2cb4d5bc1e6e1',
'ebb6a080d72e09f6481721ef9f88c472',
'ebb6d5ca45942536f78beb451ee43cc4',
'ea9d52d65500acc9b9d89eb6b82cdcdf',
'e726a8eac36c3d91c3c4f9edba8ba713',
'e90abe191f61b6fed6d6781c8305fe4b',
'fd0bb45eba479a7f7d953f41d574bf9f',
'ff10f937c3d52eff6ad4dd733f2bc3ac',
'feee2e895355a921f2b75b54debad328',
'feac91652a1c5accff08217d19116f1c',
'fb01a0a69517bb47d7f4699b6217f69d',
'f00ec753b5618cfb30519db0947fe724',
'e9a4f528b33479412ee019e155e1a197',
'f062f6c1128e0e9d51a76747d9018849',
'f39bf22d9a2f313425ee201932bac91a',
]
def remove_pen_marks(img):
# Define elliptic kernel
kernel5x5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
# use cv2.inRange to mask pen marks (hardcoded for now)
lower = np.array([0, 0, 0])
upper = np.array([200, 255, 255])
img_mask1 = cv2.inRange(img, lower, upper)
# Use erosion and findContours to remove masked tissue (side effect of above)
img_mask1 = cv2.erode(img_mask1, kernel5x5, iterations=4)
img_mask2 = np.zeros(img_mask1.shape, dtype=np.uint8)
contours, _ = cv2.findContours(img_mask1, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
x, y = contour[:, 0, 0], contour[:, 0, 1]
w, h = x.max() - x.min(), y.max() - y.min()
if w > 100 and h > 100:
cv2.drawContours(img_mask2, [contour], 0, 1, -1)
# expand the area of the pen marks
img_mask2 = cv2.dilate(img_mask2, kernel5x5, iterations=3)
img_mask2 = (1 - img_mask2)
# Mask out pen marks from original image
img = cv2.bitwise_and(img, img, mask=img_mask2)
img[img == 0] = 255
return img, img_mask1, img_mask2
class TileMaker:
|
if __name__ == "__main__":
OUTPUT_IMG_PATH.mkdir(exist_ok=True, parents=True)
OUTPUT_MASK_PATH.mkdir(exist_ok=True, parents=True)
tile_maker = TileMaker(SIZE, NUM)
img_list = list(TRAIN_PATH.glob('**/*.tiff'))
# img_list.pop(5765)
bad_images = []
bad_masks = []
image_stats = []
files = []
for i, img_fn in enumerate(img_list):
img_id = img_fn.stem
mask_fn = MASKS_TRAIN_PATH / (img_id + '_mask.tiff')
try:
col = skimage.io.MultiImage(str(img_fn))
image = col[-LEVEL]
except Exception:
bad_images.append(img_id)
continue
if img_id in pen_marked_images:
image, _, _ = remove_pen_marks(image)
if mask_fn.exists():
try:
mask = skimage.io.MultiImage(str(mask_fn))[-LEVEL]
except Exception:
bad_masks.append(img_id)
mask = np.zeros_like(image)
else:
mask = np.zeros_like(image)
if STRIDE:
image, mask = tile_maker.make_multistride(image, mask)
else:
image, mask = tile_maker.make(image, mask)
sys.stdout.write(f'\r{i + 1}/{len(img_list)}')
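# Per-image channel statistics; aggregated below into per-provider mean/std for normalisation.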
image_stats.append({'image_id': img_id, 'mean': image.mean(axis=(0, 1, 2)) / 255,
'mean_square': ((image / 255) ** 2).mean(axis=(0, 1, 2)),
'img_mean': (255 - image).mean()})
for i, (tile_image, tile_mask) in enumerate(zip(image, mask)):
a = (img_id + '_' + str(i) + '.png')
b = (img_id + '_' + str(i) + '.png')
files.append({'image_id': img_id, 'num': i, 'filename': a, 'maskname': b,
'value': (255-tile_image[:, :, 0]).mean()})
skimage.io.imsave(OUTPUT_IMG_PATH / a, tile_image, check_contrast=False)
skimage.io.imsave(OUTPUT_MASK_PATH / b, tile_mask, check_contrast=False)
image_stats = pd.DataFrame(image_stats)
df = pd.read_csv(CSV_PATH)
df = pd.merge(df, image_stats, on='image_id', how='left')
df[['image_id', 'img_mean']].to_csv(OUTPUT_BASE/f'img_mean_{SIZE}_{LEVEL}.csv', index=False)
provider_stats = {}
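# Per-provider normalisation stats: std is recovered from E[x^2] - E[x]^2.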
for provider in df['data_provider'].unique():
mean = (df[df['data_provider'] == provider]['mean']).mean(0)
std = np.sqrt((df[df['data_provider'] == provider]['mean_square']).mean(0) - mean ** 2)
provider_stats[provider] = (mean, std)
mean = (df['mean']).mean()
std = np.sqrt((df['mean_square']).mean() - mean ** 2)
provider_stats['all'] = (mean, std)
with open(PICKLE_NAME, 'wb') as file:
pickle.dump(provider_stats, file)
pd.DataFrame(files).to_csv(OUTPUT_BASE/f'files_{SIZE}_{LEVEL}.csv', index=False)
print(bad_images)
print(bad_masks)
print(provider_stats)
| def __init__(self, size, number):
self.size = size
self.number = number
def make_multistride(self, image, mask):
# Pad only once
image, mask = self.__pad(image, mask)
s0, m0 = self.__get_tiles(image, mask)
# For strided grids, need to also remove on the right/bottom
s1, m1 = self.__get_tiles(image[self.size // 2:-self.size // 2, :],
mask[self.size // 2:-self.size // 2, :])
s2, m2 = self.__get_tiles(image[:, self.size // 2:-self.size // 2],
mask[:, self.size // 2:-self.size // 2])
s3, m3 = self.__get_tiles(image[self.size // 2:-self.size // 2, self.size // 2:-self.size // 2],
mask[self.size // 2:-self.size // 2, self.size // 2:-self.size // 2])
all_tiles = np.concatenate([s0, s1, s2, s3], axis=0)
all_masks = np.concatenate([m0, m1, m2, m3], axis=0)
# Rank tiles by tissue content: sum of red-channel values in the 200-230 band (near-white background is excluded)
red_channel = all_tiles[:, :, :, 0]
tissue = np.where((red_channel < 230) & (red_channel > 200), red_channel, 0)
sorted_tiles = np.argsort(np.sum(tissue, axis=(1, 2)))[::-1]
sorted_tiles = sorted_tiles[:self.number * 4]
return all_tiles[sorted_tiles], all_masks[sorted_tiles]
def __pad(self, image, mask):
h, w, c = image.shape
horizontal_pad = 0 if (w % self.size) == 0 else self.size - (w % self.size)
vertical_pad = 0 if (h % self.size) == 0 else self.size - (h % self.size)
image = np.pad(image, pad_width=((vertical_pad // 2, vertical_pad - vertical_pad // 2),
(horizontal_pad // 2, horizontal_pad - horizontal_pad // 2),
(0, 0)),
mode='constant', constant_values=255) # Empty is white in this data
mask = np.pad(mask, pad_width=((vertical_pad // 2, vertical_pad - vertical_pad // 2),
(horizontal_pad // 2, horizontal_pad - horizontal_pad // 2),
(0, 0)),
mode='constant', constant_values=0) # Empty is black in this data
return image, mask
def __get_tiles(self, image, mask):
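# Split the padded image into non-overlapping size x size tiles via reshape + swapaxes,
# yielding arrays of shape (num_tiles, size, size, c); blank tiles are appended if
# fewer than `number` tiles are available.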
h, w, c = image.shape
image = image.reshape(h // self.size, self.size, w // self.size, self.size, c)
image = image.swapaxes(1, 2).reshape(-1, self.size, self.size, c)
mask = mask.reshape(h // self.size, self.size, w // self.size, self.size, c)
mask = mask.swapaxes(1, 2).reshape(-1, self.size, self.size, c)
if image.shape[0] < self.number:
image = np.pad(image, pad_width=((0, self.number - image.shape[0]), (0, 0), (0, 0), (0, 0)),
mode='constant', constant_values=255)
mask = np.pad(mask, pad_width=((0, self.number - mask.shape[0]), (0, 0), (0, 0), (0, 0)),
mode='constant', constant_values=0)
return image, mask
def make(self, image, mask):
image, mask = self.__pad(image, mask)
image, mask = self.__get_tiles(image, mask)
# Rank tiles by tissue (epithelium) content: sum of red-channel values in the 200-230 band, excluding near-white background
red_channel = image[:, :, :, 0]
tissue = np.where((red_channel < 230) & (red_channel > 200), red_channel, 0)
sorted_tiles = np.argsort(np.sum(tissue, axis=(1, 2)))[::-1]
sorted_tiles = sorted_tiles[:self.number]
return image[sorted_tiles], mask[sorted_tiles] |
ip.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use vec::MutableCloneableVector;
use to_str::ToStr;
use from_str::FromStr;
use option::{Option, None, Some};
pub type Port = u16;
#[deriving(Eq, TotalEq, Clone)]
pub enum IpAddr {
Ipv4Addr(u8, u8, u8, u8),
Ipv6Addr(u16, u16, u16, u16, u16, u16, u16, u16)
}
impl ToStr for IpAddr {
fn to_str(&self) -> ~str {
match *self {
Ipv4Addr(a, b, c, d) =>
format!("{}.{}.{}.{}", a, b, c, d),
// Ipv4 Compatible address
Ipv6Addr(0, 0, 0, 0, 0, 0, g, h) => {
format!("::{}.{}.{}.{}", (g >> 8) as u8, g as u8,
(h >> 8) as u8, h as u8)
}
// Ipv4-Mapped address
Ipv6Addr(0, 0, 0, 0, 0, 0xFFFF, g, h) => {
format!("::FFFF:{}.{}.{}.{}", (g >> 8) as u8, g as u8,
(h >> 8) as u8, h as u8)
}
Ipv6Addr(a, b, c, d, e, f, g, h) =>
format!("{}:{}:{}:{}:{}:{}:{}:{}", a, b, c, d, e, f, g, h)
}
}
}
#[deriving(Eq, TotalEq, Clone)]
pub struct SocketAddr {
ip: IpAddr,
port: Port,
}
impl ToStr for SocketAddr {
fn to_str(&self) -> ~str {
match self.ip {
Ipv4Addr(*) => format!("{}:{}", self.ip.to_str(), self.port),
Ipv6Addr(*) => format!("[{}]:{}", self.ip.to_str(), self.port),
}
}
}
struct Parser<'self> {
// parsing as ASCII, so can use byte array
s: &'self [u8],
pos: uint,
}
impl<'self> Parser<'self> {
fn | (s: &'self str) -> Parser<'self> {
Parser {
s: s.as_bytes(),
pos: 0,
}
}
fn is_eof(&self) -> bool {
self.pos == self.s.len()
}
// Commit only if parser returns Some
fn read_atomically<T>(&mut self, cb: &fn(&mut Parser) -> Option<T>) -> Option<T> {
let pos = self.pos;
let r = cb(self);
if r.is_none() {
self.pos = pos;
}
r
}
// Commit only if parser read till EOF
fn read_till_eof<T>(&mut self, cb: &fn(&mut Parser) -> Option<T>) -> Option<T> {
do self.read_atomically |p| {
cb(p).filtered(|_| p.is_eof())
}
}
// Return result of first successful parser
fn read_or<T>(&mut self, parsers: &[&fn(&mut Parser) -> Option<T>]) -> Option<T> {
for pf in parsers.iter() {
match self.read_atomically(|p: &mut Parser| (*pf)(p)) {
Some(r) => return Some(r),
None => {}
}
}
None
}
// Apply 3 parsers sequentially
fn read_seq_3<A, B, C>(&mut self,
pa: &fn(&mut Parser) -> Option<A>,
pb: &fn(&mut Parser) -> Option<B>,
pc: &fn(&mut Parser) -> Option<C>
) -> Option<(A, B, C)>
{
do self.read_atomically |p| {
let a = pa(p);
let b = if a.is_some() { pb(p) } else { None };
let c = if b.is_some() { pc(p) } else { None };
match (a, b, c) {
(Some(a), Some(b), Some(c)) => Some((a, b, c)),
_ => None
}
}
}
// Read next char
fn read_char(&mut self) -> Option<char> {
if self.is_eof() {
None
} else {
let r = self.s[self.pos] as char;
self.pos += 1;
Some(r)
}
}
// Return char and advance iff next char is equal to requested
fn read_given_char(&mut self, c: char) -> Option<char> {
do self.read_atomically |p| {
p.read_char().filtered(|&next| next == c)
}
}
// Read digit
fn read_digit(&mut self, radix: u8) -> Option<u8> {
fn parse_digit(c: char, radix: u8) -> Option<u8> {
let c = c as u8;
// assuming radix is either 10 or 16
if c >= '0' as u8 && c <= '9' as u8 {
Some((c - '0' as u8) as u8)
} else if radix > 10 && c >= 'a' as u8 && c < 'a' as u8 + (radix - 10) {
Some((c - 'a' as u8 + 10) as u8)
} else if radix > 10 && c >= 'A' as u8 && c < 'A' as u8 + (radix - 10) {
Some((c - 'A' as u8 + 10) as u8)
} else {
None
}
}
do self.read_atomically |p| {
p.read_char().and_then(|c| parse_digit(c, radix))
}
}
fn read_number_impl(&mut self, radix: u8, max_digits: u32, upto: u32) -> Option<u32> {
let mut r = 0u32;
let mut digit_count = 0;
loop {
match self.read_digit(radix) {
Some(d) => {
r = r * (radix as u32) + (d as u32);
digit_count += 1;
if digit_count > max_digits || r >= upto {
return None
}
}
None => {
if digit_count == 0 {
return None
} else {
return Some(r)
}
}
};
}
}
// Read a number, failing if it has more than max_digits digits or its value reaches upto
fn read_number(&mut self, radix: u8, max_digits: u32, upto: u32) -> Option<u32> {
do self.read_atomically |p| {
p.read_number_impl(radix, max_digits, upto)
}
}
fn read_ipv4_addr_impl(&mut self) -> Option<IpAddr> {
let mut bs = [0u8, ..4];
let mut i = 0;
while i < 4 {
if i != 0 && self.read_given_char('.').is_none() {
return None;
}
let octet = self.read_number(10, 3, 0x100).map(|n| n as u8);
match octet {
Some(d) => bs[i] = d,
None => return None,
};
i += 1;
}
Some(Ipv4Addr(bs[0], bs[1], bs[2], bs[3]))
}
// Read IPv4 address
fn read_ipv4_addr(&mut self) -> Option<IpAddr> {
do self.read_atomically |p| {
p.read_ipv4_addr_impl()
}
}
fn read_ipv6_addr_impl(&mut self) -> Option<IpAddr> {
fn ipv6_addr_from_head_tail(head: &[u16], tail: &[u16]) -> IpAddr {
assert!(head.len() + tail.len() <= 8);
let mut gs = [0u16, ..8];
gs.copy_from(head);
gs.mut_slice(8 - tail.len(), 8).copy_from(tail);
Ipv6Addr(gs[0], gs[1], gs[2], gs[3], gs[4], gs[5], gs[6], gs[7])
}
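// Parse up to `limit` colon-separated 16-bit groups; an embedded trailing IPv4
// address fills two groups. Returns (groups read, whether an IPv4 part was read).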
fn read_groups(p: &mut Parser, groups: &mut [u16, ..8], limit: uint) -> (uint, bool) {
let mut i = 0;
while i < limit {
if i < limit - 1 {
let ipv4 = do p.read_atomically |p| {
if i == 0 || p.read_given_char(':').is_some() {
p.read_ipv4_addr()
} else {
None
}
};
match ipv4 {
Some(Ipv4Addr(a, b, c, d)) => {
groups[i + 0] = (a as u16 << 8) | (b as u16);
groups[i + 1] = (c as u16 << 8) | (d as u16);
return (i + 2, true);
}
_ => {}
}
}
let group = do p.read_atomically |p| {
if i == 0 || p.read_given_char(':').is_some() {
p.read_number(16, 4, 0x10000).map(|n| n as u16)
} else {
None
}
};
match group {
Some(g) => groups[i] = g,
None => return (i, false)
}
i += 1;
}
(i, false)
}
let mut head = [0u16, ..8];
let (head_size, head_ipv4) = read_groups(self, &mut head, 8);
if head_size == 8 {
return Some(Ipv6Addr(
head[0], head[1], head[2], head[3],
head[4], head[5], head[6], head[7]))
}
// IPv4 part is not allowed before `::`
if head_ipv4 {
return None
}
// read `::` if previous code parsed less than 8 groups
if !self.read_given_char(':').is_some() || !self.read_given_char(':').is_some() {
return None;
}
let mut tail = [0u16, ..8];
let (tail_size, _) = read_groups(self, &mut tail, 8 - head_size);
Some(ipv6_addr_from_head_tail(head.slice(0, head_size), tail.slice(0, tail_size)))
}
fn read_ipv6_addr(&mut self) -> Option<IpAddr> {
do self.read_atomically |p| {
p.read_ipv6_addr_impl()
}
}
fn read_ip_addr(&mut self) -> Option<IpAddr> {
let ipv4_addr = |p: &mut Parser| p.read_ipv4_addr();
let ipv6_addr = |p: &mut Parser| p.read_ipv6_addr();
self.read_or([ipv4_addr, ipv6_addr])
}
fn read_socket_addr(&mut self) -> Option<SocketAddr> {
let ip_addr = |p: &mut Parser| {
let ipv4_p = |p: &mut Parser| p.read_ip_addr();
let ipv6_p = |p: &mut Parser| {
let open_br = |p: &mut Parser| p.read_given_char('[');
let ip_addr = |p: &mut Parser| p.read_ipv6_addr();
let clos_br = |p: &mut Parser| p.read_given_char(']');
p.read_seq_3::<char, IpAddr, char>(open_br, ip_addr, clos_br)
.map(|t| match t { (_, ip, _) => ip })
};
p.read_or([ipv4_p, ipv6_p])
};
let colon = |p: &mut Parser| p.read_given_char(':');
let port = |p: &mut Parser| p.read_number(10, 5, 0x10000).map(|n| n as u16);
// host, colon, port
self.read_seq_3::<IpAddr, char, u16>(ip_addr, colon, port)
.map(|t| match t { (ip, _, port) => SocketAddr { ip: ip, port: port } })
}
}
impl FromStr for IpAddr {
fn from_str(s: &str) -> Option<IpAddr> {
do Parser::new(s).read_till_eof |p| {
p.read_ip_addr()
}
}
}
impl FromStr for SocketAddr {
fn from_str(s: &str) -> Option<SocketAddr> {
do Parser::new(s).read_till_eof |p| {
p.read_socket_addr()
}
}
}
#[cfg(test)]
mod test {
use super::*;
use from_str::FromStr;
use option::{Option, Some, None};
#[test]
fn test_from_str_ipv4() {
assert_eq!(Some(Ipv4Addr(127, 0, 0, 1)), FromStr::from_str("127.0.0.1"));
assert_eq!(Some(Ipv4Addr(255, 255, 255, 255)), FromStr::from_str("255.255.255.255"));
assert_eq!(Some(Ipv4Addr(0, 0, 0, 0)), FromStr::from_str("0.0.0.0"));
// out of range
let none: Option<IpAddr> = FromStr::from_str("256.0.0.1");
assert_eq!(None, none);
// too short
let none: Option<IpAddr> = FromStr::from_str("255.0.0");
assert_eq!(None, none);
// too long
let none: Option<IpAddr> = FromStr::from_str("255.0.0.1.2");
assert_eq!(None, none);
// no number between dots
let none: Option<IpAddr> = FromStr::from_str("255.0..1");
assert_eq!(None, none);
}
#[test]
fn test_from_str_ipv6() {
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 0)), FromStr::from_str("0:0:0:0:0:0:0:0"));
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1)), FromStr::from_str("0:0:0:0:0:0:0:1"));
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1)), FromStr::from_str("::1"));
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 0)), FromStr::from_str("::"));
assert_eq!(Some(Ipv6Addr(0x2a02, 0x6b8, 0, 0, 0, 0, 0x11, 0x11)),
FromStr::from_str("2a02:6b8::11:11"));
// too long group
let none: Option<IpAddr> = FromStr::from_str("::00000");
assert_eq!(None, none);
// too short
let none: Option<IpAddr> = FromStr::from_str("1:2:3:4:5:6:7");
assert_eq!(None, none);
// too long
let none: Option<IpAddr> = FromStr::from_str("1:2:3:4:5:6:7:8:9");
assert_eq!(None, none);
// triple colon
let none: Option<IpAddr> = FromStr::from_str("1:2:::6:7:8");
assert_eq!(None, none);
// two double colons
let none: Option<IpAddr> = FromStr::from_str("1:2::6::8");
assert_eq!(None, none);
}
#[test]
fn test_from_str_ipv4_in_ipv6() {
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0, 49152, 545)),
FromStr::from_str("::192.0.2.33"));
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0xFFFF, 49152, 545)),
FromStr::from_str("::FFFF:192.0.2.33"));
assert_eq!(Some(Ipv6Addr(0x64, 0xff9b, 0, 0, 0, 0, 49152, 545)),
FromStr::from_str("64:ff9b::192.0.2.33"));
assert_eq!(Some(Ipv6Addr(0x2001, 0xdb8, 0x122, 0xc000, 0x2, 0x2100, 49152, 545)),
FromStr::from_str("2001:db8:122:c000:2:2100:192.0.2.33"));
// colon after v4
let none: Option<IpAddr> = FromStr::from_str("::127.0.0.1:");
assert_eq!(None, none);
// not enough groups
let none: Option<IpAddr> = FromStr::from_str("1.2.3.4.5:127.0.0.1");
assert_eq!(None, none);
// too many groups
let none: Option<IpAddr> =
FromStr::from_str("1.2.3.4.5:6:7:127.0.0.1");
assert_eq!(None, none);
}
#[test]
fn test_from_str_socket_addr() {
assert_eq!(Some(SocketAddr { ip: Ipv4Addr(77, 88, 21, 11), port: 80 }),
FromStr::from_str("77.88.21.11:80"));
assert_eq!(Some(SocketAddr { ip: Ipv6Addr(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), port: 53 }),
FromStr::from_str("[2a02:6b8:0:1::1]:53"));
assert_eq!(Some(SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0x7F00, 1), port: 22 }),
FromStr::from_str("[::127.0.0.1]:22"));
// without port
let none: Option<SocketAddr> = FromStr::from_str("127.0.0.1");
assert_eq!(None, none);
// without port
let none: Option<SocketAddr> = FromStr::from_str("127.0.0.1:");
assert_eq!(None, none);
// wrong brackets around v4
let none: Option<SocketAddr> = FromStr::from_str("[127.0.0.1]:22");
assert_eq!(None, none);
// port out of range
let none: Option<SocketAddr> = FromStr::from_str("127.0.0.1:123456");
assert_eq!(None, none);
}
#[test]
fn ipv6_addr_to_str() {
let a1 = Ipv6Addr(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280);
assert!(a1.to_str() == ~"::ffff:192.0.2.128" || a1.to_str() == ~"::FFFF:192.0.2.128");
}
}
| new |
test_sensor.py | """Tests for greeneye_monitor sensors."""
from unittest.mock import AsyncMock
from homeassistant.components.greeneye_monitor.sensor import (
DATA_PULSES,
DATA_WATT_SECONDS,
)
from homeassistant.const import STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import (
RegistryEntryDisabler,
async_get as get_entity_registry,
)
from .common import (
MULTI_MONITOR_CONFIG,
SINGLE_MONITOR_CONFIG_POWER_SENSORS,
SINGLE_MONITOR_CONFIG_PULSE_COUNTERS,
SINGLE_MONITOR_CONFIG_TEMPERATURE_SENSORS,
SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS,
SINGLE_MONITOR_SERIAL_NUMBER,
connect_monitor,
setup_greeneye_monitor_component_with_config,
)
from .conftest import assert_sensor_state
async def test_sensor_does_not_exist_before_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor does not exist before its monitor is connected."""
# The sensor base class handles connecting the monitor, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
entity_registry = get_entity_registry(hass)
assert entity_registry.async_get("sensor.voltage_1") is None
async def test_sensors_created_when_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that sensors get created when the monitor first connects."""
# The sensor base class handles updating the state on connection, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
assert len(monitors.listeners) == 1
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert len(monitors.listeners) == 0 # Make sure we cleaned up the listener
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_sensors_created_during_setup_if_monitor_already_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that sensors get created during setup if the monitor happens to connect really quickly."""
# The sensor base class handles updating the state on connection, so we test this with a single voltage sensor for ease
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
assert len(monitors.listeners) == 0 # Make sure we cleaned up the listener
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_disable_sensor_after_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor disabled after its monitor connected stops listening for sensor changes."""
# The sensor base class handles connecting the monitor, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert len(monitor.voltage_sensor.listeners) == 1
await disable_entity(hass, "sensor.voltage_1")
assert len(monitor.voltage_sensor.listeners) == 0
async def test_updates_state_when_sensor_pushes(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor entity updates its state when the underlying sensor pushes an update."""
# The sensor base class handles triggering state updates, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
monitor.voltage_sensor.voltage = 119.8
monitor.voltage_sensor.notify_all_listeners()
assert_sensor_state(hass, "sensor.voltage_1", "119.8")
async def test_power_sensor_initially_unknown(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that the power sensor can handle its initial state being unknown (since the GEM API needs at least two packets to arrive before it can compute watts)."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_POWER_SENSORS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(
hass, "sensor.channel_1", STATE_UNKNOWN, {DATA_WATT_SECONDS: 1000}
)
# This sensor was configured with net metering on, so we should be taking the
# polarized value
assert_sensor_state(
hass, "sensor.channel_two", STATE_UNKNOWN, {DATA_WATT_SECONDS: -400}
)
async def test_power_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a power sensor reports its values correctly, including handling net metering."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_POWER_SENSORS
) | monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
monitor.channels[0].watts = 120.0
monitor.channels[1].watts = 120.0
monitor.channels[0].notify_all_listeners()
monitor.channels[1].notify_all_listeners()
assert_sensor_state(hass, "sensor.channel_1", "120.0", {DATA_WATT_SECONDS: 1000})
# This sensor was configured with net metering on, so we should be taking the
# polarized value
assert_sensor_state(hass, "sensor.channel_two", "120.0", {DATA_WATT_SECONDS: -400})
async def test_pulse_counter_initially_unknown(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that the pulse counter sensor can handle its initial state being unknown (since the GEM API needs at least two packets to arrive before it can compute pulses per time)."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_PULSE_COUNTERS
)
monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
monitor.pulse_counters[0].pulses_per_second = None
monitor.pulse_counters[1].pulses_per_second = None
monitor.pulse_counters[2].pulses_per_second = None
monitor.pulse_counters[0].notify_all_listeners()
monitor.pulse_counters[1].notify_all_listeners()
monitor.pulse_counters[2].notify_all_listeners()
assert_sensor_state(hass, "sensor.pulse_a", STATE_UNKNOWN, {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per minute, so 10 pulses per second -> 300 gal/min
assert_sensor_state(hass, "sensor.pulse_2", STATE_UNKNOWN, {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per hour, so 10 pulses per second -> 18000 gal/hr
assert_sensor_state(hass, "sensor.pulse_3", STATE_UNKNOWN, {DATA_PULSES: 1000})
async def test_pulse_counter(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a pulse counter sensor reports its values properly, including calculating different units."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_PULSE_COUNTERS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.pulse_a", "10.0", {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per minute, so 10 pulses per second -> 300 gal/min
assert_sensor_state(hass, "sensor.pulse_2", "300.0", {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per hour, so 10 pulses per second -> 18000 gal/hr
assert_sensor_state(hass, "sensor.pulse_3", "18000.0", {DATA_PULSES: 1000})
async def test_temperature_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a temperature sensor reports its values properly, including proper handling of when its native unit is different from that configured in hass."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_TEMPERATURE_SENSORS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
# The config says that the sensor is reporting in Fahrenheit; if we set that up
# properly, HA will have converted that to Celsius by default.
assert_sensor_state(hass, "sensor.temp_a", "0.0")
async def test_voltage_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a voltage sensor reports its values properly."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_multi_monitor_sensors(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that sensors still work when multiple monitors are registered."""
await setup_greeneye_monitor_component_with_config(hass, MULTI_MONITOR_CONFIG)
await connect_monitor(hass, monitors, 1)
await connect_monitor(hass, monitors, 2)
await connect_monitor(hass, monitors, 3)
assert_sensor_state(hass, "sensor.unit_1_temp_1", "32.0")
assert_sensor_state(hass, "sensor.unit_2_temp_1", "0.0")
assert_sensor_state(hass, "sensor.unit_3_temp_1", "32.0")
async def disable_entity(hass: HomeAssistant, entity_id: str) -> None:
"""Disable the given entity."""
entity_registry = get_entity_registry(hass)
entity_registry.async_update_entity(
entity_id, disabled_by=RegistryEntryDisabler.USER
)
await hass.async_block_till_done() | |
basic.rs | use anyhow::Context as _;
use anyhow::Result;
use async_trait::async_trait;
use asynchronous_codec::Bytes;
use futures::SinkExt;
use futures::StreamExt;
use libp2p_core::multiaddr::Protocol;
use libp2p_core::Multiaddr;
use std::collections::HashSet;
use std::time::Duration;
use tokio_tasks::Tasks;
use xtra::message_channel::StrongMessageChannel;
use xtra::spawn::TokioGlobalSpawnExt;
use xtra::Actor;
use xtra::Address;
use xtra_libp2p::libp2p::identity::Keypair;
use xtra_libp2p::libp2p::transport::MemoryTransport;
use xtra_libp2p::libp2p::PeerId;
use xtra_libp2p::Connect;
use xtra_libp2p::Disconnect;
use xtra_libp2p::Endpoint;
use xtra_libp2p::GetConnectionStats;
use xtra_libp2p::ListenOn;
use xtra_libp2p::NewInboundSubstream;
use xtra_libp2p::OpenSubstream;
use xtra_productivity::xtra_productivity;
#[tokio::test]
async fn hello_world() {
let alice_hello_world_handler = HelloWorld::default().create(None).spawn_global();
let (alice_peer_id, _, _alice, bob, _) = alice_and_bob(
[(
"/hello-world/1.0.0",
alice_hello_world_handler.clone_channel(),
)],
[],
)
.await;
let bob_to_alice = bob
.send(OpenSubstream::single_protocol(
alice_peer_id,
"/hello-world/1.0.0",
))
.await
.unwrap()
.unwrap();
let string = hello_world_dialer(bob_to_alice, "Bob").await.unwrap();
assert_eq!(string, "Hello Bob!");
}
#[tokio::test]
async fn after_connect_see_each_other_as_connected() {
let (alice_peer_id, bob_peer_id, alice, bob, _) = alice_and_bob([], []).await;
let alice_stats = alice.send(GetConnectionStats).await.unwrap();
let bob_stats = bob.send(GetConnectionStats).await.unwrap();
assert_eq!(alice_stats.connected_peers, HashSet::from([bob_peer_id]));
assert_eq!(bob_stats.connected_peers, HashSet::from([alice_peer_id]));
}
#[tokio::test]
async fn disconnect_is_reflected_in_stats() {
let (_, bob_peer_id, alice, bob, _) = alice_and_bob([], []).await;
alice.send(Disconnect(bob_peer_id)).await.unwrap();
let alice_stats = alice.send(GetConnectionStats).await.unwrap();
let bob_stats = bob.send(GetConnectionStats).await.unwrap();
assert_eq!(alice_stats.connected_peers, HashSet::from([]));
assert_eq!(bob_stats.connected_peers, HashSet::from([]));
}
#[tokio::test]
async fn listen_address_is_reflected_in_stats() {
let (_, _, alice, _, listen_address) = alice_and_bob([], []).await;
let alice_stats = alice.send(GetConnectionStats).await.unwrap();
assert_eq!(
alice_stats.listen_addresses,
HashSet::from([listen_address])
);
}
#[tokio::test]
async fn cannot_open_substream_for_unhandled_protocol() {
let (_, bob_peer_id, alice, _bob, _) = alice_and_bob([], []).await;
let error = alice
.send(OpenSubstream::single_protocol(
bob_peer_id,
"/foo/bar/1.0.0",
))
.await
.unwrap()
.unwrap_err();
assert!(matches!(
error,
xtra_libp2p::Error::NegotiationFailed(xtra_libp2p::NegotiationError::Failed)
))
}
#[tokio::test]
async fn cannot_connect_twice() {
let (alice_peer_id, _bob_peer_id, _alice, bob, alice_listen) = alice_and_bob([], []).await;
let error = bob
.send(Connect(
alice_listen.with(Protocol::P2p(alice_peer_id.into())),
))
.await
.unwrap()
.unwrap_err();
assert!(matches!(
error,
xtra_libp2p::Error::AlreadyConnected(twin) if twin == alice_peer_id
))
}
#[tokio::test]
async fn chooses_first_protocol_in_list_of_multiple() {
let alice_hello_world_handler = HelloWorld::default().create(None).spawn_global();
let (alice_peer_id, _, _alice, bob, _) = alice_and_bob(
[(
"/hello-world/1.0.0",
alice_hello_world_handler.clone_channel(),
)],
[],
)
.await;
let (actual_protocol, _) = bob
.send(OpenSubstream::multiple_protocols(
alice_peer_id,
vec![
"/hello-world/1.0.0",
"/foo-bar/1.0.0", // This is unsupported by Alice. | .await
.unwrap()
.unwrap();
assert_eq!(actual_protocol, "/hello-world/1.0.0");
}
#[cfg_attr(debug_assertions, tokio::test)] // The assertion for duplicate handlers only runs in debug mode.
#[should_panic(expected = "Duplicate handler declared for protocol /hello-world/1.0.0")]
async fn disallow_duplicate_handlers() {
let hello_world_handler = HelloWorld::default().create(None).spawn_global();
make_endpoint([
("/hello-world/1.0.0", hello_world_handler.clone_channel()),
("/hello-world/1.0.0", hello_world_handler.clone_channel()),
]);
}
#[tokio::test]
async fn falls_back_to_next_protocol_if_unsupported() {
let alice_hello_world_handler = HelloWorld::default().create(None).spawn_global();
let (alice_peer_id, _, _alice, bob, _) = alice_and_bob(
[(
"/hello-world/1.0.0",
alice_hello_world_handler.clone_channel(),
)],
[],
)
.await;
let (actual_protocol, _) = bob
.send(OpenSubstream::multiple_protocols(
alice_peer_id,
vec![
"/foo-bar/1.0.0", // This is unsupported by Alice.
"/hello-world/1.0.0",
],
))
.await
.unwrap()
.unwrap();
assert_eq!(actual_protocol, "/hello-world/1.0.0");
}
async fn alice_and_bob<const AN: usize, const BN: usize>(
alice_inbound_substream_handlers: [(
&'static str,
Box<dyn StrongMessageChannel<NewInboundSubstream>>,
); AN],
bob_inbound_substream_handlers: [(
&'static str,
Box<dyn StrongMessageChannel<NewInboundSubstream>>,
); BN],
) -> (
PeerId,
PeerId,
Address<Endpoint>,
Address<Endpoint>,
Multiaddr,
) {
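// Random "port" on the in-memory transport, so concurrently running tests are unlikely to collide on the same listen address.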
let port = rand::random::<u16>();
let (alice_peer_id, alice) = make_endpoint(alice_inbound_substream_handlers);
let (bob_peer_id, bob) = make_endpoint(bob_inbound_substream_handlers);
let alice_listen = format!("/memory/{port}").parse::<Multiaddr>().unwrap();
alice.send(ListenOn(alice_listen.clone())).await.unwrap();
bob.send(Connect(
format!("/memory/{port}/p2p/{alice_peer_id}")
.parse()
.unwrap(),
))
.await
.unwrap()
.unwrap();
(alice_peer_id, bob_peer_id, alice, bob, alice_listen)
}
fn make_endpoint<const N: usize>(
substream_handlers: [(
&'static str,
Box<dyn StrongMessageChannel<NewInboundSubstream>>,
); N],
) -> (PeerId, Address<Endpoint>) {
let id = Keypair::generate_ed25519();
let peer_id = id.public().to_peer_id();
let endpoint = Endpoint::new(
MemoryTransport::default(),
id,
Duration::from_secs(20),
substream_handlers,
)
.create(None)
.spawn_global();
(peer_id, endpoint)
}
#[derive(Default)]
struct HelloWorld {
tasks: Tasks,
}
#[xtra_productivity(message_impl = false)]
impl HelloWorld {
async fn handle(&mut self, msg: NewInboundSubstream) {
tracing::info!("New hello world stream from {}", msg.peer);
self.tasks
.add_fallible(hello_world_listener(msg.stream), move |e| async move {
tracing::warn!("Hello world protocol with peer {} failed: {}", msg.peer, e);
});
}
}
#[async_trait]
impl Actor for HelloWorld {
type Stop = ();
async fn stopped(self) -> Self::Stop {}
}
async fn hello_world_dialer(stream: xtra_libp2p::Substream, name: &'static str) -> Result<String> {
let mut stream = asynchronous_codec::Framed::new(stream, asynchronous_codec::LengthCodec);
stream.send(Bytes::from(name)).await?;
let bytes = stream.next().await.context("Expected message")??;
let message = String::from_utf8(bytes.to_vec())?;
Ok(message)
}
async fn hello_world_listener(stream: xtra_libp2p::Substream) -> Result<()> {
let mut stream =
asynchronous_codec::Framed::new(stream, asynchronous_codec::LengthCodec).fuse();
let bytes = stream.select_next_some().await?;
let name = String::from_utf8(bytes.to_vec())?;
stream.send(Bytes::from(format!("Hello {name}!"))).await?;
Ok(())
} | ],
)) |
kendo.culture.tn.min.js | /**
* Kendo UI v2016.3.1118 (http://www.telerik.com/kendo-ui)
* Copyright 2016 Telerik AD. All rights reserved.
*
* Kendo UI commercial licenses may be obtained at
* http://www.telerik.com/purchase/license-agreement/kendo-ui-complete
* If you do not own a commercial license, this file shall be governed by the trial license terms.
|
*/
!function(e){"function"==typeof define&&define.amd?define(["kendo.core.min"],e):e()}(function(){!function(e,M){kendo.cultures.tn={name:"tn",numberFormat:{pattern:["-n"],decimals:2,",":",",".":".",groupSize:[3],percent:{pattern:["-%n","%n"],decimals:2,",":",",".":".",groupSize:[3],symbol:"%"},currency:{name:"",abbr:"",pattern:["$-n","$ n"],decimals:2,",":",",".":".",groupSize:[3],symbol:"R"}},calendars:{standard:{days:{names:["Sontaga","Mosupologo","Labobedi","Laboraro","Labone","Labotlhano","Matlhatso"],namesAbbr:["Sont.","Mos.","Lab.","Labr.","Labn.","Labt.","Matlh."],namesShort:["So","Ms","Lb","Lr","Ln","Lt","Ma"]},months:{names:["Ferikgong","Tlhakole","Mopitlwe","Moranang","Motsheganong","Seetebosigo","Phukwi","Phatwe","Lwetse","Diphalane","Ngwanatsele","Sedimothole"],namesAbbr:["Fer.","Tlh.","Mop.","Mor.","Motsh.","Seet.","Phk.","Pht.","Lwetse.","Diph.","Ngwn.","Sed."]},AM:["Mo Mosong","mo mosong","MO MOSONG"],PM:["Mo Maitseboeng","mo maitseboeng","MO MAITSEBOENG"],patterns:{d:"dd/MM/yy",D:"dd MMMM yyyy",F:"dd MMMM yyyy hh:mm:ss tt",g:"dd/MM/yy hh:mm tt",G:"dd/MM/yy hh:mm:ss tt",m:"d MMMM",M:"d MMMM",s:"yyyy'-'MM'-'dd'T'HH':'mm':'ss",t:"hh:mm tt",T:"hh:mm:ss tt",u:"yyyy'-'MM'-'dd HH':'mm':'ss'Z'",y:"MMMM yyyy",Y:"MMMM yyyy"},"/":"/",":":":",firstDay:1}}}}(this)});
//# sourceMappingURL=kendo.culture.tn.min.js.map | |
sb.starboardAlreadyStarred.ts | import * as DJS from "discord.js";
import Bot from "structures/Bot";
import Event from "structures/Event";
export default class | extends Event {
constructor(bot: Bot) {
super(bot, "starboardAlreadyStarred");
}
async execute(bot: Bot, _: string, message: DJS.Message, user: DJS.User) {
if (!message.guild?.available) return;
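// Bail out early if the bot cannot send messages in this channel.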
if (
!(message.channel as DJS.TextChannel)
.permissionsFor(message.guild.me!)
?.has(DJS.Permissions.FLAGS.SEND_MESSAGES)
) {
return;
}
const lang = await bot.utils.getGuildLang(message.guild?.id);
return message.channel.send({
content: lang.EVENTS.STARBOARD_MESSAGE.replace("{userTag}", user.tag),
});
}
}
| StarboardAlreadyStarredEvent |
mod.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
#[cfg(test)]
mod consensusdb_test;
mod schema;
use crate::consensusdb::schema::{
block::{BlockSchema, SchemaBlock},
quorum_certificate::QCSchema,
single_entry::{SingleEntryKey, SingleEntrySchema},
};
use anyhow::{ensure, Result};
use consensus_types::{block::Block, common::Payload, quorum_cert::QuorumCert};
use libra_crypto::HashValue;
use libra_logger::prelude::*;
use schema::{BLOCK_CF_NAME, QC_CF_NAME, SINGLE_ENTRY_CF_NAME};
use schemadb::{ReadOptions, SchemaBatch, DB, DEFAULT_CF_NAME};
use std::{collections::HashMap, iter::Iterator, path::Path, time::Instant};
type HighestTimeoutCertificate = Vec<u8>;
type VoteMsgData = Vec<u8>;
pub struct ConsensusDB {
db: DB,
}
impl ConsensusDB {
pub fn new<P: AsRef<Path> + Clone>(db_root_path: P) -> Self |
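/// Load all persisted consensus data: the last vote message, the highest timeout certificate, and all blocks and quorum certificates.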
pub fn get_data<T: Payload>(
&self,
) -> Result<(
Option<VoteMsgData>,
Option<HighestTimeoutCertificate>,
Vec<Block<T>>,
Vec<QuorumCert>,
)> {
let last_vote_msg_data = self.get_last_vote_msg_data()?;
let highest_timeout_certificate = self.get_highest_timeout_certificate()?;
let consensus_blocks = self
.get_blocks()?
.into_iter()
.map(|(_block_hash, block_content)| block_content)
.collect::<Vec<_>>();
let consensus_qcs = self
.get_quorum_certificates()?
.into_iter()
.map(|(_block_hash, qc)| qc)
.collect::<Vec<_>>();
Ok((
last_vote_msg_data,
highest_timeout_certificate,
consensus_blocks,
consensus_qcs,
))
}
pub fn save_highest_timeout_certificate(
&self,
highest_timeout_certificate: HighestTimeoutCertificate,
) -> Result<()> {
let mut batch = SchemaBatch::new();
batch.put::<SingleEntrySchema>(
&SingleEntryKey::HighestTimeoutCertificate,
&highest_timeout_certificate,
)?;
self.commit(batch)
}
pub fn save_state(&self, last_vote: VoteMsgData) -> Result<()> {
let mut batch = SchemaBatch::new();
batch.put::<SingleEntrySchema>(&SingleEntryKey::LastVoteMsg, &last_vote)?;
self.commit(batch)
}
pub fn save_blocks_and_quorum_certificates<T: Payload>(
&self,
block_data: Vec<Block<T>>,
qc_data: Vec<QuorumCert>,
) -> Result<()> {
ensure!(
!block_data.is_empty() || !qc_data.is_empty(),
"Consensus block and qc data is empty!"
);
let mut batch = SchemaBatch::new();
block_data
.iter()
.map(|block| {
batch.put::<BlockSchema<T>>(
&block.id(),
&SchemaBlock::<T>::from_block(block.clone()),
)
})
.collect::<Result<()>>()?;
qc_data
.iter()
.map(|qc| batch.put::<QCSchema>(&qc.certified_block().id(), qc))
.collect::<Result<()>>()?;
self.commit(batch)
}
pub fn delete_blocks_and_quorum_certificates<T: Payload>(
&self,
block_ids: Vec<HashValue>,
) -> Result<()> {
ensure!(!block_ids.is_empty(), "Consensus block ids is empty!");
let mut batch = SchemaBatch::new();
block_ids
.iter()
.map(|hash| {
batch.delete::<BlockSchema<T>>(hash)?;
batch.delete::<QCSchema>(hash)
})
.collect::<Result<_>>()?;
self.commit(batch)
}
/// Write the whole schema batch including all data necessary to mutate the ledger
/// state of some transaction by leveraging rocksdb atomicity support.
fn commit(&self, batch: SchemaBatch) -> Result<()> {
self.db.write_schemas(batch)
}
/// Get latest timeout certificates (we only store the latest highest timeout certificates).
fn get_highest_timeout_certificate(&self) -> Result<Option<Vec<u8>>> {
self.db
.get::<SingleEntrySchema>(&SingleEntryKey::HighestTimeoutCertificate)
}
/// Delete the timeout certificates
pub fn delete_highest_timeout_certificate(&self) -> Result<()> {
let mut batch = SchemaBatch::new();
batch.delete::<SingleEntrySchema>(&SingleEntryKey::HighestTimeoutCertificate)?;
self.commit(batch)
}
/// Get latest vote message data (if available)
fn get_last_vote_msg_data(&self) -> Result<Option<Vec<u8>>> {
self.db
.get::<SingleEntrySchema>(&SingleEntryKey::LastVoteMsg)
}
pub fn delete_last_vote_msg(&self) -> Result<()> {
let mut batch = SchemaBatch::new();
batch.delete::<SingleEntrySchema>(&SingleEntryKey::LastVoteMsg)?;
self.commit(batch)
}
/// Get all consensus blocks.
fn get_blocks<T: Payload>(&self) -> Result<HashMap<HashValue, Block<T>>> {
let mut iter = self.db.iter::<BlockSchema<T>>(ReadOptions::default())?;
iter.seek_to_first();
iter.map(|value| value.and_then(|(k, v)| Ok((k, v.borrow_into_block().clone()))))
.collect::<Result<HashMap<HashValue, Block<T>>>>()
}
/// Get all consensus QCs.
fn get_quorum_certificates(&self) -> Result<HashMap<HashValue, QuorumCert>> {
let mut iter = self.db.iter::<QCSchema>(ReadOptions::default())?;
iter.seek_to_first();
iter.collect::<Result<HashMap<HashValue, QuorumCert>>>()
}
}
| {
let column_families = vec![
/* UNUSED CF = */ DEFAULT_CF_NAME,
BLOCK_CF_NAME,
QC_CF_NAME,
SINGLE_ENTRY_CF_NAME,
];
let path = db_root_path.as_ref().join("consensusdb");
let instant = Instant::now();
let db = DB::open(path.clone(), column_families)
.expect("ConsensusDB open failed; unable to continue");
info!(
"Opened ConsensusDB at {:?} in {} ms",
path,
instant.elapsed().as_millis()
);
Self { db }
} |
favs_v1.py | import json, base64, requests
from jinja2 import Environment, FileSystemLoader
from pathlib import Path
import webbrowser
import tweepy
import sys
class Twitter:
def __init__(self, auth, Bearer_Token=None):
self.auth = auth
self.Bearer_Token = Bearer_Token
if self.auth is None:
raise ValueError("API Authorisation keys needed.")
if (self.auth.get("API_key", None) == None) or (self.auth.get("API_secret_key", None) == None):
raise ValueError("'API_key' and 'API_scret_key' not specified")
Path().absolute().joinpath("data").mkdir(parents=True, exist_ok=True)
def GET_BearerToken(self):
credentials = self.auth["API_key"] + ":" + self.auth["API_secret_key"]
b64_creds = base64.b64encode(credentials.encode()).decode()
headers = {
"Authorization" : "Basic " + b64_creds,
"Content-Type" : "application/x-www-form-urlencoded",
"charset" : "UTF-8"
}
payload = {
"grant_type" : "client_credentials"
}
r = requests.post(
"https://api.twitter.com/oauth2/token",
headers=headers,
data=payload
)
self.Bearer_Token = r.json()
later_use = input("Save Bearer Token for later use? [y/n] : ")
if (later_use.lower() == "y"):
with open("Bearer_Token.json", "w", encoding="UTF-8") as bt_file:
json.dump(self.Bearer_Token, bt_file)
print("Saved to Bearer_Token.json")
return self.Bearer_Token
def GET_Favourites(self, screen_name, count, Bearer_Token=None):
if Bearer_Token == None:
print("Bearer Token not specified.\n Using from Class Instance.")
Bearer_Token = self.Bearer_Token
if Bearer_Token == None:
raise ValueError("Class instance not initialized Bearer_Token")
headers = {
"Authorization" : Bearer_Token["token_type"] +
" " +
Bearer_Token["access_token"]
}
payload = {
"screen_name" : screen_name,
"count" : count,
"tweet_mode" : "extended"
}
r = requests.get(
"https://api.twitter.com/1.1/favorites/list.json",
headers=headers,
params=payload
)
favs = r.json()
with open("data/favs.json", "w", encoding='UTF-8') as favs_file:
json.dump(favs, favs_file, indent=4, ensure_ascii=False)
print("{} Favourite Tweets Captured".format(len(favs)))
return favs
def GET_MediaTweets(self, favs=None):
if favs == None :
with open("data/favs.json", "r", encoding="UTF-8") as fav_file:
favs = json.load(fav_file)
media_tweets = []
for tweet in favs:
extended_entity = tweet.get("extended_entities", None)
if extended_entity != None:
media_tweet = {}
media_tweet["created_at"] = tweet["created_at"]
media_tweet["media"] = tweet["extended_entities"]["media"]
media_tweets.append(media_tweet)
with open("data/media_tweets.json", "w", encoding="UTF-8") as media_tweets_file:
json.dump(media_tweets, media_tweets_file, indent=4, ensure_ascii=False)
print("{} Tweets with Media".format(len(media_tweets)))
return media_tweets
def GET_Media(self, media_tweets=None):
if media_tweets == None:
with open("data/media_tweets.json", "r", encoding="UTF-8") as media_tweets_file:
media_tweets = json.load(media_tweets_file)
custom_media_list = []
for tweet in media_tweets:
media_ele_list = tweet["media"]
for media_ele in media_ele_list:
custom_media_ele = {}
custom_media_ele["created_at"] = tweet["created_at"]
custom_media_ele["type"] = media_ele["type"]
custom_media_ele["id"] = media_ele["id_str"]
if custom_media_ele["type"] == "video" or custom_media_ele["type"] == "animated_gif":
variants = {}
for video_variants in media_ele["video_info"]["variants"]:
if video_variants["content_type"] == "video/mp4":
variants[video_variants["bitrate"]] = video_variants["url"]
max_bitrate = max(variants.keys())
custom_media_ele["url"] = variants[max_bitrate]
else:
custom_media_ele["url"] = media_ele["media_url"]
custom_media_list.append(custom_media_ele)
with open("data/media.json", "w", encoding="UTF-8") as media_file:
json.dump(custom_media_list, media_file, indent=4, ensure_ascii=False)
print("{} Media".format(len(custom_media_list)))
return custom_media_list
def visualize(self, media=None):
if media == None:
with open("data/media.json", "r", encoding="UTF-8") as media_file:
media_list = json.load(media_file)
else:
media_list = media
file_loader = FileSystemLoader('Templates')
env = Environment(loader=file_loader) | output = template.render(media_list=media_list)
with open("./data/media.html", "w") as index_file:
index_file.write(output)
webbrowser.open("data\\media.html")
with open("auth.json", "r") as auth_file:
auth = json.load(auth_file)
twpy = Twitter(auth)
twpy.visualize(
twpy.GET_Media(
twpy.GET_MediaTweets(
twpy.GET_Favourites(
"Twitter", 200, twpy.GET_BearerToken()
)
)
)
) |
template = env.get_template('media.html') |
test_cli.py | import re
import subprocess
from subprocess import CalledProcessError
import tempfile
import pytest
import optuna
from optuna.cli import _Studies
from optuna.exceptions import CLIUsageError
from optuna.storages.base import DEFAULT_STUDY_NAME_PREFIX
from optuna.storages import RDBStorage
from optuna.testing.storage import StorageSupplier
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from typing import List # NOQA
from optuna.trial import Trial # NOQA
def test_create_study_command():
# type: () -> None
with StorageSupplier("new") as storage:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
# Create study.
command = ["optuna", "create-study", "--storage", storage_url]
subprocess.check_call(command)
# Command output should be in name string format (no-name + UUID).
study_name = str(subprocess.check_output(command).decode().strip())
name_re = r"^no-name-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$"
assert re.match(name_re, study_name) is not None
# study_name should be stored in storage.
study_id = storage.get_study_id_from_name(study_name)
assert study_id == 2
def test_create_study_command_with_study_name():
# type: () -> None
with StorageSupplier("new") as storage:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
study_name = "test_study"
# Create study with name.
command = ["optuna", "create-study", "--storage", storage_url, "--study-name", study_name]
study_name = str(subprocess.check_output(command).decode().strip())
# Check if study_name is stored in the storage.
study_id = storage.get_study_id_from_name(study_name)
assert storage.get_study_name_from_id(study_id) == study_name
def test_create_study_command_without_storage_url():
# type: () -> None
with pytest.raises(subprocess.CalledProcessError) as err:
subprocess.check_output(["optuna", "create-study"])
usage = err.value.output.decode()
assert usage.startswith("usage:")
def test_create_study_command_with_direction():
# type: () -> None
with StorageSupplier("new") as storage:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
command = ["optuna", "create-study", "--storage", storage_url, "--direction", "minimize"]
study_name = str(subprocess.check_output(command).decode().strip())
study_id = storage.get_study_id_from_name(study_name)
assert storage.get_study_direction(study_id) == optuna.structs.StudyDirection.MINIMIZE
command = ["optuna", "create-study", "--storage", storage_url, "--direction", "maximize"]
study_name = str(subprocess.check_output(command).decode().strip())
study_id = storage.get_study_id_from_name(study_name)
assert storage.get_study_direction(study_id) == optuna.structs.StudyDirection.MAXIMIZE
command = ["optuna", "create-study", "--storage", storage_url, "--direction", "test"]
# --direction should be either 'minimize' or 'maximize'.
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(command)
def | ():
# type: () -> None
with StorageSupplier("new") as storage:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
study_name = "delete-study-test"
# Create study.
command = ["optuna", "create-study", "--storage", storage_url, "--study-name", study_name]
subprocess.check_call(command)
assert study_name in {s.study_name: s for s in storage.get_all_study_summaries()}
# Delete study.
command = ["optuna", "delete-study", "--storage", storage_url, "--study-name", study_name]
subprocess.check_call(command)
assert study_name not in {s.study_name: s for s in storage.get_all_study_summaries()}
def test_delete_study_command_without_storage_url():
# type: () -> None
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_output(["optuna", "delete-study", "--study-name", "dummy_study"])
def test_study_set_user_attr_command():
# type: () -> None
with StorageSupplier("new") as storage:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
# Create study.
study_name = storage.get_study_name_from_id(storage.create_new_study())
base_command = [
"optuna",
"study",
"set-user-attr",
"--study",
study_name,
"--storage",
storage_url,
]
example_attrs = {"architecture": "ResNet", "baselen_score": "0.002"}
for key, value in example_attrs.items():
subprocess.check_call(base_command + ["--key", key, "--value", value])
# Attrs should be stored in storage.
study_id = storage.get_study_id_from_name(study_name)
study_user_attrs = storage.get_study_user_attrs(study_id)
assert len(study_user_attrs) == 2
assert all([study_user_attrs[k] == v for k, v in example_attrs.items()])
def test_studies_command():
# type: () -> None
with StorageSupplier("new") as storage:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
# First study.
study_1 = optuna.create_study(storage)
# Second study.
study_2 = optuna.create_study(storage, study_name="study_2")
study_2.optimize(objective_func, n_trials=10)
# Run command.
command = ["optuna", "studies", "--storage", storage_url]
output = str(subprocess.check_output(command).decode().strip())
rows = output.split("\n")
def get_row_elements(row_index):
# type: (int) -> List[str]
return [r.strip() for r in rows[row_index].split("|")[1:-1]]
assert len(rows) == 6
assert tuple(get_row_elements(1)) == _Studies._study_list_header
# Check study_name and n_trials for the first study.
elms = get_row_elements(3)
assert elms[0] == study_1.study_name
assert elms[2] == "0"
# Check study_name and n_trials for the second study.
elms = get_row_elements(4)
assert elms[0] == study_2.study_name
assert elms[2] == "10"
def test_create_study_command_with_skip_if_exists():
# type: () -> None
with StorageSupplier("new") as storage:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
study_name = "test_study"
# Create study with name.
command = ["optuna", "create-study", "--storage", storage_url, "--study-name", study_name]
study_name = str(subprocess.check_output(command).decode().strip())
# Check if study_name is stored in the storage.
study_id = storage.get_study_id_from_name(study_name)
assert storage.get_study_name_from_id(study_id) == study_name
# Try to create a study with the same name without the `--skip-if-exists` flag (error).
command = ["optuna", "create-study", "--storage", storage_url, "--study-name", study_name]
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_output(command)
# Try to create a study with the same name, this time with the `--skip-if-exists` flag (OK).
command = [
"optuna",
"create-study",
"--storage",
storage_url,
"--study-name",
study_name,
"--skip-if-exists",
]
study_name = str(subprocess.check_output(command).decode().strip())
new_study_id = storage.get_study_id_from_name(study_name)
assert study_id == new_study_id # The existing study instance is reused.
def test_dashboard_command():
# type: () -> None
with StorageSupplier("new") as storage, tempfile.NamedTemporaryFile("r") as tf_report:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
study_name = storage.get_study_name_from_id(storage.create_new_study())
command = [
"optuna",
"dashboard",
"--study",
study_name,
"--out",
tf_report.name,
"--storage",
storage_url,
]
subprocess.check_call(command)
html = tf_report.read()
assert "<body>" in html
assert "bokeh" in html
@pytest.mark.parametrize(
"origins", [["192.168.111.1:5006"], ["192.168.111.1:5006", "192.168.111.2:5006"]]
)
def test_dashboard_command_with_allow_websocket_origin(origins):
# type: (List[str]) -> None
with StorageSupplier("new") as storage, tempfile.NamedTemporaryFile("r") as tf_report:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
study_name = storage.get_study_name_from_id(storage.create_new_study())
command = [
"optuna",
"dashboard",
"--study",
study_name,
"--out",
tf_report.name,
"--storage",
storage_url,
]
for origin in origins:
command.extend(["--allow-websocket-origin", origin])
subprocess.check_call(command)
html = tf_report.read()
assert "<body>" in html
assert "bokeh" in html
# An example objective function for testing the study optimize command
def objective_func(trial):
# type: (Trial) -> float
x = trial.suggest_uniform("x", -10, 10)
return (x + 5) ** 2
def test_study_optimize_command():
# type: () -> None
with StorageSupplier("new") as storage:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
study_name = storage.get_study_name_from_id(storage.create_new_study())
command = [
"optuna",
"study",
"optimize",
"--study",
study_name,
"--n-trials",
"10",
__file__,
"objective_func",
"--storage",
storage_url,
]
subprocess.check_call(command)
study = optuna.load_study(storage=storage_url, study_name=study_name)
assert len(study.trials) == 10
assert "x" in study.best_params
# Check if a default value of study_name is stored in the storage.
assert storage.get_study_name_from_id(study._study_id).startswith(
DEFAULT_STUDY_NAME_PREFIX
)
def test_study_optimize_command_inconsistent_args():
# type: () -> None
with tempfile.NamedTemporaryFile() as tf:
db_url = "sqlite:///{}".format(tf.name)
# --study argument is missing.
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(
[
"optuna",
"study",
"optimize",
"--storage",
db_url,
"--n-trials",
"10",
__file__,
"objective_func",
]
)
def test_empty_argv():
# type: () -> None
command_empty = ["optuna"]
command_empty_output = str(subprocess.check_output(command_empty))
command_help = ["optuna", "help"]
command_help_output = str(subprocess.check_output(command_help))
assert command_empty_output == command_help_output
def test_check_storage_url():
# type: () -> None
storage_in_args = "sqlite:///args.db"
assert storage_in_args == optuna.cli._check_storage_url(storage_in_args)
with pytest.raises(CLIUsageError):
optuna.cli._check_storage_url(None)
def test_storage_upgrade_command():
# type: () -> None
with StorageSupplier("new") as storage:
assert isinstance(storage, RDBStorage)
storage_url = str(storage.engine.url)
command = ["optuna", "storage", "upgrade"]
with pytest.raises(CalledProcessError):
subprocess.check_call(command)
command.extend(["--storage", storage_url])
subprocess.check_call(command)
| test_delete_study_command |
furnace_test.scene.js | import { quad, block } from './helpers'
const lightMaterial = {
lambertian: {
// albedo: [1, 1, 1],
albedo: [0, 0, 0],
// emittance: [20, 15, 10]
emittance: [10, 10, 10]
}
}
const skyMaterial = {
lambertian: {
// albedo: [0.2, 0.2, 0.2],
albedo: [0, 0, 0],
emittance: [0xdd / 255, 0xee / 255, 1]
// emittance: [0.1, 0.1, 0.1]
}
}
const glassMaterial = {
// transmissive: {
fresnelSpecularTransmissive: {
// albedo: [0.9, 0.9, 0.9],
albedo: [1, 1, 1],
// refractiveIndex: 1.8
refractiveIndex: 1.62
// refractiveIndex: 1.333
}
}
const waterMaterial = {
// transmissive: {
fresnelSpecularTransmissive: {
albedo: [1, 1, 1],
// albedo: [0.7, 0.9, 1],
// albedo: [1, 1, 1],
// refractiveIndex: 1.8
// refractiveIndex: 1.62
refractiveIndex: 1.333
}
}
const mirrorMaterial = {
specular: {
albedo: [0.9, 0.9, 0.9],
emittance: [0, 0, 0]
}
}
const whiteMaterial = {
lambertian: {
albedo: [0.8, 0.8, 0.8]
}
}
const redMaterial = {
lambertian: {
albedo: [0.75, 0.25, 0.25]
}
}
const greenMaterial = {
lambertian: {
albedo: [0.25, 0.75, 0.25]
}
}
const cornellBox = [
// LIGHT
...quad(lightMaterial)(
[0.25, 1.0, 0.25],
[-0.25, 1.0, 0.25],
[-0.25, 1.0, -0.25],
[0.25, 1.0, -0.25]
),
// CEILING
...quad(whiteMaterial)(
[-0.25, 1.0, 0.25],
[-1, 1.0, 1],
[-1, 1.0, -1],
[-0.25, 1.0, -0.25]
),
...quad(whiteMaterial)(
[1, 1.0, 1],
[0.25, 1.0, 0.25],
[0.25, 1.0, -0.25],
[1, 1.0, -1]
),
...quad(whiteMaterial)(
[1, 1.0, 1],
[-1, 1.0, 1],
[-0.25, 1.0, 0.25],
[0.25, 1.0, 0.25]
),
...quad(whiteMaterial)(
[0.25, 1.0, -0.25],
[-0.25, 1.0, -0.25],
[-1, 1.0, -1],
[1, 1.0, -1]
),
// FLOOR
...quad(whiteMaterial)(
[-1.0, -1.0, 1.0],
[1.0, -1.0, 1.0],
[1.0, -1.0, -1.0],
[-1.0, -1.0, -1.0]
),
// // BACK
...quad(whiteMaterial)(
[-1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, -1.0, 1.0],
[-1.0, -1.0, 1.0]
),
// RIGHT
...quad(greenMaterial)(
[1.0, 1.0, 1.0],
[1.0, 1.0, -1.0],
[1.0, -1.0, -1.0],
[1.0, -1.0, 1.0]
),
// LEFT
...quad(redMaterial)(
[-1.0, 1.0, -1.0],
[-1.0, 1.0, 1.0],
[-1.0, -1.0, 1.0],
[-1.0, -1.0, -1.0]
)
]
const cheapCornellBox = [
// // LIGHT
// {
// sphere: {
// center: [0, 1, 0],
// radius: 0.25,
// material: lightMaterial
// }
// },
// {
// implicit: {
// distancefunction: [
// { dfBox: [0.2, 0.02, 0.2] },
// { dfTranslate: [0.05, 1.02, 0.05] }
// ],
// material: lightMaterial
// }
// },
// CEILING
{
plane: {
normal: [0, -1, 0],
d: -1,
// material: whiteMaterial
material: skyMaterial
}
},
// FLOOR
{
plane: {
normal: [0, 1, 0],
d: -1,
material: whiteMaterial
}
},
// // BACK
{
plane: {
normal: [0, 0, -1],
d: -1,
// material: mirrorMaterial
material: whiteMaterial
}
},
// // FRONT (BEHIND CAMERA)
{
plane: {
normal: [0, 0, 1],
d: -1,
material: {
lambertian: {
albedo: [0, 0, 0]
}
}
}
},
// RIGHT
{
plane: {
normal: [-1, 0, 0],
d: -1,
material: greenMaterial
}
},
// LEFT
{
plane: {
normal: [1, 0, 0],
d: -1,
material: redMaterial
}
} | const skyAndGround = [
// SKY
{
sphere: {
center: [0, 1, 0],
radius: 100,
material: skyMaterial
}
}
// {
// plane: {
// normal: [0, 1, 0],
// d: -1,
// material: {
// lambertian: {
// // albedo: [0xff / 255, 0xe7 / 255, 0xd9 / 255]
// // albedo: [0.4, 0.25, 0.25]
// // albedo: [0.5, 0.5, 0.5]
// albedo: [0.5, 0.5, 0.5]
// // emittance: [0.1, 0.1, 0.1]
// }
// }
// }
// }
]
export default {
maxDepth: 20,
camera: {
position: [0, 0, -4],
basis: {
tangent: [1, 0, 0],
bitangent: [0, -1, 0],
normal: [0, 0, 1]
},
aperture: 0,
fieldOfView: 1 / 3,
// fieldOfView: 0.35,
focalLength: 10
// tMin: 0.00001,
// tMax: Infinity
},
geometry: {
group: [
// ...skyAndGround,
...cheapCornellBox,
// TWISTED BOX
{
implicit: {
distancefunction: [
{ dfBox: [0.3, 0.3, 0.3] },
// { dfTwist: Math.PI / 2 }
{ dfRotate: Math.PI / 10 }
// { dfTwist: Math.PI }
// { dfTranslate: [0.35, -0.75, -0.35] }
],
// material: whiteMaterial
material: glassMaterial
// material: redMaterial
}
}
// // TORUS
// {
// implicit: {
// distancefunction: [
// { dfTorus: { major: 0.25, minor: 0.15 } }
// // { dfTwist: Math.PI },
// // { dfTranslate: [0.35, -0.5, -0.35] }
// ],
// // material: whiteMaterial
// // material: glassMaterial
// material: redMaterial
// }
// }
// // SPHERE
// {
// sphere: {
// center: [0, 0, 0],
// // radius: 0,
// radius: 0.4,
// // material: whiteMaterial
// // material: mirrorMaterial
// material: glassMaterial
// }
// }
]
}
} | ]
|
main.go | package main
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/containerd/console"
dockerfile "github.com/docker/dockerfile/builder"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/appdefaults"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"golang.org/x/sync/errgroup"
)
func main() {
app := cli.NewApp()
app.Name = "build-using-dockerfile"
app.UsageText = `build-using-dockerfile [OPTIONS] PATH | URL | -`
app.Description = `
build using Dockerfile.
This command mimics the behavior of the "docker build" command so that people can easily get started with BuildKit.
This command is NOT a replacement for "docker build", and should NOT be used for building production images.
By default, the built image is loaded to Docker.
`
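// Example invocations, a hedged sketch only (the image tag, Dockerfile path, and
// build context below are illustrative, not taken from this repository):
//
//   build-using-dockerfile -t myimage:dev .
//   build-using-dockerfile --no-cache -f ./Dockerfile.dev -t myimage:dev .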
dockerIncompatibleFlags := []cli.Flag{
cli.StringFlag{
Name: "buildkit-addr",
Usage: "buildkit daemon address",
EnvVar: "BUILDKIT_HOST",
Value: appdefaults.Address,
},
cli.BoolFlag{
Name: "clientside-frontend",
Usage: "run dockerfile frontend client side, rather than builtin to buildkitd",
EnvVar: "BUILDKIT_CLIENTSIDE_FRONTEND",
},
}
app.Flags = append([]cli.Flag{
cli.StringSliceFlag{
Name: "build-arg",
Usage: "Set build-time variables",
},
cli.StringFlag{
Name: "file, f",
Usage: "Name of the Dockerfile (Default is 'PATH/Dockerfile')",
},
cli.StringFlag{
Name: "tag, t",
Usage: "Name and optionally a tag in the 'name:tag' format",
},
cli.StringFlag{
Name: "target",
Usage: "Set the target build stage to build.",
},
cli.BoolFlag{
Name: "no-cache",
Usage: "Do not use cache when building the image",
},
}, dockerIncompatibleFlags...)
app.Action = action
if err := app.Run(os.Args); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
}
func | (clicontext *cli.Context) error {
ctx := appcontext.Context()
if tag := clicontext.String("tag"); tag == "" {
return errors.New("tag is not specified")
}
c, err := client.New(ctx, clicontext.String("buildkit-addr"), client.WithFailFast())
if err != nil {
return err
}
pipeR, pipeW := io.Pipe()
solveOpt, err := newSolveOpt(clicontext, pipeW)
if err != nil {
return err
}
ch := make(chan *client.SolveStatus)
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
var err error
if clicontext.Bool("clientside-frontend") {
_, err = c.Build(ctx, *solveOpt, "", dockerfile.Build, ch)
} else {
_, err = c.Solve(ctx, nil, *solveOpt, ch)
}
return err
})
eg.Go(func() error {
var c console.Console
if cn, err := console.ConsoleFromFile(os.Stderr); err == nil {
c = cn
}
// not using the shared context so as not to disrupt the display, but let it finish reporting errors
_, err = progressui.DisplaySolveStatus(context.TODO(), "", c, os.Stdout, ch)
return err
})
eg.Go(func() error {
if err := loadDockerTar(pipeR); err != nil {
return err
}
return pipeR.Close()
})
if err := eg.Wait(); err != nil {
return err
}
logrus.Infof("Loaded the image %q to Docker.", clicontext.String("tag"))
return nil
}
func newSolveOpt(clicontext *cli.Context, w io.WriteCloser) (*client.SolveOpt, error) {
buildCtx := clicontext.Args().First()
if buildCtx == "" {
return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
} else if buildCtx == "-" {
return nil, errors.New("stdin not supported yet")
}
file := clicontext.String("file")
if file == "" {
file = filepath.Join(buildCtx, "Dockerfile")
}
localDirs := map[string]string{
"context": buildCtx,
"dockerfile": filepath.Dir(file),
}
frontend := "dockerfile.v0" // TODO: use gateway
if clicontext.Bool("clientside-frontend") {
frontend = ""
}
frontendAttrs := map[string]string{
"filename": filepath.Base(file),
}
if target := clicontext.String("target"); target != "" {
frontendAttrs["target"] = target
}
if clicontext.Bool("no-cache") {
frontendAttrs["no-cache"] = ""
}
for _, buildArg := range clicontext.StringSlice("build-arg") {
kv := strings.SplitN(buildArg, "=", 2)
if len(kv) != 2 {
return nil, errors.Errorf("invalid build-arg value %s", buildArg)
}
frontendAttrs["build-arg:"+kv[0]] = kv[1]
}
return &client.SolveOpt{
Exports: []client.ExportEntry{
{
Type: "docker", // TODO: use containerd image store when it is integrated to Docker
Attrs: map[string]string{
"name": clicontext.String("tag"),
},
Output: func(_ map[string]string) (io.WriteCloser, error) {
return w, nil
},
},
},
LocalDirs: localDirs,
Frontend: frontend,
FrontendAttrs: frontendAttrs,
}, nil
}
func loadDockerTar(r io.Reader) error {
// no need to use moby/moby/client here
cmd := exec.Command("docker", "load")
cmd.Stdin = r
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
| action |
test_stock.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `mvport` package."""
import sys
import unittest
import numpy as np
from mvport.stock import Stock
class TestStock(unittest.TestCase):
"""Tests for `mvport` package."""
def setUp(self):
"""SetUp."""
self.ticker = 'AAPL'
self.returns = [-2, -1, 0, 1, 2]
self.stock = Stock(self.ticker, self.returns)
def test_get_ticker(self):
"""Test get_ticker."""
self.assertEqual(self.stock.get_ticker(), self.ticker)
def | (self):
"""Test set_ticker."""
self.stock.set_ticker('new_ticker')
self.assertEqual(self.stock.get_ticker(), 'new_ticker')
def test_get_returns(self):
"""Test get_returns."""
np.testing.assert_array_equal(self.stock.get_returns(), np.array(self.returns))
def test_set_returns(self):
"""Test set_ticker."""
self.stock.set_returns([-1, 0, 1])
np.testing.assert_array_equal(self.stock.get_returns(), np.array([-1, 0, 1]))
def test_get_mean(self):
"""Test get_mean."""
self.assertEqual(self.stock.get_mean(), 0)
self.stock.set_returns([0, 1, 2])
self.assertEqual(self.stock.get_mean(), 1)
def test_get_variance(self):
"""Test get_variance."""
self.assertEqual(self.stock.get_variance(), 2)
self.stock.set_returns([-3,-1,0,1,3])
self.assertEqual(self.stock.get_variance(), 4)
if __name__ == '__main__':
sys.exit(unittest.main())
| test_set_ticker |
measure.rs | //! Utils functions for measuring the PVN NFs.
use crate::utils::Flow;
use serde_json::{from_reader, Value};
use statrs::statistics::{Max, Mean, Median, Min};
use statrs::statistics::{OrderStatistics, Variance};
use std::collections::HashMap;
use std::fs::File;
use std::time::Instant;
/// Epsilon.
pub const EPSILON: usize = 1000;
/// Number of packets to ignore before starting measurement. Currently deprecated.
pub const NUM_TO_IGNORE: usize = 0;
/// Estimated number of packets for allocating large size array for RDR NF.
pub const RDR_MEASURED_PKT: usize = 100_000_000;
/// Estimated number of packets for allocating large size array.
pub const TOTAL_MEASURED_PKT: usize = 200_000_000;
/// Time for the short experiment with instrumentation.
pub const SHORT_MEASURE_TIME: u64 = 181;
/// Time for the medium experiment with instrumentation.
pub const MEDIUM_MEASURE_TIME: u64 = 301;
/// Time for the long experiment with instrumentation.
pub const LONG_MEASURE_TIME: u64 = 601;
/// Time for the application experiment.
pub const APP_MEASURE_TIME: u64 = 610;
/// Fake flow when retrieving flow failed.
pub fn fake_flow() -> Flow {
Flow {
src_ip: 0_u32,
dst_ip: 0_u32,
src_port: 0_u16,
dst_port: 0_u16,
proto: 0_u8,
}
}
/// experiment parameters.
#[derive(Debug, Clone, Copy)]
pub struct ExprParam {
/// setup (workload level)
pub setup: usize,
/// TLSV setup
pub tlsv_setup: usize,
/// RDR setup
pub rdr_setup: usize,
/// XCDR setup
pub xcdr_setup: usize,
/// P2P setup
pub p2p_setup: usize,
/// iteration of this run
pub iter: usize,
/// whether we have turned on latency instrumentation
pub inst: bool,
/// running experiment time
pub expr_time: u64,
}
/// Read various params from setup.
///
/// Currently returns: *setup* (which setup it is), *iter* (which iteration it
/// is), *inst* (instrumentation for retrieving latencies for every packet),
/// and *expr running time* (how long the NF will run).
///
/// This impl probably can be optimized.
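/// A minimal usage sketch; the JSON file path below is purely hypothetical:
///
/// ```ignore
/// if let Some(param) = read_setup_param("setup_config.json".to_string()) {
///     println!("setup {}, iter {}, running for {}s", param.setup, param.iter, param.expr_time);
/// }
/// ```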
pub fn read_setup_param(file_path: String) -> Option<ExprParam> {
let file = File::open(file_path.clone()).expect("file should open read only");
let read_json = file_path + " should be proper JSON";
let json: Value = from_reader(file).expect(&read_json);
let setup: Option<String> = match serde_json::from_value(json.get("setup").expect("file should have setup").clone())
{
Ok(val) => Some(val),
Err(e) => {
println!("Malformed JSON response for setup: {}", e);
None
}
};
let setup = setup.unwrap().parse::<usize>();
// setup all NF setups
let tlsv_setup: Option<String> =
match serde_json::from_value(json.get("tlsv_setup").expect("file could have tlsv setup").clone()) {
Ok(val) => Some(val),
Err(e) => {
println!("Malformed JSON response for tlsv_setup: {}", e);
None
}
};
let rdr_setup: Option<String> =
match serde_json::from_value(json.get("rdr_setup").expect("file should have rdr_setup").clone()) {
Ok(val) => Some(val),
Err(e) => {
println!("Malformed JSON response for rdr_setup: {}", e);
None
}
};
let xcdr_setup: Option<String> =
match serde_json::from_value(json.get("xcdr_setup").expect("file should have xcdr_setup").clone()) {
Ok(val) => Some(val),
Err(e) => {
println!("Malformed JSON response for xcdr_setup: {}", e);
None
}
};
let p2p_setup: Option<String> =
match serde_json::from_value(json.get("p2p_setup").expect("file should have p2p_setup").clone()) {
Ok(val) => Some(val),
Err(e) => {
println!("Malformed JSON response for p2p_setup: {}", e);
None
}
};
let tlsv_setup = tlsv_setup.unwrap().parse::<usize>();
let rdr_setup = rdr_setup.unwrap().parse::<usize>();
let xcdr_setup = xcdr_setup.unwrap().parse::<usize>();
let p2p_setup = p2p_setup.unwrap().parse::<usize>();
let iter: Option<String> = match serde_json::from_value(json.get("iter").expect("file should have iter").clone()) {
Ok(val) => Some(val),
Err(e) => {
println!("Malformed JSON response: {}", e);
None
}
};
let iter = iter.unwrap().parse::<usize>();
let inst: Option<String> = match serde_json::from_value(json.get("inst").expect("file should have inst").clone()) {
Ok(val) => Some(val),
Err(e) => {
println!("Malformed JSON response: {}", e);
None
}
};
let inst_val = match &*inst.unwrap() {
"on" => Some(true),
"off" => Some(false),
_ => None,
};
let mode: Option<String> = match serde_json::from_value(json.get("mode").expect("file should have mode").clone()) {
Ok(val) => Some(val),
Err(e) => {
println!("Malformed JSON response: {}", e);
None
}
};
let expr_time = match &*mode.unwrap() {
"short" => Some(SHORT_MEASURE_TIME),
"medium" => Some(MEDIUM_MEASURE_TIME),
"long" => Some(LONG_MEASURE_TIME),
_ => None,
};
if let (
Ok(setup),
Ok(tlsv_setup),
Ok(rdr_setup),
Ok(xcdr_setup),
Ok(p2p_setup),
Ok(iter),
Some(inst),
Some(expr_time),
) = (
setup, tlsv_setup, rdr_setup, xcdr_setup, p2p_setup, iter, inst_val, expr_time,
) {
Some(ExprParam {
setup,
tlsv_setup,
rdr_setup,
xcdr_setup,
p2p_setup,
iter,
inst,
expr_time,
})
} else {
None
}
}
/// Merge all the timestamps we have and generate meaningful latencies for each
/// packet.
///
/// The current implementation just works so please don't touch the code unless
/// you have time to verify the correctness.
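/// A small sketch of the expected input/output shape (the `Instant`s here are
/// freshly created and only illustrative):
///
/// ```ignore
/// let matched = vec![Instant::now(); 3];
/// let not_matched: HashMap<usize, Instant> = HashMap::new();
/// let merged = merge_ts(3, matched, not_matched);
/// assert_eq!(merged.len(), 3);
/// ```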
pub fn merge_ts(
total_measured_pkt: usize,
stop_ts_matched: Vec<Instant>,
stop_ts_not_matched: HashMap<usize, Instant>,
) -> HashMap<usize, Instant> {
let mut actual_ts = HashMap::<usize, Instant>::with_capacity(total_measured_pkt);
let mut not_matched_c = 0;
for pivot in 1..total_measured_pkt + 1 {
if stop_ts_not_matched.contains_key(&pivot) { | not_matched_c += 1;
} else {
// NOTE: we need this early stop because of the drifting behavior in groupby operations
if pivot - not_matched_c - 1 == stop_ts_matched.len() {
println!("merging finished!",);
return actual_ts;
}
actual_ts.insert(pivot - 1, stop_ts_matched[pivot - not_matched_c - 1]);
}
}
println!("This should never be reached!",);
actual_ts
}
/// Compute statistics for the latency results collected.
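/// Example call with a few hypothetical latency samples (in nanoseconds):
///
/// ```ignore
/// compute_stat(vec![120, 95, 300, 88, 410, 102, 97]);
/// ```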
pub fn compute_stat(mut tmp_results: Vec<u128>) {
tmp_results.sort_unstable();
let mut results: Vec<f64> = tmp_results.into_iter().map(|item| item as f64).collect();
let bar = results.percentile(99);
let (rest, mut main): (_, Vec<_>) = results.into_iter().partition(|x| x >= &bar);
println!("sorting and then type casting done",);
println!("Details of the results in rest",);
let rest_chunk_size = rest.len() / 100 + 1;
//generate 100 groups
for (rest_count, rest_chunk) in rest.chunks(rest_chunk_size).enumerate() {
println!(
"Rest_group {:?}, median: {:02?}, mean: {:02?}, std dev: {:02?}",
rest_count,
rest_chunk.median(),
rest_chunk.mean(),
rest_chunk.std_dev()
);
}
println!("Details of the results in main",);
let main_chunk_size = main.len() / 100 + 1;
//generate 100 groups
for (main_count, main_chunk) in main.chunks(main_chunk_size).enumerate() {
println!(
"Group {:?}, median: {:02?}, mean: {:02?}, std dev: {:02?}",
main_count,
main_chunk.median(),
main_chunk.mean(),
main_chunk.std_dev()
);
}
let min = main.min();
let max = main.max();
println!(
"Stat_extra, mean: {:?}, median: {:?}, std: {:?}, 90%iles: {:?}, 95%iles: {:?}, ",
main.mean(),
main.median(),
main.std_dev(),
main.percentile(90),
main.percentile(95),
);
println!(
"Stat, min: {:?}, 25%iles: {:?}, 50%iles: {:?}, 75%iles: {:?}, max: {:?}",
min,
main.percentile(25),
main.percentile(50),
main.percentile(75),
max,
);
} | // non tcp ts
let item = stop_ts_not_matched.get(&pivot).unwrap();
actual_ts.insert(pivot - 1, *item);
// println!("INSERT: pivot: {:?} is {:?}", pivot - 1, *item); |
map.rs | //! TODO: docs
use std::borrow::Borrow;
use std::fmt;
use std::iter::FromIterator;
use std::mem::ManuallyDrop;
use std::ops::{Bound, RangeBounds};
use std::ptr;
use base::{self, try_pin_loop};
use epoch;
/// A map based on a lock-free skip list.
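/// A brief usage sketch (key and value types chosen only for illustration):
///
/// ```ignore
/// let map = SkipMap::new();
/// map.insert("alpha", 1);
/// map.insert("beta", 2);
/// assert_eq!(map.get("alpha").map(|e| *e.value()), Some(1));
/// assert_eq!(map.len(), 2);
/// ```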
pub struct SkipMap<K, V> {
inner: base::SkipList<K, V>,
}
impl<K, V> SkipMap<K, V> {
/// Returns a new, empty map.
pub fn new() -> SkipMap<K, V> {
SkipMap {
inner: base::SkipList::new(epoch::default_collector().clone()),
}
}
/// Returns `true` if the map is empty.
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
/// Returns the number of entries in the map.
///
/// If the map is being concurrently modified, consider the returned number just an
/// approximation without any guarantees.
pub fn len(&self) -> usize {
self.inner.len()
}
}
impl<K, V> SkipMap<K, V>
where
K: Ord,
{
/// Returns the entry with the smallest key.
pub fn front(&self) -> Option<Entry<K, V>> {
let guard = &epoch::pin();
try_pin_loop(|| self.inner.front(guard)).map(Entry::new)
}
/// Returns the entry with the largest key.
pub fn back(&self) -> Option<Entry<K, V>> {
let guard = &epoch::pin();
try_pin_loop(|| self.inner.back(guard)).map(Entry::new)
}
/// Returns `true` if the map contains a value for the specified key.
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Ord + ?Sized,
{
let guard = &epoch::pin();
self.inner.contains_key(key, guard)
}
/// Returns an entry with the specified `key`.
pub fn get<Q>(&self, key: &Q) -> Option<Entry<K, V>>
where
K: Borrow<Q>,
Q: Ord + ?Sized,
{
let guard = &epoch::pin();
try_pin_loop(|| self.inner.get(key, guard)).map(Entry::new)
}
/// Returns an `Entry` pointing to the lowest element whose key is above
/// the given bound. If no such element is found then `None` is
/// returned.
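/// For example (sketch): with keys `2` and `5` present,
/// `lower_bound(Bound::Included(&3))` yields the entry for `5`:
///
/// ```ignore
/// use std::ops::Bound;
/// let map = SkipMap::new();
/// map.insert(2, "two");
/// map.insert(5, "five");
/// assert_eq!(map.lower_bound(Bound::Included(&3)).map(|e| *e.key()), Some(5));
/// ```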
pub fn lower_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option<Entry<'a, K, V>>
where
K: Borrow<Q>,
Q: Ord + ?Sized,
{
let guard = &epoch::pin();
try_pin_loop(|| self.inner.lower_bound(bound, guard)).map(Entry::new)
}
/// Returns an `Entry` pointing to the highest element whose key is below
/// the given bound. If no such element is found then `None` is
/// returned.
pub fn upper_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option<Entry<'a, K, V>>
where
K: Borrow<Q>,
Q: Ord + ?Sized,
{
let guard = &epoch::pin();
try_pin_loop(|| self.inner.upper_bound(bound, guard)).map(Entry::new)
}
/// Finds an entry with the specified key, or inserts a new `key`-`value` pair if none exist.
pub fn get_or_insert(&self, key: K, value: V) -> Entry<K, V> {
let guard = &epoch::pin();
Entry::new(self.inner.get_or_insert(key, value, guard))
}
/// Returns an iterator over all entries in the map.
pub fn iter(&self) -> Iter<K, V> {
Iter {
inner: self.inner.ref_iter(),
}
}
/// Returns an iterator over a subset of entries in the skip list.
pub fn range<Q, R>(&self, range: R) -> Range<'_, Q, R, K, V>
where
K: Borrow<Q>,
R: RangeBounds<Q>,
Q: Ord + ?Sized,
{
Range {
inner: self.inner.ref_range(range),
}
}
}
impl<K, V> SkipMap<K, V>
where
K: Ord + Send + 'static,
V: Send + 'static,
{
/// Inserts a `key`-`value` pair into the map and returns the new entry.
///
/// If there is an existing entry with this key, it will be removed before inserting the new
/// one.
pub fn insert(&self, key: K, value: V) -> Entry<K, V> {
let guard = &epoch::pin();
Entry::new(self.inner.insert(key, value, guard))
}
/// Removes an entry with the specified `key` from the map and returns it.
pub fn remove<Q>(&self, key: &Q) -> Option<Entry<K, V>>
where
K: Borrow<Q>,
Q: Ord + ?Sized,
{
let guard = &epoch::pin();
self.inner.remove(key, guard).map(Entry::new)
}
/// Removes an entry from the front of the map.
pub fn pop_front(&self) -> Option<Entry<K, V>> {
let guard = &epoch::pin();
self.inner.pop_front(guard).map(Entry::new)
}
/// Removes an entry from the back of the map.
pub fn pop_back(&self) -> Option<Entry<K, V>> {
let guard = &epoch::pin();
self.inner.pop_back(guard).map(Entry::new)
}
/// Iterates over the map and removes every entry.
pub fn clear(&self) {
let guard = &mut epoch::pin();
self.inner.clear(guard);
}
}
impl<K, V> Default for SkipMap<K, V> {
fn default() -> SkipMap<K, V> {
SkipMap::new()
}
}
impl<K, V> fmt::Debug for SkipMap<K, V>
where
K: Ord + fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("SkipMap { .. }")
}
}
impl<K, V> IntoIterator for SkipMap<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
fn into_iter(self) -> IntoIter<K, V> {
IntoIter {
inner: self.inner.into_iter(),
}
}
}
impl<'a, K, V> IntoIterator for &'a SkipMap<K, V>
where
K: Ord,
{
type Item = Entry<'a, K, V>;
type IntoIter = Iter<'a, K, V>;
fn into_iter(self) -> Iter<'a, K, V> {
self.iter()
}
}
impl<K, V> FromIterator<(K, V)> for SkipMap<K, V>
where
K: Ord,
{
fn from_iter<I>(iter: I) -> SkipMap<K, V>
where
I: IntoIterator<Item = (K, V)>,
{
let s = SkipMap::new();
for (k, v) in iter {
s.get_or_insert(k, v);
}
s
}
}
/// A reference-counted entry in a map.
pub struct Entry<'a, K: 'a, V: 'a> {
inner: ManuallyDrop<base::RefEntry<'a, K, V>>,
}
impl<'a, K, V> Entry<'a, K, V> {
fn new(inner: base::RefEntry<'a, K, V>) -> Entry<'a, K, V> {
Entry {
inner: ManuallyDrop::new(inner),
}
}
/// Returns a reference to the key.
pub fn key(&self) -> &K {
self.inner.key()
}
/// Returns a reference to the value.
pub fn value(&self) -> &V {
self.inner.value()
}
/// Returns `true` if the entry is removed from the map.
pub fn is_removed(&self) -> bool {
self.inner.is_removed()
}
}
impl<'a, K, V> Drop for Entry<'a, K, V> {
fn drop(&mut self) {
unsafe {
ManuallyDrop::into_inner(ptr::read(&mut self.inner)).release_with_pin(epoch::pin);
}
}
}
impl<'a, K, V> Entry<'a, K, V>
where
K: Ord,
{
/// Moves to the next entry in the map.
pub fn move_next(&mut self) -> bool {
let guard = &epoch::pin();
self.inner.move_next(guard)
}
/// Moves to the previous entry in the map.
pub fn move_prev(&mut self) -> bool {
let guard = &epoch::pin();
self.inner.move_prev(guard)
}
/// Returns the next entry in the map.
pub fn next(&self) -> Option<Entry<'a, K, V>> {
let guard = &epoch::pin();
self.inner.next(guard).map(Entry::new)
}
/// Returns the previous entry in the map.
pub fn prev(&self) -> Option<Entry<'a, K, V>> {
let guard = &epoch::pin();
self.inner.prev(guard).map(Entry::new)
}
}
impl<'a, K, V> Entry<'a, K, V> | {
/// Removes the entry from the map.
///
/// Returns `true` if this call removed the entry and `false` if it was already removed.
pub fn remove(&self) -> bool {
let guard = &epoch::pin();
self.inner.remove(guard)
}
}
impl<'a, K, V> Clone for Entry<'a, K, V> {
fn clone(&self) -> Entry<'a, K, V> {
Entry {
inner: self.inner.clone(),
}
}
}
impl<'a, K, V> fmt::Debug for Entry<'a, K, V>
where
K: fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("Entry")
.field(self.key())
.field(self.value())
.finish()
}
}
/// An owning iterator over the entries of a `SkipMap`.
pub struct IntoIter<K, V> {
inner: base::IntoIter<K, V>,
}
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
fn next(&mut self) -> Option<(K, V)> {
self.inner.next()
}
}
impl<K, V> fmt::Debug for IntoIter<K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("IntoIter { .. }")
}
}
/// An iterator over the entries of a `SkipMap`.
pub struct Iter<'a, K: 'a, V: 'a> {
inner: base::RefIter<'a, K, V>,
}
impl<'a, K, V> Iterator for Iter<'a, K, V>
where
K: Ord,
{
type Item = Entry<'a, K, V>;
fn next(&mut self) -> Option<Entry<'a, K, V>> {
let guard = &epoch::pin();
self.inner.next(guard).map(Entry::new)
}
}
impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V>
where
K: Ord,
{
fn next_back(&mut self) -> Option<Entry<'a, K, V>> {
let guard = &epoch::pin();
self.inner.next_back(guard).map(Entry::new)
}
}
impl<'a, K, V> fmt::Debug for Iter<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Iter { .. }")
}
}
/// An iterator over the entries of a `SkipMap`.
pub struct Range<'a, Q, R, K: 'a, V: 'a>
where
K: Ord + Borrow<Q>,
R: RangeBounds<Q>,
Q: Ord + ?Sized,
{
pub(crate) inner: base::RefRange<'a, Q, R, K, V>,
}
impl<'a, Q, R, K, V> Iterator for Range<'a, Q, R, K, V>
where
K: Ord + Borrow<Q>,
R: RangeBounds<Q>,
Q: Ord + ?Sized,
{
type Item = Entry<'a, K, V>;
fn next(&mut self) -> Option<Entry<'a, K, V>> {
let guard = &epoch::pin();
self.inner.next(guard).map(Entry::new)
}
}
impl<'a, Q, R, K, V> DoubleEndedIterator for Range<'a, Q, R, K, V>
where
K: Ord + Borrow<Q>,
R: RangeBounds<Q>,
Q: Ord + ?Sized,
{
fn next_back(&mut self) -> Option<Entry<'a, K, V>> {
let guard = &epoch::pin();
self.inner.next_back(guard).map(Entry::new)
}
}
impl<'a, Q, R, K, V> fmt::Debug for Range<'a, Q, R, K, V>
where
K: Ord + Borrow<Q> + fmt::Debug,
V: fmt::Debug,
R: RangeBounds<Q> + fmt::Debug,
Q: Ord + ?Sized,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Range")
.field("range", &self.inner.range)
.field("head", &self.inner.head)
.field("tail", &self.inner.tail)
.finish()
}
} | where
K: Ord + Send + 'static,
V: Send + 'static, |
empresas.read.ts | import { Empresas as InputEmpresas } from '../empresas.entity';
export class ReadEmpresaDto extends PartialType(InputEmpresas) {} | import {OmitType,PartialType} from '@nestjs/swagger'; |
|
streaminfo.go | package meta
import (
"crypto/md5"
"errors"
"fmt"
"io"
"github.com/mewkiz/flac/internal/bits"
)
// StreamInfo contains the basic properties of a FLAC audio stream, such as its
// sample rate and channel count. It is the only mandatory metadata block and
// must be present as the first metadata block of a FLAC stream.
//
// ref: https://www.xiph.org/flac/format.html#metadata_block_streaminfo
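//
// A hedged usage sketch; it assumes the parent flac package exposes a Parse
// function and a Stream.Info field of this type (adjust if the actual API differs):
//
//	stream, err := flac.Parse(r)
//	if err != nil {
//		// handle the error
//	}
//	fmt.Println(stream.Info.SampleRate, stream.Info.NChannels)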
type StreamInfo struct {
// Minimum block size (in samples) used in the stream; between 16 and 65535
// samples.
BlockSizeMin uint16
// Maximum block size (in samples) used in the stream; between 16 and 65535
// samples.
BlockSizeMax uint16
// Minimum frame size in bytes; a 0 value implies unknown.
FrameSizeMin uint32
// Maximum frame size in bytes; a 0 value implies unknown.
FrameSizeMax uint32
// Sample rate in Hz; between 1 and 655350 Hz.
SampleRate uint32
// Number of channels; between 1 and 8 channels.
NChannels uint8
// Sample size in bits-per-sample; between 4 and 32 bits.
BitsPerSample uint8
// Total number of inter-channel samples in the stream. One second of 44.1
// KHz audio will have 44100 samples regardless of the number of channels. A
// 0 value implies unknown.
NSamples uint64
// MD5 checksum of the unencoded audio data.
MD5sum [md5.Size]uint8
}
// parseStreamInfo reads and parses the body of a StreamInfo metadata block.
func (block *Block) parseStreamInfo() error {
// 16 bits: BlockSizeMin.
br := bits.NewReader(block.lr)
x, err := br.Read(16)
if err != nil {
return unexpected(err)
}
if x < 16 {
return fmt.Errorf("meta.Block.parseStreamInfo: invalid minimum block size (%d); expected >= 16", x)
}
si := new(StreamInfo)
block.Body = si
si.BlockSizeMin = uint16(x)
// 16 bits: BlockSizeMax.
x, err = br.Read(16)
if err != nil {
return unexpected(err)
}
if x < 16 {
return fmt.Errorf("meta.Block.parseStreamInfo: invalid maximum block size (%d); expected >= 16", x)
}
si.BlockSizeMax = uint16(x)
// 24 bits: FrameSizeMin.
x, err = br.Read(24)
if err != nil {
return unexpected(err)
}
si.FrameSizeMin = uint32(x)
// 24 bits: FrameSizeMax.
x, err = br.Read(24)
if err != nil |
si.FrameSizeMax = uint32(x)
// 20 bits: SampleRate.
x, err = br.Read(20)
if err != nil {
return unexpected(err)
}
if x == 0 {
return errors.New("meta.Block.parseStreamInfo: invalid sample rate (0)")
}
si.SampleRate = uint32(x)
// 3 bits: NChannels.
x, err = br.Read(3)
if err != nil {
return unexpected(err)
}
// x contains: (number of channels) - 1
si.NChannels = uint8(x + 1)
// 5 bits: BitsPerSample.
x, err = br.Read(5)
if err != nil {
return unexpected(err)
}
// x contains: (bits-per-sample) - 1
si.BitsPerSample = uint8(x + 1)
// 36 bits: NSamples.
si.NSamples, err = br.Read(36)
if err != nil {
return unexpected(err)
}
// 16 bytes: MD5sum.
_, err = io.ReadFull(block.lr, si.MD5sum[:])
return unexpected(err)
}
| {
return unexpected(err)
} |
i2s0.rs | #[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
#[doc = "0x00 - WCLK Source Selection"]
pub aifwclksrc: AIFWCLKSRC,
#[doc = "0x04 - DMA Buffer Size Configuration"]
pub aifdmacfg: AIFDMACFG,
#[doc = "0x08 - Pin Direction"]
pub aifdircfg: AIFDIRCFG,
#[doc = "0x0c - Serial Interface Format Configuration"]
pub aiffmtcfg: AIFFMTCFG,
#[doc = "0x10 - Word Selection Bit Mask for Pin 0"]
pub aifwmask0: AIFWMASK0,
#[doc = "0x14 - Word Selection Bit Mask for Pin 1"]
pub aifwmask1: AIFWMASK1,
#[doc = "0x18 - Word Selection Bit Mask for Pin 2"]
pub aifwmask2: AIFWMASK2,
#[doc = "0x1c - Audio Interface PWM Debug Value"]
pub aifpwmvalue: AIFPWMVALUE,
#[doc = "0x20 - DMA Input Buffer Next Pointer"]
pub aifinptrnext: AIFINPTRNEXT,
#[doc = "0x24 - DMA Input Buffer Current Pointer"]
pub aifinptr: AIFINPTR,
#[doc = "0x28 - DMA Output Buffer Next Pointer"]
pub aifoutptrnext: AIFOUTPTRNEXT,
#[doc = "0x2c - DMA Output Buffer Current Pointer"]
pub aifoutptr: AIFOUTPTR,
_reserved12: [u8; 4usize],
#[doc = "0x34 - SampleStaMP Generator Control Register"]
pub stmpctl: STMPCTL,
#[doc = "0x38 - Captured XOSC Counter Value, Capture Channel 0"]
pub stmpxcntcapt0: STMPXCNTCAPT0,
#[doc = "0x3c - XOSC Period Value"]
pub stmpxper: STMPXPER,
#[doc = "0x40 - Captured WCLK Counter Value, Capture Channel 0"]
pub stmpwcntcapt0: STMPWCNTCAPT0,
#[doc = "0x44 - WCLK Counter Period Value"]
pub stmpwper: STMPWPER,
#[doc = "0x48 - WCLK Counter Trigger Value for Input Pins"]
pub stmpintrig: STMPINTRIG,
#[doc = "0x4c - WCLK Counter Trigger Value for Output Pins"]
pub stmpouttrig: STMPOUTTRIG,
#[doc = "0x50 - WCLK Counter Set Operation"]
pub stmpwset: STMPWSET,
#[doc = "0x54 - WCLK Counter Add Operation"]
pub stmpwadd: STMPWADD,
#[doc = "0x58 - XOSC Minimum Period Value Minimum Value of STMPXPER"]
pub stmpxpermin: STMPXPERMIN,
#[doc = "0x5c - Current Value of WCNT"]
pub stmpwcnt: STMPWCNT,
#[doc = "0x60 - Current Value of XCNT"]
pub stmpxcnt: STMPXCNT,
#[doc = "0x64 - Captured XOSC Counter Value, Capture Channel 1"]
pub stmpxcntcapt1: STMPXCNTCAPT1,
#[doc = "0x68 - Captured WCLK Counter Value, Capture Channel 1"]
pub stmpwcntcapt1: STMPWCNTCAPT1,
_reserved26: [u8; 4usize],
#[doc = "0x70 - Masked Interrupt Status Register"]
pub irqmask: IRQMASK,
#[doc = "0x74 - Raw Interrupt Status Register"]
pub irqflags: IRQFLAGS,
#[doc = "0x78 - Interrupt Set Register"]
pub irqset: IRQSET,
#[doc = "0x7c - Interrupt Clear Register"]
pub irqclr: IRQCLR,
}
#[doc = "WCLK Source Selection\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifwclksrc](aifwclksrc) module"]
pub type AIFWCLKSRC = crate::Reg<u32, _AIFWCLKSRC>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFWCLKSRC;
#[doc = "`read()` method returns [aifwclksrc::R](aifwclksrc::R) reader structure"]
impl crate::Readable for AIFWCLKSRC {}
#[doc = "`write(|w| ..)` method takes [aifwclksrc::W](aifwclksrc::W) writer structure"]
impl crate::Writable for AIFWCLKSRC {}
#[doc = "WCLK Source Selection"]
pub mod aifwclksrc;
#[doc = "DMA Buffer Size Configuration\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifdmacfg](aifdmacfg) module"]
pub type AIFDMACFG = crate::Reg<u32, _AIFDMACFG>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFDMACFG;
#[doc = "`read()` method returns [aifdmacfg::R](aifdmacfg::R) reader structure"]
impl crate::Readable for AIFDMACFG {}
#[doc = "`write(|w| ..)` method takes [aifdmacfg::W](aifdmacfg::W) writer structure"]
impl crate::Writable for AIFDMACFG {}
#[doc = "DMA Buffer Size Configuration"]
pub mod aifdmacfg;
#[doc = "Pin Direction\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifdircfg](aifdircfg) module"]
pub type AIFDIRCFG = crate::Reg<u32, _AIFDIRCFG>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFDIRCFG;
#[doc = "`read()` method returns [aifdircfg::R](aifdircfg::R) reader structure"]
impl crate::Readable for AIFDIRCFG {}
#[doc = "`write(|w| ..)` method takes [aifdircfg::W](aifdircfg::W) writer structure"]
impl crate::Writable for AIFDIRCFG {}
#[doc = "Pin Direction"]
pub mod aifdircfg;
#[doc = "Serial Interface Format Configuration\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aiffmtcfg](aiffmtcfg) module"]
pub type AIFFMTCFG = crate::Reg<u32, _AIFFMTCFG>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct | ;
#[doc = "`read()` method returns [aiffmtcfg::R](aiffmtcfg::R) reader structure"]
impl crate::Readable for AIFFMTCFG {}
#[doc = "`write(|w| ..)` method takes [aiffmtcfg::W](aiffmtcfg::W) writer structure"]
impl crate::Writable for AIFFMTCFG {}
#[doc = "Serial Interface Format Configuration"]
pub mod aiffmtcfg;
#[doc = "Word Selection Bit Mask for Pin 0\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifwmask0](aifwmask0) module"]
pub type AIFWMASK0 = crate::Reg<u32, _AIFWMASK0>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFWMASK0;
#[doc = "`read()` method returns [aifwmask0::R](aifwmask0::R) reader structure"]
impl crate::Readable for AIFWMASK0 {}
#[doc = "`write(|w| ..)` method takes [aifwmask0::W](aifwmask0::W) writer structure"]
impl crate::Writable for AIFWMASK0 {}
#[doc = "Word Selection Bit Mask for Pin 0"]
pub mod aifwmask0;
#[doc = "Word Selection Bit Mask for Pin 1\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifwmask1](aifwmask1) module"]
pub type AIFWMASK1 = crate::Reg<u32, _AIFWMASK1>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFWMASK1;
#[doc = "`read()` method returns [aifwmask1::R](aifwmask1::R) reader structure"]
impl crate::Readable for AIFWMASK1 {}
#[doc = "`write(|w| ..)` method takes [aifwmask1::W](aifwmask1::W) writer structure"]
impl crate::Writable for AIFWMASK1 {}
#[doc = "Word Selection Bit Mask for Pin 1"]
pub mod aifwmask1;
#[doc = "Word Selection Bit Mask for Pin 2\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifwmask2](aifwmask2) module"]
pub type AIFWMASK2 = crate::Reg<u32, _AIFWMASK2>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFWMASK2;
#[doc = "`read()` method returns [aifwmask2::R](aifwmask2::R) reader structure"]
impl crate::Readable for AIFWMASK2 {}
#[doc = "`write(|w| ..)` method takes [aifwmask2::W](aifwmask2::W) writer structure"]
impl crate::Writable for AIFWMASK2 {}
#[doc = "Word Selection Bit Mask for Pin 2"]
pub mod aifwmask2;
#[doc = "Audio Interface PWM Debug Value\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifpwmvalue](aifpwmvalue) module"]
pub type AIFPWMVALUE = crate::Reg<u32, _AIFPWMVALUE>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFPWMVALUE;
#[doc = "`read()` method returns [aifpwmvalue::R](aifpwmvalue::R) reader structure"]
impl crate::Readable for AIFPWMVALUE {}
#[doc = "`write(|w| ..)` method takes [aifpwmvalue::W](aifpwmvalue::W) writer structure"]
impl crate::Writable for AIFPWMVALUE {}
#[doc = "Audio Interface PWM Debug Value"]
pub mod aifpwmvalue;
#[doc = "DMA Input Buffer Next Pointer\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifinptrnext](aifinptrnext) module"]
pub type AIFINPTRNEXT = crate::Reg<u32, _AIFINPTRNEXT>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFINPTRNEXT;
#[doc = "`read()` method returns [aifinptrnext::R](aifinptrnext::R) reader structure"]
impl crate::Readable for AIFINPTRNEXT {}
#[doc = "`write(|w| ..)` method takes [aifinptrnext::W](aifinptrnext::W) writer structure"]
impl crate::Writable for AIFINPTRNEXT {}
#[doc = "DMA Input Buffer Next Pointer"]
pub mod aifinptrnext;
#[doc = "DMA Input Buffer Current Pointer\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifinptr](aifinptr) module"]
pub type AIFINPTR = crate::Reg<u32, _AIFINPTR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFINPTR;
#[doc = "`read()` method returns [aifinptr::R](aifinptr::R) reader structure"]
impl crate::Readable for AIFINPTR {}
#[doc = "`write(|w| ..)` method takes [aifinptr::W](aifinptr::W) writer structure"]
impl crate::Writable for AIFINPTR {}
#[doc = "DMA Input Buffer Current Pointer"]
pub mod aifinptr;
#[doc = "DMA Output Buffer Next Pointer\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifoutptrnext](aifoutptrnext) module"]
pub type AIFOUTPTRNEXT = crate::Reg<u32, _AIFOUTPTRNEXT>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFOUTPTRNEXT;
#[doc = "`read()` method returns [aifoutptrnext::R](aifoutptrnext::R) reader structure"]
impl crate::Readable for AIFOUTPTRNEXT {}
#[doc = "`write(|w| ..)` method takes [aifoutptrnext::W](aifoutptrnext::W) writer structure"]
impl crate::Writable for AIFOUTPTRNEXT {}
#[doc = "DMA Output Buffer Next Pointer"]
pub mod aifoutptrnext;
#[doc = "DMA Output Buffer Current Pointer\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [aifoutptr](aifoutptr) module"]
pub type AIFOUTPTR = crate::Reg<u32, _AIFOUTPTR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _AIFOUTPTR;
#[doc = "`read()` method returns [aifoutptr::R](aifoutptr::R) reader structure"]
impl crate::Readable for AIFOUTPTR {}
#[doc = "`write(|w| ..)` method takes [aifoutptr::W](aifoutptr::W) writer structure"]
impl crate::Writable for AIFOUTPTR {}
#[doc = "DMA Output Buffer Current Pointer"]
pub mod aifoutptr;
#[doc = "SampleStaMP Generator Control Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpctl](stmpctl) module"]
pub type STMPCTL = crate::Reg<u32, _STMPCTL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPCTL;
#[doc = "`read()` method returns [stmpctl::R](stmpctl::R) reader structure"]
impl crate::Readable for STMPCTL {}
#[doc = "`write(|w| ..)` method takes [stmpctl::W](stmpctl::W) writer structure"]
impl crate::Writable for STMPCTL {}
#[doc = "SampleStaMP Generator Control Register"]
pub mod stmpctl;
#[doc = "Captured XOSC Counter Value, Capture Channel 0\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpxcntcapt0](stmpxcntcapt0) module"]
pub type STMPXCNTCAPT0 = crate::Reg<u32, _STMPXCNTCAPT0>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPXCNTCAPT0;
#[doc = "`read()` method returns [stmpxcntcapt0::R](stmpxcntcapt0::R) reader structure"]
impl crate::Readable for STMPXCNTCAPT0 {}
#[doc = "Captured XOSC Counter Value, Capture Channel 0"]
pub mod stmpxcntcapt0;
#[doc = "XOSC Period Value\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpxper](stmpxper) module"]
pub type STMPXPER = crate::Reg<u32, _STMPXPER>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPXPER;
#[doc = "`read()` method returns [stmpxper::R](stmpxper::R) reader structure"]
impl crate::Readable for STMPXPER {}
#[doc = "XOSC Period Value"]
pub mod stmpxper;
#[doc = "Captured WCLK Counter Value, Capture Channel 0\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpwcntcapt0](stmpwcntcapt0) module"]
pub type STMPWCNTCAPT0 = crate::Reg<u32, _STMPWCNTCAPT0>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPWCNTCAPT0;
#[doc = "`read()` method returns [stmpwcntcapt0::R](stmpwcntcapt0::R) reader structure"]
impl crate::Readable for STMPWCNTCAPT0 {}
#[doc = "Captured WCLK Counter Value, Capture Channel 0"]
pub mod stmpwcntcapt0;
#[doc = "WCLK Counter Period Value\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpwper](stmpwper) module"]
pub type STMPWPER = crate::Reg<u32, _STMPWPER>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPWPER;
#[doc = "`read()` method returns [stmpwper::R](stmpwper::R) reader structure"]
impl crate::Readable for STMPWPER {}
#[doc = "`write(|w| ..)` method takes [stmpwper::W](stmpwper::W) writer structure"]
impl crate::Writable for STMPWPER {}
#[doc = "WCLK Counter Period Value"]
pub mod stmpwper;
#[doc = "WCLK Counter Trigger Value for Input Pins\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpintrig](stmpintrig) module"]
pub type STMPINTRIG = crate::Reg<u32, _STMPINTRIG>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPINTRIG;
#[doc = "`read()` method returns [stmpintrig::R](stmpintrig::R) reader structure"]
impl crate::Readable for STMPINTRIG {}
#[doc = "`write(|w| ..)` method takes [stmpintrig::W](stmpintrig::W) writer structure"]
impl crate::Writable for STMPINTRIG {}
#[doc = "WCLK Counter Trigger Value for Input Pins"]
pub mod stmpintrig;
#[doc = "WCLK Counter Trigger Value for Output Pins\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpouttrig](stmpouttrig) module"]
pub type STMPOUTTRIG = crate::Reg<u32, _STMPOUTTRIG>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPOUTTRIG;
#[doc = "`read()` method returns [stmpouttrig::R](stmpouttrig::R) reader structure"]
impl crate::Readable for STMPOUTTRIG {}
#[doc = "`write(|w| ..)` method takes [stmpouttrig::W](stmpouttrig::W) writer structure"]
impl crate::Writable for STMPOUTTRIG {}
#[doc = "WCLK Counter Trigger Value for Output Pins"]
pub mod stmpouttrig;
#[doc = "WCLK Counter Set Operation\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpwset](stmpwset) module"]
pub type STMPWSET = crate::Reg<u32, _STMPWSET>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPWSET;
#[doc = "`read()` method returns [stmpwset::R](stmpwset::R) reader structure"]
impl crate::Readable for STMPWSET {}
#[doc = "`write(|w| ..)` method takes [stmpwset::W](stmpwset::W) writer structure"]
impl crate::Writable for STMPWSET {}
#[doc = "WCLK Counter Set Operation"]
pub mod stmpwset;
#[doc = "WCLK Counter Add Operation\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpwadd](stmpwadd) module"]
pub type STMPWADD = crate::Reg<u32, _STMPWADD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPWADD;
#[doc = "`read()` method returns [stmpwadd::R](stmpwadd::R) reader structure"]
impl crate::Readable for STMPWADD {}
#[doc = "`write(|w| ..)` method takes [stmpwadd::W](stmpwadd::W) writer structure"]
impl crate::Writable for STMPWADD {}
#[doc = "WCLK Counter Add Operation"]
pub mod stmpwadd;
#[doc = "XOSC Minimum Period Value Minimum Value of STMPXPER\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpxpermin](stmpxpermin) module"]
pub type STMPXPERMIN = crate::Reg<u32, _STMPXPERMIN>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPXPERMIN;
#[doc = "`read()` method returns [stmpxpermin::R](stmpxpermin::R) reader structure"]
impl crate::Readable for STMPXPERMIN {}
#[doc = "`write(|w| ..)` method takes [stmpxpermin::W](stmpxpermin::W) writer structure"]
impl crate::Writable for STMPXPERMIN {}
#[doc = "XOSC Minimum Period Value Minimum Value of STMPXPER"]
pub mod stmpxpermin;
#[doc = "Current Value of WCNT\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpwcnt](stmpwcnt) module"]
pub type STMPWCNT = crate::Reg<u32, _STMPWCNT>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPWCNT;
#[doc = "`read()` method returns [stmpwcnt::R](stmpwcnt::R) reader structure"]
impl crate::Readable for STMPWCNT {}
#[doc = "Current Value of WCNT"]
pub mod stmpwcnt;
#[doc = "Current Value of XCNT\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpxcnt](stmpxcnt) module"]
pub type STMPXCNT = crate::Reg<u32, _STMPXCNT>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPXCNT;
#[doc = "`read()` method returns [stmpxcnt::R](stmpxcnt::R) reader structure"]
impl crate::Readable for STMPXCNT {}
#[doc = "Current Value of XCNT"]
pub mod stmpxcnt;
#[doc = "Captured XOSC Counter Value, Capture Channel 1\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpxcntcapt1](stmpxcntcapt1) module"]
pub type STMPXCNTCAPT1 = crate::Reg<u32, _STMPXCNTCAPT1>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPXCNTCAPT1;
#[doc = "`read()` method returns [stmpxcntcapt1::R](stmpxcntcapt1::R) reader structure"]
impl crate::Readable for STMPXCNTCAPT1 {}
#[doc = "Captured XOSC Counter Value, Capture Channel 1"]
pub mod stmpxcntcapt1;
#[doc = "Captured WCLK Counter Value, Capture Channel 1\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [stmpwcntcapt1](stmpwcntcapt1) module"]
pub type STMPWCNTCAPT1 = crate::Reg<u32, _STMPWCNTCAPT1>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _STMPWCNTCAPT1;
#[doc = "`read()` method returns [stmpwcntcapt1::R](stmpwcntcapt1::R) reader structure"]
impl crate::Readable for STMPWCNTCAPT1 {}
#[doc = "Captured WCLK Counter Value, Capture Channel 1"]
pub mod stmpwcntcapt1;
#[doc = "Masked Interrupt Status Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [irqmask](irqmask) module"]
pub type IRQMASK = crate::Reg<u32, _IRQMASK>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _IRQMASK;
#[doc = "`read()` method returns [irqmask::R](irqmask::R) reader structure"]
impl crate::Readable for IRQMASK {}
#[doc = "`write(|w| ..)` method takes [irqmask::W](irqmask::W) writer structure"]
impl crate::Writable for IRQMASK {}
#[doc = "Masked Interrupt Status Register"]
pub mod irqmask;
#[doc = "Raw Interrupt Status Register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [irqflags](irqflags) module"]
pub type IRQFLAGS = crate::Reg<u32, _IRQFLAGS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _IRQFLAGS;
#[doc = "`read()` method returns [irqflags::R](irqflags::R) reader structure"]
impl crate::Readable for IRQFLAGS {}
#[doc = "Raw Interrupt Status Register"]
pub mod irqflags;
#[doc = "Interrupt Set Register\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [irqset](irqset) module"]
pub type IRQSET = crate::Reg<u32, _IRQSET>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _IRQSET;
#[doc = "`write(|w| ..)` method takes [irqset::W](irqset::W) writer structure"]
impl crate::Writable for IRQSET {}
#[doc = "Interrupt Set Register"]
pub mod irqset;
#[doc = "Interrupt Clear Register\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [irqclr](irqclr) module"]
pub type IRQCLR = crate::Reg<u32, _IRQCLR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _IRQCLR;
#[doc = "`write(|w| ..)` method takes [irqclr::W](irqclr::W) writer structure"]
impl crate::Writable for IRQCLR {}
#[doc = "Interrupt Clear Register"]
pub mod irqclr;
| _AIFFMTCFG |
MiniSearch.ts | import SearchableMap from './SearchableMap/SearchableMap'
const OR = 'or'
const AND = 'and'
const AND_NOT = 'and_not'
/**
* Search options to customize the search behavior.
*/
export type SearchOptions = {
/**
* Names of the fields to search in. If omitted, all fields are searched.
*/
fields?: string[],
/**
* Function used to filter search results, for example on the basis of stored
* fields. It takes as argument each search result and should return a boolean
* to indicate if the result should be kept or not.
*/
filter?: (result: SearchResult) => boolean,
/**
* Key-value object of field names to boosting values. By default, fields are
* assigned a boosting factor of 1. If one assigns to a field a boosting value
* of 2, a result that matches the query in that field is assigned a score
* twice as high as a result matching the query in another field, all else
* being equal.
*/
boost?: { [fieldName: string]: number },
/**
* Relative weights to assign to prefix search results and fuzzy search
* results. Exact matches are assigned a weight of 1.
*/
weights?: { fuzzy: number, prefix: number },
/**
* Function to calculate a boost factor for documents. It takes as arguments
* the document ID, and a term that matches the search in that document, and
* should return a boosting factor.
*/
boostDocument?: (documentId: any, term: string) => number,
/**
* Controls whether to perform prefix search. It can be a simple boolean, or a
* function.
*
* If a boolean is passed, prefix search is performed if true.
*
* If a function is passed, it is called upon search with a search term, the
* positional index of that search term in the tokenized search query, and the
* tokenized search query. The function should return a boolean to indicate
* whether to perform prefix search for that search term.
*/
prefix?: boolean | ((term: string, index: number, terms: string[]) => boolean),
/**
* Controls whether to perform fuzzy search. It can be a simple boolean, or a
* number, or a function.
*
* If a boolean is given, fuzzy search with a default fuzziness parameter is
* performed if true.
*
   * If a number greater than or equal to 1 is given, fuzzy search is performed with
   * a maximum edit distance (Levenshtein) equal to the number.
*
* If a number between 0 and 1 is given, fuzzy search is performed within a
* maximum edit distance corresponding to that fraction of the term length,
* approximated to the nearest integer. For example, 0.2 would mean an edit
* distance of 20% of the term length, so 1 character in a 5-characters term.
*
* If a function is passed, the function is called upon search with a search
* term, a positional index of that term in the tokenized search query, and
* the tokenized search query. It should return a boolean or a number, with
* the meaning documented above.
*/
fuzzy?: boolean | number | ((term: string, index: number, terms: string[]) => boolean | number),
/**
   * The operator used to combine partial results for each term. By default it is
* "OR", so results matching _any_ of the search terms are returned by a
* search. If "AND" is given, only results matching _all_ the search terms are
* returned by a search.
*/
combineWith?: string,
/**
* Function to tokenize the search query. By default, the same tokenizer used
* for indexing is used also for search.
*/
tokenize?: (text: string) => string[],
/**
* Function to process or normalize terms in the search query. By default, the
* same term processor used for indexing is used also for search.
*/
processTerm?: (term: string) => string | null | undefined | false
}
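// For orientation only, a hypothetical options object combining several of the
// fields documented above (illustrative values; 'category' is assumed to be a
// stored field):
//
//   const exampleSearchOptions: SearchOptions = {
//     fields: ['title', 'text'],
//     boost: { title: 2 },
//     prefix: true,
//     fuzzy: 0.2,
//     combineWith: 'AND',
//     filter: (result) => result.category === 'fiction'
//   }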
type SearchOptionsWithDefaults = SearchOptions & {
boost: { [fieldName: string]: number },
weights: { fuzzy: number, prefix: number },
prefix: boolean | ((term: string, index: number, terms: string[]) => boolean),
fuzzy: boolean | number | ((term: string, index: number, terms: string[]) => boolean | number),
combineWith: string
}
/**
* Configuration options passed to the [[MiniSearch]] constructor
*
* @typeParam T The type of documents being indexed.
*/
export type Options<T = any> = {
/**
* Names of the document fields to be indexed.
*/
fields: string[],
/**
* Name of the ID field, uniquely identifying a document.
*/
idField?: string,
/**
   * Names of fields to store, so that search results include them. By
   * default none, so results only contain the id field.
*/
storeFields?: string[],
/**
* Function used to extract the value of each field in documents. By default,
* the documents are assumed to be plain objects with field names as keys,
* but by specifying a custom `extractField` function one can completely
* customize how the fields are extracted.
*
* The function takes as arguments the document, and the name of the field to
* extract from it. It should return the field value as a string.
*/
extractField?: (document: T, fieldName: string) => string,
  /**
* Function used to split a field value into individual terms to be indexed.
* The default tokenizer separates terms by space or punctuation, but a
* custom tokenizer can be provided for custom logic.
*
   * The function takes as arguments the string to tokenize, and the name of the
* field it comes from. It should return the terms as an array of strings.
* When used for tokenizing a search query instead of a document field, the
* `fieldName` is undefined.
*/
tokenize?: (text: string, fieldName?: string) => string[],
/**
* Function used to process a term before indexing or search. This can be
* used for normalization (such as stemming). By default, terms are
   * downcased, and no other normalization is performed.
*
* The function takes as arguments a term to process, and the name of the
* field it comes from. It should return the processed term as a string, or a
* falsy value to reject the term entirely.
*/
processTerm?: (term: string, fieldName?: string) => string | null | undefined | false,
/**
* Default search options (see the [[SearchOptions]] type and the
* [[MiniSearch.search]] method for details)
*/
searchOptions?: SearchOptions
}
type OptionsWithDefaults<T = any> = Options<T> & {
storeFields: string[],
idField: string,
extractField: (document: T, fieldName: string) => string,
tokenize: (text: string, fieldName: string) => string[],
processTerm: (term: string, fieldName: string) => string | null | undefined | false,
searchOptions: SearchOptionsWithDefaults
}
/**
* The type of auto-suggestions
*/
export type Suggestion = {
/**
* The suggestion
*/
suggestion: string,
/**
* Suggestion as an array of terms
*/
terms: string[],
/**
* Score for the suggestion
*/
score: number
}
/**
* Match information for a search result. It is a key-value object where keys
* are terms that matched, and values are the list of fields that the term was
* found in.
*/
export type MatchInfo = {
[term: string]: string[]
}
/**
* Type of the search results. Each search result indicates the document ID, the
* terms that matched, the match information, the score, and all the stored
* fields.
*/
export type SearchResult = {
/**
* The document ID
*/
id: any,
/**
* List of terms that matched
*/
terms: string[],
/**
* Score of the search results
*/
score: number,
/**
* Match information, see [[MatchInfo]]
*/
match: MatchInfo,
/**
* Stored fields
*/
[key: string]: any
}
/**
* @ignore
*/
export type AsPlainObject = {
index: { _tree: {}, _prefix: string },
documentCount: number,
nextId: number,
documentIds: { [shortId: string]: any }
fieldIds: { [fieldName: string]: number }
fieldLength: { [shortId: string]: { [fieldId: string]: number } },
averageFieldLength: { [fieldId: string]: number },
storedFields: { [shortId: string]: any }
}
export type QueryCombination = SearchOptions & { queries: Query[] }
/**
* Search query expression, either a query string or an expression tree
* combining several queries with a combination of AND or OR.
*/
export type Query = QueryCombination | string
type QuerySpec = {
prefix: boolean,
fuzzy: number | boolean,
term: string
}
type IndexData = {
[fieldId: string]: { df: number, ds: { [shortId: string]: number } }
}
type RawResult = {
[shortId: string]: { score: number, match: MatchInfo, terms: string[] }
}
/**
* [[MiniSearch]] is the main entrypoint class, implementing a full-text search
* engine in memory.
*
* @typeParam T The type of the documents being indexed.
*
* ### Basic example:
*
* ```javascript
* const documents = [
* {
* id: 1,
* title: 'Moby Dick',
* text: 'Call me Ishmael. Some years ago...',
* category: 'fiction'
* },
* {
* id: 2,
* title: 'Zen and the Art of Motorcycle Maintenance',
* text: 'I can see by my watch...',
* category: 'fiction'
* },
* {
* id: 3,
* title: 'Neuromancer',
* text: 'The sky above the port was...',
* category: 'fiction'
* },
* {
* id: 4,
* title: 'Zen and the Art of Archery',
* text: 'At first sight it must seem...',
* category: 'non-fiction'
* },
* // ...and more
* ]
*
* // Create a search engine that indexes the 'title' and 'text' fields for
* // full-text search. Search results will include 'title' and 'category' (plus the
* // id field, that is always stored and returned)
* const miniSearch = new MiniSearch({
* fields: ['title', 'text'],
* storeFields: ['title', 'category']
* })
*
* // Add documents to the index
* miniSearch.addAll(documents)
*
* // Search for documents:
* let results = miniSearch.search('zen art motorcycle')
* // => [
* // { id: 2, title: 'Zen and the Art of Motorcycle Maintenance', category: 'fiction', score: 2.77258 },
* // { id: 4, title: 'Zen and the Art of Archery', category: 'non-fiction', score: 1.38629 }
* // ]
* ```
*/
export default class | <T = any> {
protected _options: OptionsWithDefaults<T>
protected _index: SearchableMap
protected _documentCount: number
protected _documentIds: { [shortId: string]: any }
protected _fieldIds: { [fieldName: string]: number }
protected _fieldLength: { [shortId: string]: { [fieldId: string]: number } }
protected _averageFieldLength: { [fieldId: string]: number }
protected _nextId: number
protected _storedFields: { [shortId: string]: any }
/**
* @param options Configuration options
*
* ### Examples:
*
* ```javascript
* // Create a search engine that indexes the 'title' and 'text' fields of your
* // documents:
* const miniSearch = new MiniSearch({ fields: ['title', 'text'] })
* ```
*
* ### ID Field:
*
* ```javascript
* // Your documents are assumed to include a unique 'id' field, but if you want
* // to use a different field for document identification, you can set the
* // 'idField' option:
* const miniSearch = new MiniSearch({ idField: 'key', fields: ['title', 'text'] })
* ```
*
* ### Options and defaults:
*
* ```javascript
* // The full set of options (here with their default value) is:
* const miniSearch = new MiniSearch({
* // idField: field that uniquely identifies a document
* idField: 'id',
*
* // extractField: function used to get the value of a field in a document.
* // By default, it assumes the document is a flat object with field names as
* // property keys and field values as string property values, but custom logic
* // can be implemented by setting this option to a custom extractor function.
* extractField: (document, fieldName) => document[fieldName],
*
* // tokenize: function used to split fields into individual terms. By
* // default, it is also used to tokenize search queries, unless a specific
* // `tokenize` search option is supplied. When tokenizing an indexed field,
* // the field name is passed as the second argument.
* tokenize: (string, _fieldName) => string.split(SPACE_OR_PUNCTUATION),
*
* // processTerm: function used to process each tokenized term before
* // indexing. It can be used for stemming and normalization. Return a falsy
* // value in order to discard a term. By default, it is also used to process
* // search queries, unless a specific `processTerm` option is supplied as a
   * // search option. When processing a term from an indexed field, the field
* // name is passed as the second argument.
* processTerm: (term, _fieldName) => term.toLowerCase(),
*
* // searchOptions: default search options, see the `search` method for
* // details
* searchOptions: undefined,
*
* // fields: document fields to be indexed. Mandatory, but not set by default
* fields: undefined
*
* // storeFields: document fields to be stored and returned as part of the
* // search results.
* storeFields: []
* })
* ```
*/
constructor (options: Options<T>) {
if (options?.fields == null) {
throw new Error('MiniSearch: option "fields" must be provided')
}
this._options = {
...defaultOptions,
...options,
searchOptions: { ...defaultSearchOptions, ...(options.searchOptions || {}) }
}
this._index = new SearchableMap()
this._documentCount = 0
this._documentIds = {}
this._fieldIds = {}
this._fieldLength = {}
this._averageFieldLength = {}
this._nextId = 0
this._storedFields = {}
this.addFields(this._options.fields)
}
/**
* Adds a document to the index
*
* @param document The document to be indexed
*/
add (document: T): void {
const { extractField, tokenize, processTerm, fields, idField } = this._options
const id = extractField(document, idField)
if (id == null) {
throw new Error(`MiniSearch: document does not have ID field "${idField}"`)
}
const shortDocumentId = this.addDocumentId(id)
this.saveStoredFields(shortDocumentId, document)
fields.forEach(field => {
const fieldValue = extractField(document, field)
if (fieldValue == null) { return }
const tokens = tokenize(fieldValue.toString(), field)
this.addFieldLength(shortDocumentId, this._fieldIds[field], this.documentCount - 1, tokens.length)
tokens.forEach(term => {
const processedTerm = processTerm(term, field)
if (processedTerm) {
this.addTerm(this._fieldIds[field], shortDocumentId, processedTerm)
}
})
})
}
/**
* Adds all the given documents to the index
*
* @param documents An array of documents to be indexed
*/
addAll (documents: T[]): void {
documents.forEach(document => this.add(document))
}
/**
* Adds all the given documents to the index asynchronously.
*
* Returns a promise that resolves (to `undefined`) when the indexing is done.
   * This method is useful when indexing many documents, as it avoids blocking the main
* thread. The indexing is performed asynchronously and in chunks.
*
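   * ### Usage:
   *
   * ```javascript
   * // A sketch of chunked indexing, assuming `documents` is a large array of
   * // objects with the configured fields: index 100 documents per chunk,
   * // yielding to the event loop between chunks.
   * miniSearch.addAllAsync(documents, { chunkSize: 100 })
   *   .then(() => console.log('done indexing'))
   * ```
   *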
* @param documents An array of documents to be indexed
* @param options Configuration options
* @return A promise resolving to `undefined` when the indexing is done
*/
addAllAsync (documents: T[], options: { chunkSize?: number } = {}): Promise<void> {
const { chunkSize = 10 } = options
const acc: { chunk: T[], promise: Promise<void> } = { chunk: [], promise: Promise.resolve() }
const { chunk, promise } = documents.reduce(({ chunk, promise }, document: T, i: number) => {
chunk.push(document)
if ((i + 1) % chunkSize === 0) {
return {
chunk: [],
promise: promise
.then(() => new Promise(resolve => setTimeout(resolve, 0)))
.then(() => this.addAll(chunk))
}
} else {
return { chunk, promise }
}
}, acc)
return promise.then(() => this.addAll(chunk))
}
/**
* Removes the given document from the index.
*
* The document to delete must NOT have changed between indexing and deletion,
* otherwise the index will be corrupted. Therefore, when reindexing a document
* after a change, the correct order of operations is:
*
* 1. remove old version
* 2. apply changes
* 3. index new version
*
* @param document The document to be removed
*/
remove (document: T): void {
const { tokenize, processTerm, extractField, fields, idField } = this._options
const id = extractField(document, idField)
if (id == null) {
throw new Error(`MiniSearch: document does not have ID field "${idField}"`)
}
const [shortDocumentId] = Object.entries(this._documentIds)
.find(([_, longId]) => id === longId) || []
if (shortDocumentId == null) {
throw new Error(`MiniSearch: cannot remove document with ID ${id}: it is not in the index`)
}
fields.forEach(field => {
const fieldValue = extractField(document, field)
if (fieldValue == null) { return }
const tokens = tokenize(fieldValue.toString(), field)
tokens.forEach(term => {
const processedTerm = processTerm(term, field)
if (processedTerm) {
this.removeTerm(this._fieldIds[field], shortDocumentId, processedTerm)
}
})
this.removeFieldLength(shortDocumentId, this._fieldIds[field], this.documentCount, tokens.length)
})
delete this._storedFields[shortDocumentId]
delete this._documentIds[shortDocumentId]
delete this._fieldLength[shortDocumentId]
this._documentCount -= 1
}
/**
* Removes all the given documents from the index. If called with no arguments,
* it removes _all_ documents from the index.
*
* @param documents The documents to be removed. If this argument is omitted,
* all documents are removed. Note that, for removing all documents, it is
* more efficient to call this method with no arguments than to pass all
* documents.
*/
removeAll (documents?: T[]): void {
if (documents) {
documents.forEach(document => this.remove(document))
} else if (arguments.length > 0) {
throw new Error('Expected documents to be present. Omit the argument to remove all documents.')
} else {
this._index = new SearchableMap()
this._documentCount = 0
this._documentIds = {}
this._fieldLength = {}
this._averageFieldLength = {}
this._storedFields = {}
this._nextId = 0
}
}
/**
* Search for documents matching the given search query.
*
* The result is a list of scored document IDs matching the query, sorted by
* descending score, and each including data about which terms were matched and
* in which fields.
*
* ### Basic usage:
*
* ```javascript
* // Search for "zen art motorcycle" with default options: terms have to match
* // exactly, and individual terms are joined with OR
* miniSearch.search('zen art motorcycle')
* // => [ { id: 2, score: 2.77258, match: { ... } }, { id: 4, score: 1.38629, match: { ... } } ]
* ```
*
* ### Restrict search to specific fields:
*
* ```javascript
* // Search only in the 'title' field
* miniSearch.search('zen', { fields: ['title'] })
* ```
*
* ### Field boosting:
*
* ```javascript
* // Boost a field
* miniSearch.search('zen', { boost: { title: 2 } })
* ```
*
* ### Prefix search:
*
* ```javascript
* // Search for "moto" with prefix search (it will match documents
* // containing terms that start with "moto" or "neuro")
* miniSearch.search('moto neuro', { prefix: true })
* ```
*
* ### Fuzzy search:
*
* ```javascript
* // Search for "ismael" with fuzzy search (it will match documents containing
   * // terms similar to "ismael", with a maximum edit distance of 0.2 * term.length,
   * // rounded to the nearest integer)
* miniSearch.search('ismael', { fuzzy: 0.2 })
* ```
*
* ### Combining strategies:
*
* ```javascript
* // Mix of exact match, prefix search, and fuzzy search
* miniSearch.search('ismael mob', {
* prefix: true,
* fuzzy: 0.2
* })
* ```
*
* ### Advanced prefix and fuzzy search:
*
* ```javascript
* // Perform fuzzy and prefix search depending on the search term. Here
* // performing prefix and fuzzy search only on terms longer than 3 characters
* miniSearch.search('ismael mob', {
   *   prefix: term => term.length > 3,
* fuzzy: term => term.length > 3 ? 0.2 : null
* })
* ```
*
* ### Combine with AND:
*
* ```javascript
* // Combine search terms with AND (to match only documents that contain both
* // "motorcycle" and "art")
* miniSearch.search('motorcycle art', { combineWith: 'AND' })
* ```
*
* ### Combine with AND_NOT:
*
   * There is also an AND_NOT combinator, which finds documents that match the
   * first term but do not match any of the other terms. This combinator is
* rarely useful with simple queries, and is meant to be used with advanced
* query combinations (see later for more details).
*
* ### Filtering results:
*
* ```javascript
* // Filter only results in the 'fiction' category (assuming that 'category'
* // is a stored field)
* miniSearch.search('motorcycle art', {
* filter: (result) => result.category === 'fiction'
* })
* ```
*
* ### Advanced combination of queries:
*
* It is possible to combine different subqueries with OR, AND, and AND_NOT,
* and even with different search options, by passing a query expression
* tree object as the first argument, instead of a string.
*
* ```javascript
* // Search for documents that contain "zen" and ("motorcycle" or "archery")
* miniSearch.search({
* combineWith: 'AND',
* queries: [
* 'zen',
* {
* combineWith: 'OR',
* queries: ['motorcycle', 'archery']
* }
* ]
* })
*
* // Search for documents that contain ("apple" or "pear") but not "juice" and
* // not "tree"
* miniSearch.search({
* combineWith: 'AND_NOT',
* queries: [
* {
* combineWith: 'OR',
* queries: ['apple', 'pear']
* },
* 'juice',
* 'tree'
* ]
* })
* ```
*
* Each node in the expression tree can be either a string, or an object that
* supports all `SearchOptions` fields, plus a `queries` array field for
* subqueries.
*
* Note that, while this can become complicated to do by hand for complex or
* deeply nested queries, it provides a formalized expression tree API for
* external libraries that implement a parser for custom query languages.
*
* @param query Search query
* @param options Search options. Each option, if not given, defaults to the corresponding value of `searchOptions` given to the constructor, or to the library default.
*/
search (query: Query, searchOptions: SearchOptions = {}): SearchResult[] {
const combinedResults = this.executeQuery(query, searchOptions)
return Object.entries(combinedResults)
.reduce((results: SearchResult[], [docId, { score, match, terms }]) => {
const result = {
id: this._documentIds[docId],
terms: uniq(terms),
score,
match
}
Object.assign(result, this._storedFields[docId])
if (searchOptions.filter == null || searchOptions.filter(result)) {
results.push(result)
}
return results
}, [])
.sort(({ score: a }, { score: b }) => a < b ? 1 : -1)
}
/**
* Provide suggestions for the given search query
*
* The result is a list of suggested modified search queries, derived from the
* given search query, each with a relevance score, sorted by descending score.
*
* ### Basic usage:
*
* ```javascript
* // Get suggestions for 'neuro':
* miniSearch.autoSuggest('neuro')
* // => [ { suggestion: 'neuromancer', terms: [ 'neuromancer' ], score: 0.46240 } ]
* ```
*
* ### Multiple words:
*
* ```javascript
* // Get suggestions for 'zen ar':
* miniSearch.autoSuggest('zen ar')
* // => [
* // { suggestion: 'zen archery art', terms: [ 'zen', 'archery', 'art' ], score: 1.73332 },
* // { suggestion: 'zen art', terms: [ 'zen', 'art' ], score: 1.21313 }
* // ]
* ```
*
* ### Fuzzy suggestions:
*
* ```javascript
* // Correct spelling mistakes using fuzzy search:
* miniSearch.autoSuggest('neromancer', { fuzzy: 0.2 })
* // => [ { suggestion: 'neuromancer', terms: [ 'neuromancer' ], score: 1.03998 } ]
* ```
*
* ### Filtering:
*
* ```javascript
* // Get suggestions for 'zen ar', but only within the 'fiction' category
* // (assuming that 'category' is a stored field):
* miniSearch.autoSuggest('zen ar', {
* filter: (result) => result.category === 'fiction'
* })
* // => [
* // { suggestion: 'zen archery art', terms: [ 'zen', 'archery', 'art' ], score: 1.73332 },
* // { suggestion: 'zen art', terms: [ 'zen', 'art' ], score: 1.21313 }
* // ]
* ```
*
* @param queryString Query string to be expanded into suggestions
* @param options Search options. The supported options and default values
* are the same as for the `search` method, except that by default prefix
* search is performed on the last term in the query.
   * @return An array of suggestions sorted by descending relevance score.
*/
autoSuggest (queryString: string, options: SearchOptions = {}): Suggestion[] {
options = { ...defaultAutoSuggestOptions, ...options }
const suggestions = this.search(queryString, options).reduce((
suggestions: { [phrase: string]: Omit<Suggestion, 'suggestion'> & { count: number } },
{ score, terms }
) => {
const phrase = terms.join(' ')
if (suggestions[phrase] == null) {
suggestions[phrase] = { score, terms, count: 1 }
} else {
suggestions[phrase].score += score
suggestions[phrase].count += 1
}
return suggestions
}, {})
return Object.entries(suggestions)
.map(([suggestion, { score, terms, count }]) => ({ suggestion, terms, score: score / count }))
.sort(({ score: a }, { score: b }) => a < b ? 1 : -1)
}
/**
* Number of documents in the index
*/
get documentCount (): number {
return this._documentCount
}
/**
* Deserializes a JSON index (serialized with `miniSearch.toJSON()`) and
* instantiates a MiniSearch instance. It should be given the same options
* originally used when serializing the index.
*
* ### Usage:
*
* ```javascript
* // If the index was serialized with:
* let miniSearch = new MiniSearch({ fields: ['title', 'text'] })
* miniSearch.addAll(documents)
*
* const json = JSON.stringify(miniSearch)
* // It can later be deserialized like this:
* miniSearch = MiniSearch.loadJSON(json, { fields: ['title', 'text'] })
* ```
*
* @param json JSON-serialized index
* @param options configuration options, same as the constructor
* @return An instance of MiniSearch deserialized from the given JSON.
*/
static loadJSON<T = any> (json: string, options: Options<T>): MiniSearch<T> {
if (options == null) {
throw new Error('MiniSearch: loadJSON should be given the same options used when serializing the index')
}
return MiniSearch.loadJS(JSON.parse(json), options)
}
/**
* Returns the default value of an option. It will throw an error if no option
* with the given name exists.
*
* @param optionName Name of the option
* @return The default value of the given option
*
* ### Usage:
*
* ```javascript
* // Get default tokenizer
* MiniSearch.getDefault('tokenize')
*
* // Get default term processor
* MiniSearch.getDefault('processTerm')
*
* // Unknown options will throw an error
* MiniSearch.getDefault('notExisting')
* // => throws 'MiniSearch: unknown option "notExisting"'
* ```
*/
static getDefault (optionName: string): any {
if (defaultOptions.hasOwnProperty(optionName)) {
return getOwnProperty(defaultOptions, optionName)
} else {
throw new Error(`MiniSearch: unknown option "${optionName}"`)
}
}
/**
* @ignore
*/
static loadJS<T = any> (js: AsPlainObject, options: Options<T>): MiniSearch<T> {
const {
index,
documentCount,
nextId,
documentIds,
fieldIds,
fieldLength,
averageFieldLength,
storedFields
} = js
const miniSearch = new MiniSearch(options)
miniSearch._index = new SearchableMap(index._tree, index._prefix)
miniSearch._documentCount = documentCount
miniSearch._nextId = nextId
miniSearch._documentIds = documentIds
miniSearch._fieldIds = fieldIds
miniSearch._fieldLength = fieldLength
miniSearch._averageFieldLength = averageFieldLength
miniSearch._storedFields = storedFields || {}
return miniSearch
}
/**
* @ignore
*/
private executeQuery (query: Query, searchOptions: SearchOptions = {}): RawResult {
if (typeof query === 'string') {
return this.executeSearch(query, searchOptions)
} else {
const results = query.queries.map((subquery) => {
const options = { ...searchOptions, ...query, queries: undefined }
return this.executeQuery(subquery, options)
})
return this.combineResults(results, query.combineWith)
}
}
/**
* @ignore
*/
private executeSearch (queryString: string, searchOptions: SearchOptions = {}): RawResult {
const { tokenize, processTerm, searchOptions: globalSearchOptions } = this._options
const options = { tokenize, processTerm, ...globalSearchOptions, ...searchOptions }
const { tokenize: searchTokenize, processTerm: searchProcessTerm } = options
const terms = searchTokenize(queryString)
.map((term: string) => searchProcessTerm(term))
.filter((term) => !!term) as string[]
const queries: QuerySpec[] = terms.map(termToQuerySpec(options))
const results = queries.map(query => this.executeQuerySpec(query, options))
return this.combineResults(results, options.combineWith)
}
/**
* @ignore
*/
private executeQuerySpec (query: QuerySpec, searchOptions: SearchOptions): RawResult {
const options: SearchOptionsWithDefaults = { ...this._options.searchOptions, ...searchOptions }
const boosts = (options.fields || this._options.fields).reduce((boosts, field) =>
({ ...boosts, [field]: getOwnProperty(boosts, field) || 1 }), options.boost || {})
const {
boostDocument,
weights
} = options
const { fuzzy: fuzzyWeight, prefix: prefixWeight } = { ...defaultSearchOptions.weights, ...weights }
const exactMatch = this.termResults(query.term, boosts, boostDocument, this._index.get(query.term))
if (!query.fuzzy && !query.prefix) { return exactMatch }
const results: RawResult[] = [exactMatch]
if (query.prefix) {
this._index.atPrefix(query.term).forEach((term: string, data: {}) => {
const weightedDistance = (0.3 * (term.length - query.term.length)) / term.length
results.push(this.termResults(term, boosts, boostDocument, data, prefixWeight, weightedDistance))
})
}
if (query.fuzzy) {
const fuzzy = (query.fuzzy === true) ? 0.2 : query.fuzzy
const maxDistance = fuzzy < 1 ? Math.round(query.term.length * fuzzy) : fuzzy
Object.entries(this._index.fuzzyGet(query.term, maxDistance)).forEach(([term, [data, distance]]) => {
const weightedDistance = distance / term.length
results.push(this.termResults(term, boosts, boostDocument, data, fuzzyWeight, weightedDistance))
})
}
return results.reduce(combinators[OR])
}
/**
* @ignore
*/
private combineResults (results: RawResult[], combineWith = OR): RawResult {
if (results.length === 0) { return {} }
const operator = combineWith.toLowerCase()
return results.reduce(combinators[operator]) || {}
}
/**
* Allows serialization of the index to JSON, to possibly store it and later
* deserialize it with `MiniSearch.loadJSON`.
*
   * Normally one does not call this method directly, but rather calls the
* standard JavaScript `JSON.stringify()` passing the `MiniSearch` instance,
* and JavaScript will internally call this method. Upon deserialization, one
* must pass to `loadJSON` the same options used to create the original
* instance that was serialized.
*
* ### Usage:
*
* ```javascript
* // Serialize the index:
* let miniSearch = new MiniSearch({ fields: ['title', 'text'] })
* miniSearch.addAll(documents)
* const json = JSON.stringify(miniSearch)
*
* // Later, to deserialize it:
* miniSearch = MiniSearch.loadJSON(json, { fields: ['title', 'text'] })
* ```
*
   * @return A plain-object serializable representation of the search index.
*/
toJSON (): AsPlainObject {
return {
index: this._index,
documentCount: this._documentCount,
nextId: this._nextId,
documentIds: this._documentIds,
fieldIds: this._fieldIds,
fieldLength: this._fieldLength,
averageFieldLength: this._averageFieldLength,
storedFields: this._storedFields
}
}
/**
* @ignore
*/
private termResults (
term: string,
boosts: { [field: string]: number },
boostDocument: ((id: any, term: string) => number) | undefined,
indexData: IndexData,
weight: number = 1,
editDistance: number = 0
): RawResult {
if (indexData == null) { return {} }
return Object.entries(boosts).reduce((
results: { [shortId: string]: { score: number, match: MatchInfo, terms: string[] } },
[field, boost]
) => {
const fieldId = this._fieldIds[field]
const { df, ds } = indexData[fieldId] || { ds: {} }
Object.entries(ds).forEach(([documentId, tf]) => {
const docBoost = boostDocument ? boostDocument(this._documentIds[documentId], term) : 1
if (!docBoost) { return }
const normalizedLength = this._fieldLength[documentId][fieldId] / this._averageFieldLength[fieldId]
results[documentId] = results[documentId] || { score: 0, match: {}, terms: [] }
results[documentId].terms.push(term)
results[documentId].match[term] = getOwnProperty(results[documentId].match, term) || []
results[documentId].score += docBoost * score(tf, df, this._documentCount, normalizedLength, boost, editDistance)
results[documentId].match[term].push(field)
})
return results
}, {})
}
/**
* @ignore
*/
private addTerm (fieldId: number, documentId: string, term: string): void {
this._index.update(term, (indexData: IndexData) => {
indexData = indexData || {}
const fieldIndex = indexData[fieldId] || { df: 0, ds: {} }
if (fieldIndex.ds[documentId] == null) { fieldIndex.df += 1 }
fieldIndex.ds[documentId] = (fieldIndex.ds[documentId] || 0) + 1
return { ...indexData, [fieldId]: fieldIndex }
})
}
/**
* @ignore
*/
private removeTerm (fieldId: number, documentId: string, term: string): void {
if (!this._index.has(term)) {
this.warnDocumentChanged(documentId, fieldId, term)
return
}
this._index.update(term, (indexData: IndexData) => {
const fieldIndex = indexData[fieldId]
if (fieldIndex == null || fieldIndex.ds[documentId] == null) {
this.warnDocumentChanged(documentId, fieldId, term)
return indexData
}
if (fieldIndex.ds[documentId] <= 1) {
if (fieldIndex.df <= 1) {
delete indexData[fieldId]
return indexData
}
fieldIndex.df -= 1
}
if (fieldIndex.ds[documentId] <= 1) {
delete fieldIndex.ds[documentId]
return indexData
}
fieldIndex.ds[documentId] -= 1
return { ...indexData, [fieldId]: fieldIndex }
})
if (Object.keys(this._index.get(term)).length === 0) {
this._index.delete(term)
}
}
/**
* @ignore
*/
private warnDocumentChanged (shortDocumentId: string, fieldId: number, term: string): void {
if (console == null || console.warn == null) { return }
const fieldName = Object.entries(this._fieldIds).find(([name, id]) => id === fieldId)![0]
console.warn(`MiniSearch: document with ID ${this._documentIds[shortDocumentId]} has changed before removal: term "${term}" was not present in field "${fieldName}". Removing a document after it has changed can corrupt the index!`)
}
/**
* @ignore
*/
private addDocumentId (documentId: any): string {
const shortDocumentId = this._nextId.toString(36)
this._documentIds[shortDocumentId] = documentId
this._documentCount += 1
this._nextId += 1
return shortDocumentId
}
/**
* @ignore
*/
private addFields (fields: string[]): void {
fields.forEach((field, i) => { this._fieldIds[field] = i })
}
/**
* @ignore
*/
private addFieldLength (documentId: string, fieldId: number, count: number, length: number): void {
this._averageFieldLength[fieldId] = this._averageFieldLength[fieldId] || 0
const totalLength = (this._averageFieldLength[fieldId] * count) + length
this._fieldLength[documentId] = this._fieldLength[documentId] || {}
this._fieldLength[documentId][fieldId] = length
this._averageFieldLength[fieldId] = totalLength / (count + 1)
}
/**
* @ignore
*/
private removeFieldLength (documentId: string, fieldId: number, count: number, length: number): void {
const totalLength = (this._averageFieldLength[fieldId] * count) - length
this._averageFieldLength[fieldId] = totalLength / (count - 1)
}
/**
* @ignore
*/
private saveStoredFields (documentId: string, doc: T): void {
const { storeFields, extractField } = this._options
if (storeFields == null || storeFields.length === 0) { return }
this._storedFields[documentId] = this._storedFields[documentId] || {}
storeFields.forEach((fieldName) => {
const fieldValue = extractField(doc, fieldName)
if (fieldValue === undefined) { return }
this._storedFields[documentId][fieldName] = fieldValue
})
}
}
const getOwnProperty = (object: any, property: string) =>
Object.prototype.hasOwnProperty.call(object, property) ? object[property] : undefined
type CombinatorFunction = (a: RawResult, b: RawResult) => RawResult
const combinators: { [kind: string]: CombinatorFunction } = {
[OR]: (a: RawResult, b: RawResult) => {
return Object.entries(b).reduce((combined: RawResult, [documentId, { score, match, terms }]) => {
if (combined[documentId] == null) {
combined[documentId] = { score, match, terms }
} else {
combined[documentId].score += score
combined[documentId].score *= 1.5
combined[documentId].terms.push(...terms)
Object.assign(combined[documentId].match, match)
}
return combined
}, a || {})
},
[AND]: (a: RawResult, b: RawResult) => {
return Object.entries(b).reduce((combined: RawResult, [documentId, { score, match, terms }]) => {
if (a[documentId] === undefined) { return combined }
combined[documentId] = combined[documentId] || {}
combined[documentId].score = a[documentId].score + score
combined[documentId].match = { ...a[documentId].match, ...match }
combined[documentId].terms = [...a[documentId].terms, ...terms]
return combined
}, {})
},
[AND_NOT]: (a: RawResult, b: RawResult) => {
return Object.entries(b).reduce((combined: RawResult, [documentId, { score, match, terms }]) => {
delete combined[documentId]
return combined
}, a || {})
}
}
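// A worked example of the combinators above, with hypothetical partial results:
// if term A yields { d1: { score: 1, ... } } and term B yields
// { d1: { score: 2, ... } }, the OR combinator gives d1 a score of
// (1 + 2) * 1.5 = 4.5, rewarding documents that match more than one term.
// AND keeps only documents present in both operands, while AND_NOT drops from
// the left operand any document that appears in the right one.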
const tfIdf = (tf: number, df: number, n: number): number => tf * Math.log(n / df)
const score = (
termFrequency: number,
documentFrequency: number,
documentCount: number,
normalizedLength: number,
boost: number,
editDistance: number
): number => {
const weight = boost / (1 + (0.333 * boost * editDistance))
return weight * tfIdf(termFrequency, documentFrequency, documentCount) / normalizedLength
}
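// A rough numeric sketch of the scoring above, with illustrative values: for
// termFrequency = 2, documentFrequency = 1 and documentCount = 4,
// tfIdf = 2 * ln(4 / 1), which is about 2.77. With boost = 1, editDistance = 0
// and normalizedLength = 1 the score is about 2.77; a fuzzy match with
// editDistance = 0.2 instead gets weight 1 / (1 + 0.333 * 0.2), roughly 0.94,
// lowering the score to about 2.60.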
const termToQuerySpec = (options: SearchOptions) => (term: string, i: number, terms: string[]): QuerySpec => {
const fuzzy = (typeof options.fuzzy === 'function')
? options.fuzzy(term, i, terms)
: (options.fuzzy || false)
const prefix = (typeof options.prefix === 'function')
? options.prefix(term, i, terms)
: (options.prefix === true)
return { term, fuzzy, prefix }
}
const uniq = <T>(array: T[]): T[] =>
array.filter((element: T, i: number, array: T[]) => array.indexOf(element) === i)
const defaultOptions = {
idField: 'id',
extractField: (document: { [key: string]: any }, fieldName: string) => document[fieldName],
tokenize: (text: string, fieldName?: string) => text.split(SPACE_OR_PUNCTUATION),
processTerm: (term: string, fieldName?: string) => term.toLowerCase(),
fields: undefined,
searchOptions: undefined,
storeFields: []
}
const defaultSearchOptions = {
combineWith: OR,
prefix: false,
fuzzy: false,
boost: {},
weights: { fuzzy: 0.9, prefix: 0.75 }
}
const defaultAutoSuggestOptions = {
prefix: (term: string, i: number, terms: string[]): boolean =>
i === terms.length - 1
}
// This regular expression matches any Unicode space or punctuation character
// Adapted from https://unicode.org/cldr/utility/list-unicodeset.jsp?a=%5Cp%7BZ%7D%5Cp%7BP%7D&abb=on&c=on&esc=on
const SPACE_OR_PUNCTUATION = /[\n\r -#%-*,-/:;?@[-\]_{}\u00A0\u00A1\u00A7\u00AB\u00B6\u00B7\u00BB\u00BF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C77\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166E\u1680\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2000-\u200A\u2010-\u2029\u202F-\u2043\u2045-\u2051\u2053-\u205F\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4F\u3000-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]+/u
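// For instance, the default tokenizer splits 'Call me Ishmael. Some years ago...'
// into ['Call', 'me', 'Ishmael', 'Some', 'years', 'ago', '']; the trailing empty
// string produced by the final punctuation run is discarded later, because the
// default processTerm('') is falsy.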
| MiniSearch |
dfs_search.py | from graph import Digraph, Node, WeightedEdge
def load_map(map_filename):
"""
Parses the map file and constructs a directed graph
Assumes:
Each entry in the map file consists of the following four positive
integers, separated by a blank space:
32 76 54 23
        This entry would become an edge from 32 to 76, with a total distance
        of 54 and an outdoor distance of 23.
Returns:
a Digraph representing the map
"""
g = Digraph()
with open(map_filename, 'r') as file:
read_data = file.read().split('\n')
    # split each line into its four fields, dropping any blank lines
    read_data = [line.split(' ') for line in read_data if line != '']
for elem in read_data:
start = Node(elem[0])
dest = Node(elem[1])
try:
g.add_node(start)
except ValueError:
pass
try:
g.add_node(dest)
except ValueError:
pass
edge1 = WeightedEdge(start, dest, int(elem[2]), int(elem[3]))
try:
g.add_edge(edge1)
except ValueError:
pass
return g
def get_best_path(digraph, start, end, path, max_dist_outdoors, best_dist,
best_path):
|
def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):
"""
Finds the shortest path from start to end using a directed depth-first
search.
Returns:
The shortest-path from start to end, represented by
a list of building numbers (in strings).
If there exists no path that satisfies max_total_dist and
max_dist_outdoors constraints, then raises a ValueError.
"""
search_result = get_best_path(digraph, start, end, [[], 0, 0], max_dist_outdoors, 0, [])
try:
if search_result[-1] <= max_total_dist:
return search_result[0]
else:
raise ValueError
except TypeError:
raise ValueError
| """
Finds the shortest path between buildings.
Returns:
A tuple with the shortest-path from start to end, represented by
a list of building numbers and the distance of that path.
If there exists no path that satisfies max_total_dist and
max_dist_outdoors constraints, then return None.
"""
start = Node(start)
end = Node(end)
path[0].append(start.get_name())
if start not in digraph.nodes or end not in digraph.nodes:
raise ValueError
elif start == end:
return tuple([path[0].copy(), path[1]])
else:
        # initialize so next_path is defined even if no recursive call is made
        next_path = None
        for edge in digraph.edges[start]:
if edge.get_destination().get_name() not in path[0]:
if len(best_path) == 0 or len(path[0]) < len(best_path):
if path[2] + edge.get_outdoor_distance() <= max_dist_outdoors:
path[1] += edge.get_total_distance()
path[2] += edge.get_outdoor_distance()
next_path = get_best_path(digraph, edge.get_destination(), end, path,
max_dist_outdoors, best_dist, best_path)
path[0].remove(edge.get_destination().get_name())
path[1] -= edge.get_total_distance()
path[2] -= edge.get_outdoor_distance()
else:
continue
if next_path is not None:
if best_dist == 0 or next_path[1] < best_dist:
best_path = next_path[0]
best_dist = next_path[1]
if best_dist == 0:
return None
return tuple([best_path, best_dist]) |
sum.go | package functions
import (
"github.com/TIBCOSoftware/flogo-lib/logger"
)
func AddSampleSum(a, b interface{}) interface{} |
func AggregateBlocksSum(blocks []interface{}, start int, size int) interface{} {
switch blocks[0].(type) {
case int:
return sumInt(blocks)
case float64:
return sumFloat(blocks)
case []int:
return sumIntArray(blocks)
case []float64:
return sumFloatArray(blocks)
}
	// TODO: handle unsupported block types
return 0
}
func sumInt(blocks []interface{}) interface{} {
total := 0
for _, block := range blocks {
total += block.(int)
}
return total
}
func sumFloat(blocks []interface{}) interface{} {
total := 0.0
for _, block := range blocks {
total += block.(float64)
}
return total
}
func sumIntArray(blocks []interface{}) interface{} {
firstBlock := blocks[0].([]int)
total := make([]int, len(firstBlock))
for _, block := range blocks {
arrBlock := block.([]int)
for i, val := range arrBlock {
total[i] += val
}
}
return total
}
func sumFloatArray(blocks []interface{}) interface{} {
firstBlock := blocks[0].([]float64)
total := make([]float64, len(firstBlock))
for _, block := range blocks {
arrBlock := block.([]float64)
for i, val := range arrBlock {
total[i] += val
}
}
return total
}
| {
if a == nil {
return b
} else if b == nil {
return a
}
switch x := a.(type) {
case int:
return x + b.(int)
case float64:
return x + b.(float64)
case []int:
y := b.([]int)
for idx, value := range x {
x[idx] = value + y[idx]
}
return x
case []float64:
y := b.([]float64)
for idx, value := range x {
x[idx] = value + y[idx]
}
return x
}
	logger.Errorf("AddSampleSum: unsupported input type %T", a)
	panic("AddSampleSum: unsupported input type")
} |
global_configmap_v1.go | /*
Copyright (C) 2019 Synopsys, Inc.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package v1 |
horizonapi "github.com/blackducksoftware/horizon/pkg/api"
"github.com/blackducksoftware/horizon/pkg/components"
blackduckapi "github.com/blackducksoftware/synopsys-operator/pkg/api/blackduck/v1"
"github.com/blackducksoftware/synopsys-operator/pkg/apps/store"
"github.com/blackducksoftware/synopsys-operator/pkg/apps/types"
apputils "github.com/blackducksoftware/synopsys-operator/pkg/apps/utils"
"github.com/blackducksoftware/synopsys-operator/pkg/protoform"
"github.com/blackducksoftware/synopsys-operator/pkg/util"
"k8s.io/client-go/kubernetes"
)
// BdConfigmap holds the Black Duck config map configuration
type BdConfigmap struct {
config *protoform.Config
kubeClient *kubernetes.Clientset
blackDuck *blackduckapi.Blackduck
}
func init() {
store.Register(types.BlackDuckGlobalConfigmapV1, NewBdConfigmap)
}
// NewBdConfigmap returns the Black Duck config map configuration
func NewBdConfigmap(config *protoform.Config, kubeClient *kubernetes.Clientset, cr interface{}) (types.ConfigMapInterface, error) {
blackDuck, ok := cr.(*blackduckapi.Blackduck)
if !ok {
return nil, fmt.Errorf("unable to cast the interface to Black Duck object")
}
return &BdConfigmap{config: config, kubeClient: kubeClient, blackDuck: blackDuck}, nil
}
// GetCM returns the config map
func (b *BdConfigmap) GetCM() (*components.ConfigMap, error) {
hubConfig := components.NewConfigMap(horizonapi.ConfigMapConfig{Namespace: b.blackDuck.Spec.Namespace, Name: apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "config")})
hubData := map[string]string{
"RUN_SECRETS_DIR": "/tmp/secrets",
"HUB_VERSION": b.blackDuck.Spec.Version,
}
blackduckServiceData := map[string]string{
		// TODO: the 2 environs below are commented out until HUB-20482 is fixed; once it is fixed, uncomment them
//"HUB_AUTHENTICATION_HOST": util.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "authentication"),
//"AUTHENTICATION_HOST": fmt.Sprintf("%s:%d", util.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "authentication"), int32(8443)),
"CLIENT_CERT_CN": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "binaryscanner"),
"CFSSL": fmt.Sprintf("%s:8888", apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "cfssl")),
"HUB_CFSSL_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "cfssl"),
"BLACKDUCK_CFSSL_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "cfssl"),
"BLACKDUCK_CFSSL_PORT": "8888",
"HUB_DOC_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "documentation"),
"HUB_JOBRUNNER_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "jobrunner"),
"HUB_LOGSTASH_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "logstash"),
"RABBIT_MQ_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "rabbitmq"),
"BROKER_URL": fmt.Sprintf("amqps://%s/protecodesc", apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "rabbitmq")),
"HUB_REGISTRATION_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "registration"),
"HUB_SCAN_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "scan"),
"HUB_SOLR_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "solr"),
		// TODO: the 2 environs below are commented out until HUB-20412 is fixed; once it is fixed, uncomment them
// "BLACKDUCK_UPLOAD_CACHE_HOST": util.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "uploadcache"),
// "HUB_UPLOAD_CACHE_HOST": util.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "uploadcache"),
		// TODO: the environs below are commented out until HUB-20462 is fixed; once it is fixed, uncomment them
// "HUB_WEBAPP_HOST": util.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "webapp"),
"HUB_WEBSERVER_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "webserver"),
"HUB_ZOOKEEPER_HOST": apputils.GetResourceName(b.blackDuck.Name, util.BlackDuckName, "zookeeper"),
}
hubData = util.MergeEnvMaps(blackduckServiceData, hubData)
for _, value := range b.blackDuck.Spec.Environs {
values := strings.SplitN(value, ":", 2)
if len(values) == 2 {
mapKey := strings.TrimSpace(values[0])
mapValue := strings.TrimSpace(values[1])
if len(mapKey) > 0 && len(mapValue) > 0 {
hubData[mapKey] = mapValue
}
}
}
// merge default and input environs
environs := GetBlackDuckKnobs()
hubData = util.MergeEnvMaps(hubData, environs)
hubConfig.AddData(hubData)
hubConfig.AddLabels(apputils.GetVersionLabel("configmap", b.blackDuck.Name, b.blackDuck.Spec.Version))
return hubConfig, nil
}
// GetBlackDuckKnobs returns the default Black Duck knobs
func GetBlackDuckKnobs() map[string]string {
return map[string]string{
"IPV4_ONLY": "0",
"USE_ALERT": "0",
"USE_BINARY_UPLOADS": "0",
"RABBIT_MQ_PORT": "5671",
"BROKER_USE_SSL": "yes",
"SCANNER_CONCURRENCY": "1",
"HTTPS_VERIFY_CERTS": "yes",
"RABBITMQ_DEFAULT_VHOST": "protecodesc",
"RABBITMQ_SSL_FAIL_IF_NO_PEER_CERT": "false",
"ENABLE_SOURCE_UPLOADS": "false",
"DATA_RETENTION_IN_DAYS": "180",
"MAX_TOTAL_SOURCE_SIZE_MB": "4000",
}
} |
import (
"fmt"
"strings" |
TestCgShader.py | #!/usr/bin/env python
import vtk
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer = vtk.vtkRenderer()
renWin.AddRenderer(renderer)
src1 = vtk.vtkSphereSource()
src1.SetRadius(5)
src1.SetPhiResolution(20)
src1.SetThetaResolution(20)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(src1.GetOutputPort())
actor = vtk.vtkActor() | # Load the material. Here, we are loading a material
# defined in the Vtk Library. One can also specify
# a filename to a material description xml.
actor.GetProperty().LoadMaterial("CgTwisted")
# Turn shading on. Otherwise, shaders are not used.
actor.GetProperty().ShadingOn()
# Pass a shader variable need by CgTwisted.
actor.GetProperty().AddShaderVariable("Rate",1.0)
renderer.AddActor(actor)
renWin.Render()
renderer.GetActiveCamera().Azimuth(-50)
renderer.GetActiveCamera().Roll(70)
renWin.Render()
# --- end of script -- | actor.SetMapper(mapper) |
BeamSpotOnline_cfi.py | import FWCore.ParameterSet.Config as cms
import RecoVertex.BeamSpotProducer.beamSpotOnlineProducer_cfi as _mod
onlineBeamSpotProducer = _mod.beamSpotOnlineProducer.clone(
src = 'scalersRawToDigi',
setSigmaZ = -1, #negative value disables it. | from Configuration.Eras.Modifier_run3_common_cff import run3_common
run3_common.toModify(onlineBeamSpotProducer, useTransientRecord = True) | gtEvmLabel = 'gtEvmDigis'
)
|
cron_scheudule.py | import os
import time
from pathlib import Path  # used to locate the user's home directory
import schedule
print(Path.home()) # C:\Users\angel
old_files_folder_name = "old_files"
print("Hello ")
def clean_up_downloads():
print("Cleaning up Downloads")
    # get all items from the Downloads folder
download_folder_path = os.path.join(Path.home(), "Downloads", "Downloads")
download_items = os.listdir(download_folder_path)
moved_items = 0
| if old_files_folder_name not in download_items:
print(f"No {old_files_folder_name} folder yet, creating folder")
os.mkdir(old_files_folder_path) # create folder "old_files"
# create new folder with todays timestamp
timestamp = time.strftime("%Y_%m_%d") # Year month and day
datetime_folder_path = os.path.join(old_files_folder_path, timestamp)
if not os.path.exists(datetime_folder_path):
print(f"No {datetime_folder_path} folder yet, creating folder")
os.mkdir(datetime_folder_path)
else:
print(f"{timestamp} folder already exists in {old_files_folder_name}")
# rename all items to move them into the current datetime folder
to_be_moved = [item for item in download_items if item != old_files_folder_name] # also moves folders
for item in to_be_moved:
print(f"Moving {item} to {datetime_folder_path} folder")
old_path = os.path.join(download_folder_path, item)
new_path = os.path.join(datetime_folder_path, item)
os.rename(old_path, new_path)
moved_items += 1
print(f"Moved {moved_items} of {len(to_be_moved)} items")
# clean up the downloads folder once a week (scheduled for Friday here instead of Monday, for testing)
# I execute the file on Friday at 20:21, so the job is set one minute later
schedule.every().friday.at("20:22").do(clean_up_downloads)
# keep the script running and sleep in between the checks
while True:
print("here")
schedule.run_pending()
    # sleep between checks (1 second for testing; use 60 * 60 * 24 to check once a day)
time.sleep(1) # 60 * 60 * 24 | # create the old files folder if not present
old_files_folder_path = os.path.join(download_folder_path, old_files_folder_name) |
forms.py | import os
import re
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
import commonware.log
from olympia import amo
from olympia.accounts.views import fxa_error_message
from olympia.amo.fields import HttpHttpsOnlyURLField
from olympia.users import notifications
from olympia.amo.utils import clean_nl, has_links, slug_validator
from olympia.lib import happyforms
from olympia.translations import LOCALES
from . import tasks
from .models import (
UserProfile, UserNotification, BlacklistedName)
from .widgets import (
NotificationsSelectMultiple, RequiredCheckboxInput, RequiredEmailInput,
RequiredTextarea)
log = commonware.log.getLogger('z.users')
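# Matches strings that contain at least one digit and at least one letter.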
admin_re = re.compile('(?=.*\d)(?=.*[a-zA-Z])')
class UserDeleteForm(forms.Form):
email = forms.CharField(max_length=255, required=True,
widget=RequiredEmailInput)
confirm = forms.BooleanField(required=True, widget=RequiredCheckboxInput)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(UserDeleteForm, self).__init__(*args, **kwargs)
self.fields['email'].widget.attrs['placeholder'] = (
self.request.user.email)
def clean_email(self):
user_email = self.request.user.email
if not user_email == self.cleaned_data['email']:
raise forms.ValidationError(_('Email must be {email}.').format(
email=user_email))
def clean(self):
amouser = self.request.user
if amouser.is_developer:
# This is tampering because the form isn't shown on the page if the
# user is a developer
log.warning(u'[Tampering] Attempt to delete developer account (%s)'
% self.request.user)
raise forms.ValidationError("")
class UserEditForm(happyforms.ModelForm):
username = forms.CharField(max_length=50, required=False)
display_name = forms.CharField(label=_lazy(u'Display Name'), max_length=50,
required=False)
location = forms.CharField(label=_lazy(u'Location'), max_length=100,
required=False)
occupation = forms.CharField(label=_lazy(u'Occupation'), max_length=100,
required=False)
homepage = HttpHttpsOnlyURLField(label=_lazy(u'Homepage'), required=False)
email = forms.EmailField(
required=False,
help_text=fxa_error_message(
_(u'Firefox Accounts users cannot currently change their email '
u'address.')),
widget=forms.EmailInput(attrs={'readonly': 'readonly'}))
photo = forms.FileField(label=_lazy(u'Profile Photo'), required=False)
notifications = forms.MultipleChoiceField(
choices=[],
widget=NotificationsSelectMultiple,
initial=notifications.NOTIFICATIONS_DEFAULT,
required=False)
lang = forms.TypedChoiceField(label=_lazy(u'Default locale'),
choices=LOCALES)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
instance = kwargs.get('instance')
if instance and instance.has_anonymous_username():
kwargs.setdefault('initial', {})
kwargs['initial']['username'] = ''
super(UserEditForm, self).__init__(*args, **kwargs)
errors = {'invalid': _('This URL has an invalid format. '
'Valid URLs look like '
'http://example.com/my_page.')}
self.fields['homepage'].error_messages = errors
if not self.instance.lang and self.request:
self.initial['lang'] = self.request.LANG
if self.instance:
default = dict((i, n.default_checked) for i, n
in notifications.NOTIFICATIONS_BY_ID.items())
user = dict((n.notification_id, n.enabled) for n
in self.instance.notifications.all())
default.update(user)
# Add choices to Notification.
choices = notifications.NOTIFICATIONS_CHOICES
if not self.instance.is_developer:
choices = notifications.NOTIFICATIONS_CHOICES_NOT_DEV
# Append a "NEW" message to new notification options.
saved = self.instance.notifications.values_list('notification_id',
flat=True)
self.choices_status = {}
for idx, label in choices:
self.choices_status[idx] = idx not in saved
self.fields['notifications'].choices = choices
self.fields['notifications'].initial = [i for i, v
in default.items() if v]
self.fields['notifications'].widget.form_instance = self
class Meta:
model = UserProfile
fields = (
'username', 'email', 'display_name', 'location', 'occupation',
'homepage', 'photo', 'lang', 'bio', 'display_collections',
'display_collections_fav', 'notifications',
)
def clean_username(self):
|
def clean_display_name(self):
name = self.cleaned_data['display_name']
if BlacklistedName.blocked(name):
raise forms.ValidationError(_('This display name cannot be used.'))
return name
def clean_email(self):
# TODO(django 1.9): Change the field to disabled=True and remove this.
return self.instance.email
def clean_photo(self):
photo = self.cleaned_data['photo']
if not photo:
return
if photo.content_type not in ('image/png', 'image/jpeg'):
raise forms.ValidationError(
_('Images must be either PNG or JPG.'))
if photo.size > settings.MAX_PHOTO_UPLOAD_SIZE:
raise forms.ValidationError(
_('Please use images smaller than %dMB.' %
(settings.MAX_PHOTO_UPLOAD_SIZE / 1024 / 1024 - 1)))
return photo
def clean_bio(self):
bio = self.cleaned_data['bio']
normalized = clean_nl(unicode(bio))
if has_links(normalized):
# There's some links, we don't want them.
raise forms.ValidationError(_('No links are allowed.'))
return bio
def save(self, log_for_developer=True):
u = super(UserEditForm, self).save(commit=False)
data = self.cleaned_data
photo = data['photo']
if photo:
u.picture_type = 'image/png'
tmp_destination = u.picture_path + '__unconverted'
with storage.open(tmp_destination, 'wb') as fh:
for chunk in photo.chunks():
fh.write(chunk)
tasks.resize_photo.delay(tmp_destination, u.picture_path,
set_modified_on=[u])
for (i, n) in notifications.NOTIFICATIONS_BY_ID.items():
enabled = n.mandatory or (str(i) in data['notifications'])
UserNotification.update_or_create(
user=u, notification_id=i, update={'enabled': enabled})
log.debug(u'User (%s) updated their profile' % u)
u.save()
return u
class AdminUserEditForm(UserEditForm):
"""This is the form used by admins to edit users' info."""
email = forms.EmailField(widget=RequiredEmailInput)
admin_log = forms.CharField(required=True, label='Reason for change',
widget=RequiredTextarea(attrs={'rows': 4}))
notes = forms.CharField(required=False, label='Notes',
widget=forms.Textarea(attrs={'rows': 4}))
anonymize = forms.BooleanField(required=False)
def changed_fields(self):
"""Returns changed_data ignoring these fields."""
return (set(self.changed_data) -
set(['admin_log', 'notifications', 'photo']))
def changes(self):
"""A dictionary of changed fields, old, new."""
details = dict([(k, (self.initial[k], self.cleaned_data[k]))
for k in self.changed_fields()])
return details
def clean_anonymize(self):
if (self.cleaned_data['anonymize'] and
self.changed_fields() != set(['anonymize'])):
raise forms.ValidationError(_('To anonymize, enter a reason for'
' the change but do not change any'
' other field.'))
return self.cleaned_data['anonymize']
def clean_email(self):
return self.cleaned_data['email']
def save(self, *args, **kw):
profile = super(AdminUserEditForm, self).save(log_for_developer=False)
if self.cleaned_data['anonymize']:
amo.log(amo.LOG.ADMIN_USER_ANONYMIZED, self.instance,
self.cleaned_data['admin_log'])
profile.anonymize() # This also logs
else:
amo.log(amo.LOG.ADMIN_USER_EDITED, self.instance,
self.cleaned_data['admin_log'], details=self.changes())
log.info('Admin edit user: %s changed fields: %s' %
(self.instance, self.changed_fields()))
return profile
class BlacklistedNameAddForm(forms.Form):
"""Form for adding blacklisted names in bulk fashion."""
names = forms.CharField(widget=forms.Textarea(
attrs={'cols': 40, 'rows': 16}))
def clean_names(self):
names = self.cleaned_data['names'].strip()
if not names:
raise forms.ValidationError(
_('Please enter at least one name to blacklist.'))
names = os.linesep.join(
[s.strip() for s in names.splitlines() if s.strip()])
return names
| name = self.cleaned_data['username']
if not name:
if self.instance.has_anonymous_username():
name = self.instance.username
else:
name = self.instance.anonymize_username()
# All-digits usernames are disallowed since they can be
# confused for user IDs in URLs. (See bug 862121.)
if name.isdigit():
raise forms.ValidationError(
_('Usernames cannot contain only digits.'))
slug_validator(
name, lower=False,
message=_('Enter a valid username consisting of letters, numbers, '
'underscores or hyphens.'))
if BlacklistedName.blocked(name):
raise forms.ValidationError(_('This username cannot be used.'))
# FIXME: Bug 858452. Remove this check when collation of the username
# column is changed to case insensitive.
if (UserProfile.objects.exclude(id=self.instance.id)
.filter(username__iexact=name).exists()):
raise forms.ValidationError(_('This username is already in use.'))
return name |
deserialize_script.rs | extern crate tapyrus;
use tapyrus::util::address::Address;
use tapyrus::network::constants::Network;
use tapyrus::blockdata::script;
use tapyrus::consensus::encode;
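// Fuzz target: parse arbitrary bytes as a script, rebuild it with Builder, and check that both the rebuilt script and its serialization round-trip.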
fn do_test(data: &[u8]) {
let s: Result<script::Script, _> = encode::deserialize(data);
if let Ok(script) = s {
let _: Result<Vec<script::Instruction>, script::Error> = script.instructions().collect();
let mut b = script::Builder::new();
for ins in script.instructions_minimal() {
if ins.is_err() {
return;
}
match ins.ok().unwrap() {
script::Instruction::Op(op) => { b = b.push_opcode(op); }
script::Instruction::PushBytes(bytes) => {
// Any one-byte pushes, except -0, which can be interpreted as numbers, should be
// reserialized as numbers. (For -1 through 16, this will use special ops; for
// others it'll just reserialize them as pushes.)
if bytes.len() == 1 && bytes[0] != 0x80 && bytes[0] != 0x00 {
if let Ok(num) = script::read_scriptint(bytes) {
b = b.push_int(num);
} else {
b = b.push_slice(bytes);
}
} else {
b = b.push_slice(bytes);
}
}
}
}
assert_eq!(b.into_script(), script);
assert_eq!(data, &encode::serialize(&script)[..]);
// Check if valid address and if that address roundtrips.
if let Some(addr) = Address::from_script(&script, Network::Prod) {
assert_eq!(addr.script_pubkey(), script);
}
}
}
#[cfg(feature = "afl")]
#[macro_use] extern crate afl;
#[cfg(feature = "afl")]
fn main() {
fuzz!(|data| {
do_test(&data);
});
}
#[cfg(feature = "honggfuzz")]
#[macro_use] extern crate honggfuzz;
#[cfg(feature = "honggfuzz")]
fn main() {
loop {
fuzz!(|data| {
do_test(data);
});
}
}
#[cfg(test)]
mod tests {
fn extend_vec_from_hex(hex: &str, out: &mut Vec<u8>) {
let mut b = 0;
for (idx, c) in hex.as_bytes().iter().enumerate() {
b <<= 4;
match *c {
b'A'...b'F' => b |= c - b'A' + 10,
b'a'...b'f' => b |= c - b'a' + 10,
b'0'...b'9' => b |= c - b'0',
_ => panic!("Bad hex"),
} | }
}
}
#[test]
fn duplicate_crash() {
let mut a = Vec::new();
extend_vec_from_hex("00", &mut a);
super::do_test(&a);
}
} | if (idx & 1) == 1 {
out.push(b);
b = 0; |
rrdp.rs | //! Parsing the XML representations.
#![cfg(feature = "rrdp")]
use std::{fmt, io, ops, str};
use log::info;
use ring::digest;
use uuid::Uuid;
use crate::uri;
use crate::xml::decode::{Reader, Name, Error};
//------------ NotificationFile ----------------------------------------------
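/// The parsed content of an RRDP notification file: session id, serial, the snapshot reference, and the list of deltas.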
pub struct NotificationFile {
pub session_id: Uuid,
pub serial: u64,
pub snapshot: UriAndHash,
pub deltas: Vec<(u64, UriAndHash)>,
}
impl NotificationFile {
pub fn parse<R: io::BufRead>(reader: R) -> Result<Self, Error> {
let mut reader = Reader::new(reader);
let mut session_id = None;
let mut serial = None;
let mut outer = reader.start(|element| {
if element.name() != NOTIFICATION {
return Err(Error::Malformed)
}
element.attributes(|name, value| match name {
b"version" => {
if value.ascii_into::<u8>()? != 1 {
return Err(Error::Malformed)
}
Ok(())
}
b"session_id" => {
session_id = Some(value.ascii_into()?);
Ok(())
}
b"serial" => {
serial = Some(value.ascii_into()?);
Ok(())
}
_ => Err(Error::Malformed)
})
})?;
let mut snapshot = None;
let mut deltas = Vec::new();
while let Some(mut content) = outer.take_opt_element(&mut reader,
|element| {
match element.name() {
SNAPSHOT => {
if snapshot.is_some() {
return Err(Error::Malformed)
}
let mut uri = None;
let mut hash = None;
element.attributes(|name, value| match name {
b"uri" => {
uri = Some(value.ascii_into()?);
Ok(())
}
b"hash" => {
hash = Some(value.ascii_into()?);
Ok(())
}
_ => Err(Error::Malformed)
})?;
match (uri, hash) {
(Some(uri), Some(hash)) => {
snapshot = Some(UriAndHash::new(uri, hash));
Ok(())
}
_ => Err(Error::Malformed)
}
}
DELTA => {
let mut serial = None;
let mut uri = None;
let mut hash = None;
element.attributes(|name, value| match name {
b"serial" => {
serial = Some(value.ascii_into()?);
Ok(())
}
b"uri" => {
uri = Some(value.ascii_into()?);
Ok(())
}
b"hash" => {
hash = Some(value.ascii_into()?);
Ok(())
}
_ => Err(Error::Malformed)
})?;
match (serial, uri, hash) {
(Some(serial), Some(uri), Some(hash)) => {
deltas.push((serial, UriAndHash::new(uri, hash)));
Ok(())
}
_ => Err(Error::Malformed)
}
}
_ => Err(Error::Malformed)
}
})? {
content.take_end(&mut reader)?;
}
outer.take_end(&mut reader)?;
reader.end()?;
match (session_id, serial, snapshot) {
(Some(session_id), Some(serial), Some(snapshot)) => {
Ok(NotificationFile { session_id, serial, snapshot, deltas })
}
_ => Err(Error::Malformed)
}
}
}
//------------ ProcessSnapshot -----------------------------------------------
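/// A type that can process the publish elements of an RRDP snapshot file as they are parsed.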
pub trait ProcessSnapshot {
type Err: From<Error>;
fn meta(
&mut self,
session_id: Uuid,
serial: u64,
) -> Result<(), Self::Err>;
fn publish(
&mut self,
uri: uri::Rsync,
data: Vec<u8>,
) -> Result<(), Self::Err>;
fn process<R: io::BufRead>(
&mut self,
reader: R
) -> Result<(), Self::Err> {
let mut reader = Reader::new(reader);
let mut session_id = None;
let mut serial = None;
let mut outer = reader.start(|element| {
if element.name() != SNAPSHOT {
info!("Bad outer: not snapshot, but {:?}", element.name());
return Err(Error::Malformed)
}
element.attributes(|name, value| match name {
b"version" => {
if value.ascii_into::<u8>()? != 1 {
info!("Bad version");
return Err(Error::Malformed)
}
Ok(())
}
b"session_id" => {
session_id = Some(value.ascii_into()?);
Ok(())
}
b"serial" => {
serial = Some(value.ascii_into()?);
Ok(())
}
_ => {
info!("Bad attribute on snapshot.");
Err(Error::Malformed)
}
})
})?;
match (session_id, serial) {
(Some(session_id), Some(serial)) => {
self.meta(session_id, serial)?;
}
_ => {
info!("Missing session or serial");
return Err(Error::Malformed.into())
}
}
loop {
let mut uri = None;
let inner = outer.take_opt_element(&mut reader, |element| {
if element.name() != PUBLISH {
info!("Bad inner: not publish");
return Err(Error::Malformed)
}
element.attributes(|name, value| match name {
b"uri" => {
uri = Some(value.ascii_into()?);
Ok(())
}
_ => {
info!("Bad attribute on publish.");
Err(Error::Malformed)
}
})
})?;
let mut inner = match inner {
Some(inner) => inner,
None => break
};
let uri = match uri {
Some(uri) => uri,
None => return Err(Error::Malformed.into())
};
let data = inner.take_text(&mut reader, |text| {
let text: Vec<_> = text.to_ascii()?.as_bytes()
.iter().filter_map(|b| {
if b.is_ascii_whitespace() { None }
else { Some(*b) }
}).collect();
base64::decode(&text).map_err(|_| {
Error::Malformed
})
})?;
self.publish(uri, data)?;
inner.take_end(&mut reader)?;
}
outer.take_end(&mut reader)?;
reader.end()?;
Ok(())
}
}
//------------ ProcessDelta --------------------------------------------------
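/// A type that can process the publish and withdraw elements of an RRDP delta file as they are parsed.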
pub trait ProcessDelta {
type Err: From<Error>;
fn meta(
&mut self,
session_id: Uuid,
serial: u64,
) -> Result<(), Self::Err>;
fn publish(
&mut self,
uri: uri::Rsync,
hash: Option<DigestHex>,
data: Vec<u8>,
) -> Result<(), Self::Err>;
fn withdraw(
&mut self,
uri: uri::Rsync,
hash: DigestHex,
) -> Result<(), Self::Err>;
fn process<R: io::BufRead>(
&mut self,
reader: R
) -> Result<(), Self::Err> {
let mut reader = Reader::new(reader);
let mut session_id = None;
let mut serial = None;
let mut outer = reader.start(|element| {
if element.name() != DELTA {
return Err(Error::Malformed)
}
element.attributes(|name, value| match name {
b"version" => {
if value.ascii_into::<u8>()? != 1 {
return Err(Error::Malformed)
}
Ok(())
}
b"session_id" => {
session_id = Some(value.ascii_into()?);
Ok(())
}
b"serial" => {
serial = Some(value.ascii_into()?);
Ok(())
}
_ => Err(Error::Malformed)
})
})?;
match (session_id, serial) {
(Some(session_id), Some(serial)) => {
self.meta(session_id, serial)?;
}
_ => return Err(Error::Malformed.into()),
}
loop {
let mut action = None;
let mut uri = None;
let mut hash = None;
let inner = outer.take_opt_element(&mut reader, |element| {
match element.name() {
PUBLISH => action = Some(Action::Publish),
WITHDRAW => action = Some(Action::Withdraw),
_ => return Err(Error::Malformed),
};
element.attributes(|name, value| match name {
b"uri" => {
uri = Some(value.ascii_into()?);
Ok(())
}
b"hash" => {
hash = Some(value.ascii_into()?);
Ok(())
}
_ => Err(Error::Malformed) | })
})?;
let mut inner = match inner {
Some(inner) => inner,
None => break
};
let uri = match uri {
Some(uri) => uri,
None => return Err(Error::Malformed.into())
};
match action.unwrap() { // Or we'd have exited already.
Action::Publish => {
let data = inner.take_text(&mut reader, |text| {
let text: Vec<_> = text.to_ascii()?.as_bytes()
.iter().filter_map(|b| {
if b.is_ascii_whitespace() { None }
else { Some(*b) }
}).collect();
base64::decode(&text)
.map_err(|_| Error::Malformed)
})?;
self.publish(uri, hash, data)?;
}
Action::Withdraw => {
let hash = match hash {
Some(hash) => hash,
None => return Err(Error::Malformed.into())
};
self.withdraw(uri, hash)?;
}
}
inner.take_end(&mut reader)?;
}
outer.take_end(&mut reader)?;
reader.end()?;
Ok(())
}
}
//------------ UriAndHash ----------------------------------------------------
#[derive(Clone, Debug)]
pub struct UriAndHash {
uri: uri::Https,
hash: DigestHex,
}
impl UriAndHash {
pub fn new(uri: uri::Https, hash: DigestHex) -> Self {
UriAndHash { uri, hash }
}
pub fn uri(&self) -> &uri::Https {
&self.uri
}
pub fn hash(&self) -> &DigestHex {
&self.hash
}
}
//------------ DigestHex -----------------------------------------------------
/// A helper type to encode a digest as a sequence of hex-digits.
#[derive(Clone, Debug)]
pub struct DigestHex(Vec<u8>);
impl From<Vec<u8>> for DigestHex {
fn from(value: Vec<u8>) -> DigestHex {
DigestHex(value)
}
}
impl From<digest::Digest> for DigestHex {
fn from(value: digest::Digest) -> DigestHex {
DigestHex(Vec::from(value.as_ref()))
}
}
impl str::FromStr for DigestHex {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut res = Vec::new();
let mut s = s.chars();
while let Some(first) = s.next() {
let first = first.to_digit(16).ok_or("invalid digest")?;
let second = s.next().ok_or("invalid digest")?
.to_digit(16).ok_or("invalid digest")?;
res.push((first << 4 | second) as u8);
}
Ok(DigestHex(res))
}
}
impl ops::Deref for DigestHex {
type Target = [u8];
fn deref(&self) -> &[u8] {
self.0.as_ref()
}
}
impl AsRef<[u8]> for DigestHex {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
impl fmt::Display for DigestHex {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for &ch in self.0.as_slice() {
write!(f, "{:02x}", ch)?;
}
Ok(())
}
}
//------------ Action --------------------------------------------------------
enum Action {
Publish,
Withdraw,
}
//------------ Xml Names -----------------------------------------------------
const NS: &[u8] = b"http://www.ripe.net/rpki/rrdp";
const NOTIFICATION: Name = Name::qualified(NS, b"notification");
const SNAPSHOT: Name = Name::qualified(NS, b"snapshot");
const DELTA: Name = Name::qualified(NS, b"delta");
const PUBLISH: Name = Name::qualified(NS, b"publish");
const WITHDRAW: Name = Name::qualified(NS, b"withdraw");
//============ Tests =========================================================
#[cfg(test)]
mod test {
use super::*;
pub struct Test;
impl ProcessSnapshot for Test {
type Err = Error;
fn meta(
&mut self,
_session_id: Uuid,
_serial: u64,
) -> Result<(), Self::Err> {
Ok(())
}
fn publish(
&mut self,
_uri: uri::Rsync,
_data: Vec<u8>,
) -> Result<(), Self::Err> {
Ok(())
}
}
impl ProcessDelta for Test {
type Err = Error;
fn meta(
&mut self,
_session_id: Uuid,
_serial: u64,
) -> Result<(), Self::Err> {
Ok(())
}
fn publish(
&mut self,
_uri: uri::Rsync,
_hash: Option<DigestHex>,
_data: Vec<u8>,
) -> Result<(), Self::Err> {
Ok(())
}
fn withdraw(
&mut self,
_uri: uri::Rsync,
_hash: DigestHex,
) -> Result<(), Self::Err> {
Ok(())
}
}
#[test]
fn ripe_notification() {
NotificationFile::parse(
include_bytes!("../test-data/ripe-notification.xml").as_ref()
).unwrap();
}
#[test]
fn ripe_snapshot() {
<Test as ProcessSnapshot>::process(
&mut Test,
include_bytes!("../test-data/ripe-snapshot.xml").as_ref()
).unwrap();
}
#[test]
fn ripe_delta() {
<Test as ProcessDelta>::process(
&mut Test,
include_bytes!("../test-data/ripe-delta.xml").as_ref()
).unwrap();
}
} | |
unity.py | from __future__ import unicode_literals
from .common import InfoExtractor
from .youtube import YoutubeIE
class | (InfoExtractor):
_VALID_URL = (
r"https?://(?:www\.)?unity3d\.com/learn/tutorials/(?:[^/]+/)*(?P<id>[^/?#&]+)"
)
_TESTS = [
{
"url": "https://unity3d.com/learn/tutorials/topics/animation/animate-anything-mecanim",
"info_dict": {
"id": "jWuNtik0C8E",
"ext": "mp4",
"title": "Live Training 22nd September 2014 - Animate Anything",
"description": "md5:e54913114bd45a554c56cdde7669636e",
"duration": 2893,
"uploader": "Unity",
"uploader_id": "Unity3D",
"upload_date": "20140926",
},
},
{
"url": "https://unity3d.com/learn/tutorials/projects/2d-ufo-tutorial/following-player-camera?playlist=25844",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
youtube_id = self._search_regex(
r'data-video-id="([_0-9a-zA-Z-]+)"', webpage, "youtube ID"
)
return self.url_result(youtube_id, ie=YoutubeIE.ie_key(), video_id=video_id)
| UnityIE |
smph21.rs | #[doc = "Reader of register SMPH21"]
pub type R = crate::R<u32, super::SMPH21>;
#[doc = "Writer for register SMPH21"]
pub type W = crate::W<u32, super::SMPH21>;
#[doc = "Register SMPH21 `reset()`'s with value 0x01"]
impl crate::ResetValue for super::SMPH21 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0x01
}
}
#[doc = "Reader of field `RESERVED1`"]
pub type RESERVED1_R = crate::R<u32, u32>;
#[doc = "Write proxy for field `RESERVED1`"]
pub struct RESERVED1_W<'a> { | }
impl<'a> RESERVED1_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x7fff_ffff << 1)) | (((value as u32) & 0x7fff_ffff) << 1);
self.w
}
}
#[doc = "Reader of field `STAT`"]
pub type STAT_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `STAT`"]
pub struct STAT_W<'a> {
w: &'a mut W,
}
impl<'a> STAT_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl R {
#[doc = "Bits 1:31 - 31:1\\]
Software should not rely on the value of a reserved field. Writing any value other than the reset value may result in undefined behavior."]
#[inline(always)]
pub fn reserved1(&self) -> RESERVED1_R {
RESERVED1_R::new(((self.bits >> 1) & 0x7fff_ffff) as u32)
}
#[doc = "Bit 0 - 0:0\\]
Status when reading: 0: Semaphore is taken 1: Semaphore is available Reading the register causes it to change value to 0. Releasing the semaphore is done by writing 1."]
#[inline(always)]
pub fn stat(&self) -> STAT_R {
STAT_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 1:31 - 31:1\\]
Software should not rely on the value of a reserved field. Writing any value other than the reset value may result in undefined behavior."]
#[inline(always)]
pub fn reserved1(&mut self) -> RESERVED1_W {
RESERVED1_W { w: self }
}
#[doc = "Bit 0 - 0:0\\]
Status when reading: 0: Semaphore is taken 1: Semaphore is available Reading the register causes it to change value to 0. Releasing the semaphore is done by writing 1."]
#[inline(always)]
pub fn stat(&mut self) -> STAT_W {
STAT_W { w: self }
}
} | w: &'a mut W, |
plugins.py | # This file is part of Rubber and thus covered by the GPL
# (c) Emmanuel Beffara, 2002--2006
"""
Mechanisms to dynamically load extra modules to help the LaTeX compilation.
All the modules must be derived from the TexModule class.
"""
import imp
from os.path import *
from msg import _, msg
import sys
class TexModule (object):
"""
This is the base class for modules. Each module should define a class
named 'Module' that derives from this one. The default implementation
provides all required methods with no effects.
"""
def __init__ (self, env, dict):
"""
The constructor receives two arguments: 'env' is the compiling
environment, 'dict' is a dictionary that describes the command that
caused the module to load.
"""
def pre_compile (self):
"""
This method is called before the first LaTeX compilation. It is
supposed to build any file that LaTeX would require to compile the
document correctly. The method must return true on failure.
"""
return 0
def post_compile (self):
"""
This method is called after each LaTeX compilation. It is supposed to
process the compilation results and possibly request a new
compilation. The method must return true on failure.
"""
return 0
def last_compile (self):
"""
This method is called after the last LaTeX compilation.
It is supposed to terminate the compilation for its specific needs.
The method must return true on failure.
"""
return 0
def clean (self):
"""
This method is called when cleaning the compiled files. It is supposed
to remove all the files that this modules generates.
"""
def command (self, cmd, args):
"""
This is called when a directive for the module is found in the source.
The method can raise 'AttributeError' when the directive does not
exist and 'TypeError' if the syntax is wrong. By default, when called
with argument "foo" it calls the method "do_foo" if it exists, and
fails otherwise.
"""
getattr(self, "do_" + cmd)(*args)
def get_errors (self):
"""
This is called if something has failed during an operation performed
by this module. The method returns a generator with items of the same
form as in LaTeXDep.get_errors.
"""
if None:
yield None
class Plugins (object):
"""
This class gathers operations related to the management of external Python
modules. Modules are requested through the `register' method, and
they are searched for first in the current directory, then in the
(possibly) specified Python package (using Python's path).
"""
def __init__ (self, path=None):
"""
Initialize the module set, possibly setting a path name in which
modules will be searched for.
"""
self.modules = {}
if not path:
self.path = [dirname(__file__)]
sys.path.append(self.path[0])
else:
self.path = path
def __getitem__ (self, name):
"""
Return the module object of the given name.
"""
return self.modules[name]
def register (self, name):
|
def clear(self):
"""
Empty the module table, unregistering every module registered. No
modules are unloaded, however, but this has no other effect than
speeding the registration if the modules are loaded again.
"""
self.modules.clear()
class Modules (Plugins):
"""
This class gathers all operations related to the management of modules.
The modules are searched for first in the current directory, then as
scripts in the 'modules' directory in the program's data directory, then
as a Python module in the package `rubber.latex'.
"""
def __init__ (self, env):
#Plugins.__init__(self, rubber.rules.latex.__path__)
Plugins.__init__(self)
self.env = env
self.objects = {}
self.commands = {}
def __getitem__ (self, name):
"""
Return the module object of the given name.
"""
return self.objects[name]
def has_key (self, name):
"""
Check if a given module is loaded.
"""
return self.objects.has_key(name)
def register (self, name, dict={}):
"""
Attempt to register a package with the specified name. If a module is
found, create an object from the module's class called `Module',
passing it the environment and `dict' as arguments, and execute all
delayed commands for this module. The dictionary describes the
command that caused the registration.
"""
if self.has_key(name):
msg.debug(_("module %s already registered") % name)
return 2
# First look for a script
moddir = ""
mod = None
for path in "", join(moddir, "modules"):
file = join(path, name + ".rub")
if exists(file):
mod = ScriptModule(self.env, file)
msg.log(_("script module %s registered") % name)
break
# Then look for a Python module
if not mod:
if Plugins.register(self, name) == 0:
msg.debug(_("no support found for %s") % name)
return 0
mod = self.modules[name].Module(self.env, dict)
msg.log(_("built-in module %s registered") % name)
# Run any delayed commands.
if self.commands.has_key(name):
for (cmd, args, vars) in self.commands[name]:
msg.push_pos(vars)
try:
mod.command(cmd, args)
except AttributeError:
msg.warn(_("unknown directive '%s.%s'") % (name, cmd))
except TypeError:
msg.warn(_("wrong syntax for '%s.%s'") % (name, cmd))
msg.pop_pos()
del self.commands[name]
self.objects[name] = mod
return 1
def clear (self):
"""
Unregister all modules.
"""
Plugins.clear(self)
self.objects = {}
self.commands = {}
def command (self, mod, cmd, args):
"""
Send a command to a particular module. If this module is not loaded,
store the command so that it will be sent when the module is registered.
"""
if self.objects.has_key(mod):
self.objects[mod].command(cmd, args)
else:
if not self.commands.has_key(mod):
self.commands[mod] = []
self.commands[mod].append((cmd, args, self.env.vars.copy()))
| """
Attempt to register a module with the specified name. If an
appropriate module is found, load it and store it in the object's
dictionary. Return 0 if no module was found, 1 if a module was found
and loaded, and 2 if the module was found but already loaded.
"""
if self.modules.has_key(name):
return 2
try:
file, path, descr = imp.find_module(name, [""])
except ImportError:
if not self.path:
return 0
try:
file, path, descr = imp.find_module(name, self.path)
except ImportError:
return 0
module = imp.load_module(name, file, path, descr)
file.close()
self.modules[name] = module
return 1 |
serverwriter.go | // Copyright (c) 2018 Palantir Technologies. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package conjure
import (
"fmt"
"go/token"
"strings"
"github.com/palantir/goastwriter/astgen"
"github.com/palantir/goastwriter/decl"
"github.com/palantir/goastwriter/expression"
"github.com/palantir/goastwriter/statement"
werror "github.com/palantir/witchcraft-go-error"
"github.com/pkg/errors"
"github.com/palantir/conjure-go/v4/conjure-api/conjure/spec"
"github.com/palantir/conjure-go/v4/conjure/transforms"
"github.com/palantir/conjure-go/v4/conjure/types"
"github.com/palantir/conjure-go/v4/conjure/visitors"
"github.com/palantir/conjure-go/v4/conjure/werrorexpressions"
)
const (
registerPrefix = "RegisterRoutes"
errorName = "err"
okName = "ok"
implName = "impl"
// Handler
handlerName = "handler"
// Router
routerVarName = "router"
routerImportPackage = "wrouter"
routerImportClass = "Router"
routerPathParamsMapFunc = "PathParams"
resourceName = "resource"
// Server
serverResourceImportPackage = "wresource"
serverResourceFunctionName = "New"
restImportPackage = "rest"
// Handler
handlerStructNameSuffix = "Handler"
handlerFunctionNamePrefix = "Handle"
// Auth
funcParseBearerTokenHeader = "ParseBearerTokenHeader"
authCookieVar = "authCookie"
// ResponseWriter
responseWriterVarName = "rw"
responseArgVarName = "respArg"
httpPackageName = "http"
responseWriterType = "ResponseWriter"
// Request
requestVarName = "req"
requestVarType = "*" + httpPackageName + ".Request"
requestHeaderFunc = "Header"
requestURLField = "URL"
urlQueryFunc = "Query"
// Codecs
codecsJSON = "codecs.JSON"
codecEncodeFunc = "Encode"
codecDecodeFunc = "Decode"
codecContentTypeFunc = "ContentType"
)
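// ASTForServerRouteRegistration builds the RegisterRoutes<Service> function that registers a route on the wrouter.Router for every endpoint of the service.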
func ASTForServerRouteRegistration(serviceDefinition spec.ServiceDefinition, info types.PkgInfo) ([]astgen.ASTDecl, error) {
info.AddImports(
"github.com/palantir/conjure-go-runtime/conjure-go-contract/codecs",
"github.com/palantir/witchcraft-go-server/rest",
"github.com/palantir/witchcraft-go-server/witchcraft",
"github.com/palantir/witchcraft-go-server/witchcraft/wresource",
"github.com/palantir/witchcraft-go-server/wrouter")
info.SetImports("werror", "github.com/palantir/witchcraft-go-error")
serviceName := serviceDefinition.ServiceName.Name
funcName := registerPrefix + strings.Title(serviceName)
serviceImplName := transforms.Export(serviceName)
body, err := getRegisterRoutesBody(serviceDefinition)
if err != nil {
return nil, err
}
registerRoutesFunc := &decl.Function{
Comment: funcName + " registers handlers for the " + serviceName + " endpoints with a witchcraft wrouter.\n" +
"This should typically be called in a witchcraft server's InitFunc.\n" +
"impl provides an implementation of each endpoint, which can assume the request parameters have been parsed\n" +
"in accordance with the Conjure specification.",
Name: funcName,
FuncType: expression.FuncType{
Params: []*expression.FuncParam{
{
Names: []string{routerVarName},
Type: expression.Type(fmt.Sprintf("%s.%s", routerImportPackage, routerImportClass)),
},
{
Names: []string{implName},
Type: expression.Type(serviceImplName),
},
},
ReturnTypes: []expression.Type{
expression.ErrorType,
},
},
Body: body,
}
components := []astgen.ASTDecl{
registerRoutesFunc,
}
return components, nil
}
func getRegisterRoutesBody(serviceDefinition spec.ServiceDefinition) ([]astgen.ASTStmt, error) {
var body []astgen.ASTStmt
// Create the handler struct
body = append(body, &statement.Assignment{
LHS: []astgen.ASTExpr{
expression.VariableVal(handlerName),
},
Tok: token.DEFINE,
RHS: createHandlerSpec(serviceDefinition),
})
// Create the witchcraft resource
body = append(body, &statement.Assignment{
LHS: []astgen.ASTExpr{
expression.VariableVal(resourceName),
},
Tok: token.DEFINE,
RHS: expression.NewCallFunction(serverResourceImportPackage, serverResourceFunctionName, []astgen.ASTExpr{
expression.StringVal(strings.ToLower(serviceDefinition.ServiceName.Name)),
expression.VariableVal(routerVarName),
}...),
})
// For each endpoint, register a route on the provided router
// if err := resource.Get(...); err != nil {
// return werror.Wrap(err, ...)
// }
// TODO(bmoylan): Register safe params - Nothing in the conjure def tells us what is safe or unsafe. How do we know? Kevin says this is markers
for _, endpoint := range serviceDefinition.Endpoints {
endpointTitleName := strings.Title(string(endpoint.EndpointName))
stmt := statement.If{ | token.DEFINE,
expression.NewCallFunction(resourceName, getResourceFunction(endpoint), []astgen.ASTExpr{
expression.StringVal(endpointTitleName),
expression.StringVal(getPathToRegister(endpoint)),
astForRestJSONHandler(expression.NewSelector(expression.VariableVal(handlerName), "Handle"+endpointTitleName)),
}...),
),
Cond: &expression.Binary{
LHS: expression.VariableVal(errorName),
Op: token.NEQ,
RHS: expression.Nil,
},
Body: []astgen.ASTStmt{
&statement.Return{
Values: []astgen.ASTExpr{
werrorexpressions.CreateWrapWErrorExpression(errorName, "failed to add route", map[string]string{"routeName": endpointTitleName}),
},
},
},
}
body = append(body, &stmt)
}
// Return nil if everything registered
body = append(body, &statement.Return{
Values: []astgen.ASTExpr{expression.Nil},
})
return body, nil
}
func createHandlerSpec(serviceDefinition spec.ServiceDefinition) astgen.ASTExpr {
return expression.NewCompositeLit(
expression.Type(getHandlerStuctName(serviceDefinition)),
expression.NewKeyValue(implName, expression.VariableVal(implName)),
)
}
func getPathToRegister(endpointDefinition spec.EndpointDefinition) string {
return string(endpointDefinition.HttpPath)
}
func getResourceFunction(endpointDefinition spec.EndpointDefinition) string {
switch endpointDefinition.HttpMethod {
case spec.HttpMethodGet:
return "Get"
case spec.HttpMethodPost:
return "Post"
case spec.HttpMethodPut:
return "Put"
case spec.HttpMethodDelete:
return "Delete"
default:
return "Unknown"
}
}
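// AstForServerInterface generates the Go interface declaration for the Conjure service definition.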
func AstForServerInterface(serviceDefinition spec.ServiceDefinition, info types.PkgInfo) ([]astgen.ASTDecl, error) {
serviceName := serviceDefinition.ServiceName.Name
interfaceAST, _, err := serverServiceInterfaceAST(serviceDefinition, info, serviceASTConfig{})
if err != nil {
return nil, errors.Wrapf(err, "failed to generate interface for service %q", serviceName)
}
components := []astgen.ASTDecl{
interfaceAST,
}
return components, nil
}
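// AstForServerFunctionHandler generates the handler struct and one Handle<Endpoint> method per endpoint of the service.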
func AstForServerFunctionHandler(serviceDefinition spec.ServiceDefinition, info types.PkgInfo) ([]astgen.ASTDecl, error) {
var components []astgen.ASTDecl
implStructs := getHandlerStruct(serviceDefinition)
components = append(components, implStructs)
methods, err := getHandleMethods(serviceDefinition, info)
if err != nil {
return nil, err
}
for _, method := range methods {
components = append(components, method)
}
return components, nil
}
func getHandleMethods(serviceDefinition spec.ServiceDefinition, info types.PkgInfo) ([]*decl.Method, error) {
var methods []*decl.Method
for _, endpoint := range serviceDefinition.Endpoints {
method, err := getHandleMethod(serviceDefinition, endpoint, info)
if err != nil {
return nil, err
}
methods = append(methods, method)
}
return methods, nil
}
func getHandleMethod(serviceDefinition spec.ServiceDefinition, endpoint spec.EndpointDefinition, info types.PkgInfo) (*decl.Method, error) {
info.AddImports("net/http")
body, err := getHandleMethodBody(serviceDefinition, endpoint, info)
if err != nil {
return nil, err
}
receiverName := getReceiverName(serviceDefinition)
titleEndpoint := strings.Title(string(endpoint.EndpointName))
methods := &decl.Method{
ReceiverName: receiverName,
ReceiverType: expression.Type(getHandlerStuctName(serviceDefinition)).Pointer(),
Function: decl.Function{
Name: handlerFunctionNamePrefix + titleEndpoint,
FuncType: expression.FuncType{
Params: []*expression.FuncParam{
{
Names: []string{responseWriterVarName},
Type: expression.Type(strings.Join([]string{httpPackageName, responseWriterType}, ".")),
},
{
Names: []string{requestVarName},
Type: expression.Type(requestVarType),
},
},
ReturnTypes: []expression.Type{expression.ErrorType},
},
Body: body,
},
}
return methods, nil
}
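// getHandleMethodBody builds the statements that parse auth, path, query, header and body parameters and then call the interface implementation, returning its result.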
func getHandleMethodBody(serviceDefinition spec.ServiceDefinition, endpoint spec.EndpointDefinition, info types.PkgInfo) ([]astgen.ASTStmt, error) {
var body []astgen.ASTStmt
pathParams, err := visitors.GetPathParams(endpoint.Args)
if err != nil {
return nil, err
}
headerParams, err := visitors.GetHeaderParams(endpoint.Args)
if err != nil {
return nil, err
}
queryParams, err := visitors.GetQueryParams(endpoint.Args)
if err != nil {
return nil, err
}
bodyParams, err := visitors.GetBodyParams(endpoint.Args)
if err != nil {
return nil, err
}
var bodyParam *visitors.ArgumentDefinitionBodyParam
switch len(bodyParams) {
case 0:
case 1:
bodyParam = &bodyParams[0]
default:
return nil, errors.New("only 1 body param is supported: Conjure IR generator should have caught this")
}
authStatements, err := getAuthStatements(endpoint.Auth, info)
if err != nil {
return nil, err
}
body = append(body, authStatements...)
pathParamStatements, err := getPathParamStatements(pathParams, info)
if err != nil {
return nil, err
}
body = append(body, pathParamStatements...)
queryParamStatements, err := getQueryParamStatements(queryParams, info)
if err != nil {
return nil, err
}
body = append(body, queryParamStatements...)
headerParamStatements, err := getHeaderParamStatements(headerParams, info)
if err != nil {
return nil, err
}
body = append(body, headerParamStatements...)
bodyParamStatements, err := getBodyParamStatements(bodyParam, info)
if err != nil {
return nil, err
}
body = append(body, bodyParamStatements...)
varsToPassIntoImpl := []astgen.ASTExpr{expression.NewCallFunction(requestVarName, "Context")}
if endpoint.Auth != nil {
if headerAuth, err := visitors.GetPossibleHeaderAuth(*endpoint.Auth); err != nil {
return nil, err
} else if headerAuth != nil {
varsToPassIntoImpl = append(varsToPassIntoImpl, expression.NewCallExpression(
expression.Type(types.Bearertoken.GoType(info)),
expression.VariableVal(authHeaderVar),
))
}
if cookieAuth, err := visitors.GetPossibleCookieAuth(*endpoint.Auth); err != nil {
return nil, err
} else if cookieAuth != nil {
varsToPassIntoImpl = append(varsToPassIntoImpl, expression.VariableVal(cookieTokenVar))
}
}
for _, arg := range endpoint.Args {
varsToPassIntoImpl = append(varsToPassIntoImpl, expression.VariableVal(transforms.SafeName(string(arg.ArgName))))
}
returnStatements, err := getReturnStatements(serviceDefinition, endpoint, varsToPassIntoImpl, info)
if err != nil {
return nil, err
}
body = append(body, returnStatements...)
return body, nil
}
func getReturnStatements(
serviceDefinition spec.ServiceDefinition,
endpoint spec.EndpointDefinition,
varsToPassIntoImpl []astgen.ASTExpr,
info types.PkgInfo,
) ([]astgen.ASTStmt, error) {
var body []astgen.ASTStmt
receiverName := getReceiverName(serviceDefinition)
endpointName := string(endpoint.EndpointName)
endpointNameFirstLetterUpper := strings.Title(endpointName)
// This makes the call to the interface
makeFunctionCall := expression.NewCallFunction(receiverName+"."+implName, endpointNameFirstLetterUpper, varsToPassIntoImpl...)
if endpoint.Returns == nil {
// The endpoint doesn't return anything, just return the interface call
body = append(body, &statement.Return{
Values: []astgen.ASTExpr{makeFunctionCall},
})
return body, nil
}
// Make the call
body = append(body, &statement.Assignment{
LHS: []astgen.ASTExpr{
expression.VariableVal(responseArgVarName),
expression.VariableVal(errorName),
},
Tok: token.DEFINE,
RHS: makeFunctionCall,
})
// Return an error if present
body = append(body, getIfErrNotNilReturnErrExpression())
var codec types.Typer
if isBinary, err := isBinaryType(*endpoint.Returns); err != nil {
return nil, err
} else if isBinary {
codec = types.CodecBinary
} else {
codec = types.CodecJSON
}
info.AddImports(codec.ImportPaths()...)
body = append(body, statement.NewExpression(&expression.CallExpression{
Function: &expression.Selector{
Receiver: expression.NewCallFunction(responseWriterVarName, "Header"),
Selector: "Add",
},
Args: []astgen.ASTExpr{
expression.StringVal("Content-Type"),
expression.NewCallFunction(codec.GoType(info), codecContentTypeFunc),
},
}))
// Return error from writing object into response
body = append(body, &statement.Return{
Values: []astgen.ASTExpr{
expression.NewCallFunction(codec.GoType(info), codecEncodeFunc,
expression.VariableVal(responseWriterVarName),
expression.VariableVal(responseArgVarName),
),
},
})
return body, nil
}
func getBodyParamStatements(bodyParam *visitors.ArgumentDefinitionBodyParam, info types.PkgInfo) ([]astgen.ASTStmt, error) {
if bodyParam == nil {
return nil, nil
}
var body []astgen.ASTStmt
argName := transforms.SafeName(string(bodyParam.ArgumentDefinition.ArgName))
typer, err := visitors.NewConjureTypeProviderTyper(bodyParam.ArgumentDefinition.Type, info)
if err != nil {
typJSON, _ := bodyParam.ArgumentDefinition.Type.MarshalJSON()
return nil, errors.Wrapf(err, "failed to process return type %s", string(typJSON))
}
info.AddImports(typer.ImportPaths()...)
if isBinary, err := isBinaryType(bodyParam.ArgumentDefinition.Type); err != nil {
return nil, err
} else if isBinary {
// If the body argument is binary, pass req.Body directly to the impl.
body = append(body, &statement.Assignment{
LHS: []astgen.ASTExpr{expression.VariableVal(argName)},
Tok: token.DEFINE,
RHS: expression.NewSelector(expression.VariableVal(requestVarName), "Body"),
})
} else {
// If the request is not binary, it is JSON. Unmarshal the req.Body.
// Create the empty type of this object
body = append(body, statement.NewDecl(decl.NewVar(argName, expression.Type(typer.GoType(info)))))
// Decode request
body = append(body, &statement.If{
Init: &statement.Assignment{
LHS: []astgen.ASTExpr{expression.VariableVal(errorName)},
Tok: token.DEFINE,
RHS: expression.NewCallFunction(
codecsJSON,
codecDecodeFunc,
expression.NewSelector(expression.VariableVal(requestVarName), "Body"),
expression.NewUnary(token.AND, expression.VariableVal(argName))),
},
Cond: getIfErrNotNilExpression(),
Body: []astgen.ASTStmt{statement.NewReturn(generateNewRestError("StatusBadRequest"))},
})
}
return body, nil
}
// rest.NewError(err, rest.StatusCode(http.$statusCode))
func generateNewRestError(statusCode string) *expression.CallExpression {
return expression.NewCallFunction(restImportPackage, "NewError",
expression.VariableVal(errorName),
expression.NewCallFunction(restImportPackage, "StatusCode",
expression.NewSelector(
expression.VariableVal(httpPackageName),
statusCode,
),
),
)
}
func getAuthStatements(auth *spec.AuthType, info types.PkgInfo) ([]astgen.ASTStmt, error) {
var body []astgen.ASTStmt
if auth == nil {
return body, nil
}
if headerAuth, err := visitors.GetPossibleHeaderAuth(*auth); err != nil {
return nil, err
} else if headerAuth != nil {
body = append(body,
// authHeader, err := rest.ParseBearerTokenHeader(req)
// if err != nil {
// return rest.NewError(err, rest.StatusCode(http.StatusForbidden))
// }
&statement.Assignment{
LHS: []astgen.ASTExpr{
expression.VariableVal(authHeaderVar),
expression.VariableVal(errorName),
},
Tok: token.DEFINE,
RHS: expression.NewCallFunction(restImportPackage, funcParseBearerTokenHeader, expression.VariableVal(requestVarName)),
},
&statement.If{
Cond: getIfErrNotNilExpression(),
Body: []astgen.ASTStmt{statement.NewReturn(generateNewRestError("StatusForbidden"))},
},
)
return body, nil
}
if cookieAuth, err := visitors.GetPossibleCookieAuth(*auth); err != nil {
return nil, err
} else if cookieAuth != nil {
// authCookie, err := req.Cookie("P_TOKEN")
// if err != nil {
// return rest.NewError(err, rest.StatusCode(http.StatusForbidden))
// }
// cookieToken := bearertoken.Token(authCookie.Value)
body = append(body,
&statement.Assignment{
LHS: []astgen.ASTExpr{
expression.VariableVal(authCookieVar),
expression.VariableVal(errorName),
},
Tok: token.DEFINE,
RHS: expression.NewCallFunction(requestVarName, "Cookie", expression.StringVal(cookieAuth.CookieName)),
},
&statement.If{
Cond: getIfErrNotNilExpression(),
Body: []astgen.ASTStmt{statement.NewReturn(
// rest.NewError(err, rest.StatusCode(http.StatusForbidden))
expression.NewCallFunction(restImportPackage, "NewError",
expression.VariableVal(errorName),
expression.NewCallFunction(restImportPackage, "StatusCode",
expression.NewSelector(
expression.VariableVal(httpPackageName),
"StatusForbidden",
),
),
),
)},
},
statement.NewAssignment(
expression.VariableVal(cookieTokenVar),
token.DEFINE,
expression.NewCallExpression(expression.Type(types.Bearertoken.GoType(info)),
expression.NewSelector(expression.VariableVal(authCookieVar), "Value"),
),
),
)
return body, nil
}
return nil, werror.Error("Unrecognized auth type", werror.SafeParam("authType", auth))
}
func getPathParamStatements(pathParams []visitors.ArgumentDefinitionPathParam, info types.PkgInfo) ([]astgen.ASTStmt, error) {
if len(pathParams) == 0 {
return nil, nil
}
var body []astgen.ASTStmt
// Validate path params
pathParamVar := "pathParams"
// Use call back to get the path params for this request
body = append(body, &statement.Assignment{
LHS: []astgen.ASTExpr{
expression.VariableVal(pathParamVar),
},
Tok: token.DEFINE,
RHS: expression.NewCallFunction(routerImportPackage, routerPathParamsMapFunc, expression.VariableVal(requestVarName)),
}, &statement.If{
Cond: &expression.Binary{
LHS: expression.VariableVal(pathParamVar),
Op: token.EQL,
RHS: expression.Nil,
},
Body: []astgen.ASTStmt{&statement.Return{Values: []astgen.ASTExpr{
werrorexpressions.CreateWErrorExpression("path params not found on request: ensure this endpoint is registered with wrouter", nil),
}}},
})
for _, pathParam := range pathParams {
arg := pathParam.ArgumentDefinition
isString, err := visitors.IsSpecificConjureType(arg.Type, visitors.IsString)
if err != nil {
return nil, err
}
var strVar expression.VariableVal
if isString {
strVar = expression.VariableVal(transforms.SafeName(string(arg.ArgName)))
} else {
strVar = expression.VariableVal(arg.ArgName + "Str")
}
// For each path param, pull out the value and if it is present in the map
// argNameStr, ok := pathParams["argName"]
body = append(body, &statement.Assignment{
LHS: []astgen.ASTExpr{
strVar,
expression.VariableVal("ok"),
},
Tok: token.DEFINE,
RHS: &expression.Index{
Receiver: expression.VariableVal(pathParamVar),
Index: expression.StringVal(visitors.GetParamID(arg)),
},
})
// Check if the param does not exist
// if !ok { return werror... }
errorIfNotPresent := werrorexpressions.CreateWErrorExpression("path param not present", map[string]string{"pathParamName": string(arg.ArgName)})
createWError := &statement.Assignment{
LHS: []astgen.ASTExpr{expression.VariableVal(errorName)},
Tok: token.DEFINE,
RHS: errorIfNotPresent,
}
body = append(body, &statement.If{
Cond: expression.NewUnary(token.NOT, expression.VariableVal(okName)),
Body: []astgen.ASTStmt{
createWError,
&statement.Return{Values: []astgen.ASTExpr{generateNewRestError("StatusBadRequest")}},
},
})
// type-specific unmarshal behavior
if !isString {
argName := spec.ArgumentName(transforms.SafeName(string(arg.ArgName)))
paramStmts, err := visitors.StatementsForHTTPParam(argName, arg.Type, strVar, info)
if err != nil {
return nil, err
}
body = append(body, paramStmts...)
}
}
return body, nil
}
func getHeaderParamStatements(headerParams []visitors.ArgumentDefinitionHeaderParam, info types.PkgInfo) ([]astgen.ASTStmt, error) {
var body []astgen.ASTStmt
for _, headerParam := range headerParams {
arg := headerParam.ArgumentDefinition
// Pull out the header from the request
// req.Header.Get("paramID")
getHeader := &expression.CallExpression{
Function: &expression.Selector{
Receiver: &expression.Selector{
Receiver: expression.VariableVal(requestVarName),
Selector: requestHeaderFunc,
},
Selector: "Get",
},
Args: []astgen.ASTExpr{
expression.StringVal(visitors.GetParamID(headerParam.ArgumentDefinition)),
},
}
// type-specific unmarshal behavior
argName := spec.ArgumentName(transforms.SafeName(string(arg.ArgName)))
paramStmts, err := visitors.StatementsForHTTPParam(argName, arg.Type, getHeader, info)
if err != nil {
return nil, err
}
body = append(body, paramStmts...)
}
return body, nil
}
func getQueryParamStatements(queryParams []visitors.ArgumentDefinitionQueryParam, info types.PkgInfo) ([]astgen.ASTStmt, error) {
var body []astgen.ASTStmt
for _, queryParam := range queryParams {
arg := queryParam.ArgumentDefinition
// Pull out the query param from the request URL
// req.URL.Query.Get("paramID")
getQuery, err := getQueryFetchExpression(queryParam)
if err != nil {
return nil, err
}
ifErrNotNilReturnErrStatement("err", nil)
argName := spec.ArgumentName(transforms.SafeName(string(arg.ArgName)))
paramStmts, err := visitors.StatementsForHTTPParam(argName, arg.Type, getQuery, info)
if err != nil {
return nil, err
}
body = append(body, paramStmts...)
}
return body, nil
}
func getQueryFetchExpression(queryParam visitors.ArgumentDefinitionQueryParam) (astgen.ASTExpr, error) {
arg := queryParam.ArgumentDefinition
typeProvider, err := visitors.NewConjureTypeProvider(arg.Type)
if err != nil {
return nil, err
}
if typeProvider.IsSpecificType(visitors.IsSet) || typeProvider.IsSpecificType(visitors.IsList) {
// req.URL.Query()["paramID"]
selector := visitors.GetParamID(queryParam.ArgumentDefinition)
return expression.NewIndex(&expression.CallExpression{
Function: &expression.Selector{
Receiver: &expression.Selector{
Receiver: expression.VariableVal(requestVarName),
Selector: requestURLField,
},
Selector: urlQueryFunc,
},
}, expression.StringVal(selector)), nil
}
// req.URL.Query.Get("paramID")
return &expression.CallExpression{
Function: &expression.Selector{
Receiver: &expression.CallExpression{
Function: &expression.Selector{
Receiver: &expression.Selector{
Receiver: expression.VariableVal(requestVarName),
Selector: requestURLField,
},
Selector: urlQueryFunc,
},
},
Selector: "Get",
},
Args: []astgen.ASTExpr{
expression.StringVal(visitors.GetParamID(queryParam.ArgumentDefinition)),
},
}, nil
}
func getHandlerStruct(serviceDefinition spec.ServiceDefinition) *decl.Struct {
return &decl.Struct{
Name: getHandlerStuctName(serviceDefinition),
StructType: expression.StructType{
Fields: []*expression.StructField{
{
Name: implName,
Type: expression.Type(serviceDefinition.ServiceName.Name),
},
},
},
}
}
func getIfErrNotNilReturnErrExpression() astgen.ASTStmt {
return &statement.If{
Cond: getIfErrNotNilExpression(),
Body: []astgen.ASTStmt{&statement.Return{Values: []astgen.ASTExpr{expression.VariableVal(errorName)}}},
}
}
func getIfErrNotNilExpression() astgen.ASTExpr {
return &expression.Binary{
LHS: expression.VariableVal(errorName),
Op: token.NEQ,
RHS: expression.Nil,
}
}
func getHandlerStuctName(serviceDefinition spec.ServiceDefinition) string {
name := serviceDefinition.ServiceName.Name
firstCharLower := strings.ToLower(string(name[0]))
return strings.Join([]string{firstCharLower, name[1:], handlerStructNameSuffix}, "")
}
func getReceiverName(serviceDefinition spec.ServiceDefinition) string {
return string(getHandlerStuctName(serviceDefinition)[0])
}
// rest.NewJSONHandler(funcExpr, rest.StatusCodeMapper, rest.ErrHandler)
func astForRestJSONHandler(funcExpr astgen.ASTExpr) astgen.ASTExpr {
return expression.NewCallFunction(restImportPackage, "NewJSONHandler",
funcExpr,
expression.NewSelector(expression.VariableVal(restImportPackage), "StatusCodeMapper"),
expression.NewSelector(expression.VariableVal(restImportPackage), "ErrHandler"),
)
} | Init: statement.NewAssignment(
expression.VariableVal(errorName), |
tmpl.go | package main
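// authTmpl renders a page with a single button that redirects the browser to the LINE Notify OAuth2 authorize endpoint.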
const authTmpl = `
<!DOCTYPE html>
<html lang="tw">
<head>
<title></title>
<meta charset="UTF-8"> | var URL = 'https://notify-bot.line.me/oauth/authorize?';
URL += 'response_type=code';
URL += '&client_id={{.ClientID}}';
URL += '&redirect_uri={{.CallbackURL}}';
URL += '&scope=notify';
URL += '&state=NO_STATE';
window.location.href = URL;
}
</script>
</head>
<body>
<button onclick="oAuth2();"> 連結到 LineNotify 按鈕 </button>
</body>
` | <meta name="viewport" content="width=device-width, initial-scale=1">
<script>
function oAuth2() { |
amqp-connection-options.storage.ts | import { AMQPConnectionOptions } from '../../interface';
export class AMQConnectionOptionsStorage {
/**
* Add Queue Module Options to storage for connection
*
* @param {string} name Name of the connection, will be used as key
* @param {Options} options The options object
*/
public static add(name: string, options: AMQPConnectionOptions): void {
this.storage.set(name, options);
}
/**
* Retrieve stored options from storage for a connection
*
* @param {string} name Name of the connection
*
* @returns {Options | null} The stored connection or null
*/
public static get(name: string): AMQPConnectionOptions | null {
if (!name) {
return null;
}
return this.storage.get(name) || null;
}
/**
* Get all connection keys | public static getKeys(): string[] {
return Array.from(this.storage.keys());
}
private static readonly storage = new Map<string, AMQPConnectionOptions>();
} | *
* @returns {string[]}
*/ |
StudentsManager.js | import React from 'react';
import { Helmet } from 'react-helmet';
import { Box, Container } from '@material-ui/core';
import LectureList from 'src/components/customer/LectureList';
import CustomerListToolbar from 'src/components/customer/CustomerListToolbar';
export default function StudentsManager () {
return (
<>
<Helmet>
<title>Students | 1984</title>
</Helmet>
<Box
sx={{
backgroundColor: 'background.default',
minHeight: '100%',
py: 3
}}
>
<Container maxWidth={false}>
<CustomerListToolbar />
<Box sx={{ pt: 3 }}>
<LectureList/> | </Box>
</>
);
} | </Box>
</Container> |
getRunAppsInfo.py | #!/usr/bin/env python3
"""This module provides the PID and the log file name of the running DQM
applications (consumers), thus completing the information generated by
ExtractAppInfoFromXML.
When used as a script the following options are accepted:
-f Show all columns
-h show headers
"""
from __future__ import print_function
import sys
import os.path
import getopt as gop
import ExtractAppInfoFromXML as appinf
# ssh srv-c2d05-18.cms ps -eo pid,cmd | grep 22101
################################################################################
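# Return the PID of the process that matches the given port on the given server ("App Not Running" if absent, -1 on error).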
def getAppPID(srv,port):
try:
print("Connecting to server: "+srv+" and fetching PID for application running on port: "+port)
cf=os.popen('ssh '+srv+' ps -eo pid,cmd | grep '+port)
l=cf.readline();
if l=="":
cf.close()
return "App Not Running"
else:
pidv=l.split()
cf.close();
return pidv[0]
except:
sys.stderr.write( "Something went really bad\n" )
return -1
################################################################################
def getAppLogFileName(srv,pid):
# try:
#pid=getAppPID(srv,port)
if pid=="App Not Running":
return "No active log file"
else:
print("Connecting to server: "+srv+" and fetching LogFile for application with PID: "+pid)
cf=os.popen('ssh '+srv+' ls -l /tmp | grep '+pid)
l=cf.readline();
if l=="":
cf.close()
return "App Not Running???"
else:
logfilev=l.split()
cf.close();
return logfilev[-1]
# except Exception as e:
# sys.stderr.write( "Something went really wrong\n" + str(e))
# return -1
################################################################################
def | (filename):
(table,grid)=appinf.getAppInfo(filename,2,1,3,4)
for apps in grid:
apps.insert(3,getAppPID(apps[1],apps[2]))
apps.insert(4,getAppLogFileName(apps[1],apps[3]))
return grid
################################################################################
#Script operation #
################################################################################
if __name__ == "__main__":
fullinfo=False
headers=False
try:
(args,filename)=gop.getopt(sys.argv[1:],"hf",["help"])
except gop.GetoptError:
sys.stderr.write( "Syntax Error: unrecognised option\n" )
sys.stderr.write( __doc__ )
sys.exit(2)
for item in args:
if item[0]=="-h":
headers=True
elif item[0]=="-f":
fullinfo=True
elif item[0]=="--help":
sys.stdout.write(__doc__)
sys.exit(2)
if len(filename)==0:
sys.stderr.write( "\nERROR: xdaq XML config file name not present, please specify\n\n" )
sys.stdout.write(__doc__)
sys.exit(2)
elif len(filename) > 1:
sys.stderr.write( "\nERROR: Too many file names or other arguments, please specify only 1\n\n" )
sys.stdout.write(__doc__)
sys.exit(2)
elif not os.path.exists(filename[0]):
sys.stderr.write( "\nERROR: xdaq XML config file does not exist please verify\n\n" )
sys.stdout.write(__doc__)
sys.exit(2)
grid=getRunningAppsInfo(filename[0])
if fullinfo:
if headers:
grid.insert(0,["Application","Server","Port","PID","LogfileName","App Config File"])
else:
if headers:
i=0;
for record in grid:
newrecord=[record[0],record[3],record[4]]
grid[i]=newrecord
del record
i+=1
grid.insert(0,["Application","PID","LogfileName"])
else:
i=0;
for record in grid:
newrecord=[record[0],record[3],record[4]]
grid[i]=newrecord
del record
i+=1
appinf.printGrid(grid)
| getRunningAppsInfo |
foreign.spec.ts | // Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
import expect = require('expect.js');
import {
uuid
} from '@jupyterlab/coreutils';
import {
KernelMessage, Session
} from '@jupyterlab/services';
import {
Signal
} from '@phosphor/signaling';
import {
Panel
} from '@phosphor/widgets';
import {
IClientSession
} from '@jupyterlab/apputils';
import {
ForeignHandler
} from '@jupyterlab/console';
import {
CodeCellModel, CodeCell
} from '@jupyterlab/cells';
import {
createCodeCellFactory
} from '../notebook/utils';
import {
createClientSession, defaultRenderMime
} from '../utils';
class TestParent extends Panel implements ForeignHandler.IReceiver {
addCell(cell: CodeCell): void {
this.addWidget(cell);
}
}
class | extends ForeignHandler {
injected = new Signal<this, void>(this);
received = new Signal<this, void>(this);
rejected = new Signal<this, void>(this);
methods: string[] = [];
protected onIOPubMessage(sender: IClientSession, msg: KernelMessage.IIOPubMessage): boolean {
let injected = super.onIOPubMessage(sender, msg);
this.received.emit(void 0);
if (injected) {
this.injected.emit(void 0);
} else {
// If the message was not injected but otherwise would have been, emit
// a rejected signal. This should only happen if `enabled` is `false`.
let session = (msg.parent_header as KernelMessage.IHeader).session;
let msgType = msg.header.msg_type;
if (session !== this.session.kernel.clientId && relevantTypes.has(msgType)) {
this.rejected.emit(void 0);
}
}
return injected;
}
}
const rendermime = defaultRenderMime();
function cellFactory(): CodeCell {
let contentFactory = createCodeCellFactory();
let model = new CodeCellModel({});
let cell = new CodeCell({ model, rendermime, contentFactory });
return cell;
}
const relevantTypes = [
'execute_input',
'execute_result',
'display_data',
'stream',
'error',
'clear_output'
].reduce((acc, val) => {
acc.add(val);
return acc;
}, new Set<string>());
describe('@jupyterlab/console', () => {
describe('ForeignHandler', () => {
let local: Session.ISession;
let foreign: Session.ISession;
let handler: TestHandler;
let session: IClientSession;
before(() => {
let path = uuid();
let sessions = [Session.startNew({ path }), Session.startNew({ path })];
return Promise.all(sessions).then(([one, two]) => {
local = one;
foreign = two;
}).then(() => {
return createClientSession({ path: local.path });
}).then(s => {
session = s;
return s.initialize();
});
});
beforeEach(() => {
let parent = new TestParent();
handler = new TestHandler({ session, parent, cellFactory });
});
afterEach(() => {
handler.dispose();
});
after(() => {
local.dispose();
foreign.dispose();
return session.shutdown().then(() => {
session.dispose();
});
});
describe('#constructor()', () => {
it('should create a new foreign handler', () => {
expect(handler).to.be.a(ForeignHandler);
});
});
describe('#enabled', () => {
it('should default to `true`', () => {
expect(handler.enabled).to.be(true);
});
it('should allow foreign cells to be injected if `true`', done => {
let code = 'print("#enabled:true")';
handler.injected.connect(() => { done(); });
foreign.kernel.requestExecute({ code, stop_on_error: true });
});
it('should reject foreign cells if `false`', done => {
let code = 'print("#enabled:false")';
handler.enabled = false;
handler.rejected.connect(() => { done(); });
foreign.kernel.requestExecute({ code, stop_on_error: true });
});
});
describe('#isDisposed', () => {
it('should indicate whether the handler is disposed', () => {
expect(handler.isDisposed).to.be(false);
handler.dispose();
expect(handler.isDisposed).to.be(true);
});
});
describe('#session', () => {
it('should be a client session object', () => {
expect(handler.session.path).to.ok();
});
});
describe('#parent', () => {
it('should be set upon instantiation', () => {
let parent = new TestParent();
handler = new TestHandler({
session: handler.session, parent, cellFactory
});
expect(handler.parent).to.be(parent);
});
});
describe('#dispose()', () => {
it('should dispose the resources held by the handler', () => {
expect(handler.isDisposed).to.be(false);
handler.dispose();
expect(handler.isDisposed).to.be(true);
});
it('should be safe to call multiple times', () => {
expect(handler.isDisposed).to.be(false);
handler.dispose();
handler.dispose();
expect(handler.isDisposed).to.be(true);
});
});
describe('#onIOPubMessage()', () => {
it('should be called when messages come through', done => {
let code = 'print("onIOPubMessage:disabled")';
handler.enabled = false;
handler.received.connect(() => { done(); });
foreign.kernel.requestExecute({ code, stop_on_error: true });
});
it('should inject relevant cells into the parent', done => {
let code = 'print("#onIOPubMessage:enabled")';
let parent = handler.parent as TestParent;
expect(parent.widgets.length).to.be(0);
handler.injected.connect(() => {
expect(parent.widgets.length).to.be.greaterThan(0);
done();
});
foreign.kernel.requestExecute({ code, stop_on_error: true });
});
it('should not reject relevant iopub messages', done => {
let code = 'print("#onIOPubMessage:relevant")';
let called = 0;
handler.rejected.connect(() => {
done(new Error('rejected relevant iopub message'));
});
handler.injected.connect(() => {
if (++called === 2) {
done();
}
});
foreign.kernel.requestExecute({ code, stop_on_error: true });
});
});
});
});
| TestHandler |
BACnetObjectType.go | //
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package model
import (
"encoding/xml"
"fmt"
"github.com/apache/plc4x/plc4go/internal/plc4go/spi/utils"
"io"
)
// Code generated by build-utils. DO NOT EDIT.
type BACnetObjectType uint16
type IBACnetObjectType interface {
Serialize(io utils.WriteBuffer) error
xml.Marshaler
xml.Unmarshaler
}
const (
BACnetObjectType_ANALOG_INPUT BACnetObjectType = 0x000
BACnetObjectType_ANALOG_OUTPUT BACnetObjectType = 0x001
BACnetObjectType_ANALOG_VALUE BACnetObjectType = 0x002
BACnetObjectType_BINARY_INPUT BACnetObjectType = 0x003
BACnetObjectType_BINARY_OUTPUT BACnetObjectType = 0x004
BACnetObjectType_BINARY_VALUE BACnetObjectType = 0x005
BACnetObjectType_MULTISTATE_INPUT BACnetObjectType = 0x00D
BACnetObjectType_MULTISTATE_OUTPUT BACnetObjectType = 0x00E
BACnetObjectType_MULTISTATE_VALUE BACnetObjectType = 0x013
BACnetObjectType_SCHEDULE BACnetObjectType = 0x011
BACnetObjectType_DEVICE BACnetObjectType = 0x008
BACnetObjectType_CALENDAR BACnetObjectType = 0x006
BACnetObjectType_COMMAND BACnetObjectType = 0x007
BACnetObjectType_EVENT_ENROLLMENT BACnetObjectType = 0x009
BACnetObjectType_FILE BACnetObjectType = 0x00A
BACnetObjectType_GROUP BACnetObjectType = 0x00B
BACnetObjectType_LOOP BACnetObjectType = 0x00C
BACnetObjectType_NOTIFICATION_CLASS BACnetObjectType = 0x00F
BACnetObjectType_PROGRAM BACnetObjectType = 0x010
BACnetObjectType_AVERAGING BACnetObjectType = 0x012
BACnetObjectType_TREND_LOG BACnetObjectType = 0x014
BACnetObjectType_LIFE_SAFETY_POINT BACnetObjectType = 0x015
BACnetObjectType_LIFE_SAFETY_ZONE BACnetObjectType = 0x016
BACnetObjectType_ACCUMULATOR BACnetObjectType = 0x017
BACnetObjectType_PULSE_CONVERTER BACnetObjectType = 0x018
BACnetObjectType_EVENT_LOG BACnetObjectType = 0x019
BACnetObjectType_GLOBAL_GROUP BACnetObjectType = 0x01A
BACnetObjectType_TREND_LOG_MULTIPLE BACnetObjectType = 0x01B
BACnetObjectType_LOAD_CONTROL BACnetObjectType = 0x01C
BACnetObjectType_STRUCTURED_VIEW BACnetObjectType = 0x01D
BACnetObjectType_ACCESS_DOOR BACnetObjectType = 0x01E
BACnetObjectType_TIMER BACnetObjectType = 0x01F
BACnetObjectType_ACCESS_CREDENTIAL BACnetObjectType = 0x020
BACnetObjectType_ACCESS_POINT BACnetObjectType = 0x021
BACnetObjectType_ACCESS_RIGHTS BACnetObjectType = 0x022
BACnetObjectType_ACCESS_USER BACnetObjectType = 0x023
BACnetObjectType_ACCESS_ZONE BACnetObjectType = 0x024
BACnetObjectType_CREDENTIAL_DATA_INPUT BACnetObjectType = 0x025
BACnetObjectType_NETWORK_SECURITY BACnetObjectType = 0x026
BACnetObjectType_BITSTRING_VALUE BACnetObjectType = 0x027
BACnetObjectType_CHARACTERSTRING_VALUE BACnetObjectType = 0x028
BACnetObjectType_DATEPATTERN_VALUE BACnetObjectType = 0x029
BACnetObjectType_DATE_VALUE BACnetObjectType = 0x02A
BACnetObjectType_DATETIMEPATTERN_VALUE BACnetObjectType = 0x02B
BACnetObjectType_DATETIME_VALUE BACnetObjectType = 0x02C
BACnetObjectType_INTEGER_VALUE BACnetObjectType = 0x02D
BACnetObjectType_LARGE_ANALOG_VALUE BACnetObjectType = 0x02E
BACnetObjectType_OCTETSTRING_VALUE BACnetObjectType = 0x02F
BACnetObjectType_POSITIVE_INTEGER_VALUE BACnetObjectType = 0x030
BACnetObjectType_TIMEPATTERN_VALUE BACnetObjectType = 0x031
BACnetObjectType_TIME_VALUE BACnetObjectType = 0x032
BACnetObjectType_NOTIFICATION_FORWARDER BACnetObjectType = 0x033
BACnetObjectType_ALERT_ENROLLMENT BACnetObjectType = 0x034
BACnetObjectType_CHANNEL BACnetObjectType = 0x035
BACnetObjectType_LIGHTING_OUTPUT BACnetObjectType = 0x036
BACnetObjectType_BINARY_LIGHTING_OUTPUT BACnetObjectType = 0x037
BACnetObjectType_NETWORK_PORT BACnetObjectType = 0x038
BACnetObjectType_ELEVATOR_GROUP BACnetObjectType = 0x039
BACnetObjectType_ESCALATOR BACnetObjectType = 0x03A
)
var BACnetObjectTypeValues []BACnetObjectType
func init() {
BACnetObjectTypeValues = []BACnetObjectType{
BACnetObjectType_ANALOG_INPUT,
BACnetObjectType_ANALOG_OUTPUT,
BACnetObjectType_ANALOG_VALUE,
BACnetObjectType_BINARY_INPUT,
BACnetObjectType_BINARY_OUTPUT,
BACnetObjectType_BINARY_VALUE,
BACnetObjectType_MULTISTATE_INPUT,
BACnetObjectType_MULTISTATE_OUTPUT,
BACnetObjectType_MULTISTATE_VALUE,
BACnetObjectType_SCHEDULE,
BACnetObjectType_DEVICE,
BACnetObjectType_CALENDAR,
BACnetObjectType_COMMAND,
BACnetObjectType_EVENT_ENROLLMENT,
BACnetObjectType_FILE,
BACnetObjectType_GROUP,
BACnetObjectType_LOOP,
BACnetObjectType_NOTIFICATION_CLASS,
BACnetObjectType_PROGRAM,
BACnetObjectType_AVERAGING,
BACnetObjectType_TREND_LOG,
BACnetObjectType_LIFE_SAFETY_POINT,
BACnetObjectType_LIFE_SAFETY_ZONE,
BACnetObjectType_ACCUMULATOR,
BACnetObjectType_PULSE_CONVERTER,
BACnetObjectType_EVENT_LOG,
BACnetObjectType_GLOBAL_GROUP,
BACnetObjectType_TREND_LOG_MULTIPLE,
BACnetObjectType_LOAD_CONTROL,
BACnetObjectType_STRUCTURED_VIEW,
BACnetObjectType_ACCESS_DOOR,
BACnetObjectType_TIMER,
BACnetObjectType_ACCESS_CREDENTIAL,
BACnetObjectType_ACCESS_POINT,
BACnetObjectType_ACCESS_RIGHTS,
BACnetObjectType_ACCESS_USER,
BACnetObjectType_ACCESS_ZONE,
BACnetObjectType_CREDENTIAL_DATA_INPUT,
BACnetObjectType_NETWORK_SECURITY,
BACnetObjectType_BITSTRING_VALUE,
BACnetObjectType_CHARACTERSTRING_VALUE,
BACnetObjectType_DATEPATTERN_VALUE,
BACnetObjectType_DATE_VALUE,
BACnetObjectType_DATETIMEPATTERN_VALUE,
BACnetObjectType_DATETIME_VALUE,
BACnetObjectType_INTEGER_VALUE,
BACnetObjectType_LARGE_ANALOG_VALUE,
BACnetObjectType_OCTETSTRING_VALUE,
BACnetObjectType_POSITIVE_INTEGER_VALUE,
BACnetObjectType_TIMEPATTERN_VALUE,
BACnetObjectType_TIME_VALUE,
BACnetObjectType_NOTIFICATION_FORWARDER,
BACnetObjectType_ALERT_ENROLLMENT,
BACnetObjectType_CHANNEL,
BACnetObjectType_LIGHTING_OUTPUT,
BACnetObjectType_BINARY_LIGHTING_OUTPUT,
BACnetObjectType_NETWORK_PORT,
BACnetObjectType_ELEVATOR_GROUP,
BACnetObjectType_ESCALATOR,
}
}
func BACnetObjectTypeByValue(value uint16) BACnetObjectType {
switch value {
case 0x000:
return BACnetObjectType_ANALOG_INPUT
case 0x001:
return BACnetObjectType_ANALOG_OUTPUT
case 0x002:
return BACnetObjectType_ANALOG_VALUE
case 0x003:
return BACnetObjectType_BINARY_INPUT
case 0x004:
return BACnetObjectType_BINARY_OUTPUT
case 0x005:
return BACnetObjectType_BINARY_VALUE
case 0x006:
return BACnetObjectType_CALENDAR
case 0x007:
return BACnetObjectType_COMMAND
case 0x008:
return BACnetObjectType_DEVICE
case 0x009:
return BACnetObjectType_EVENT_ENROLLMENT
case 0x00A:
return BACnetObjectType_FILE
case 0x00B:
return BACnetObjectType_GROUP
case 0x00C:
return BACnetObjectType_LOOP
case 0x00D:
return BACnetObjectType_MULTISTATE_INPUT
case 0x00E:
return BACnetObjectType_MULTISTATE_OUTPUT
case 0x00F:
return BACnetObjectType_NOTIFICATION_CLASS
case 0x010:
return BACnetObjectType_PROGRAM
case 0x011:
return BACnetObjectType_SCHEDULE
case 0x012:
return BACnetObjectType_AVERAGING
case 0x013:
return BACnetObjectType_MULTISTATE_VALUE
case 0x014:
return BACnetObjectType_TREND_LOG
case 0x015:
return BACnetObjectType_LIFE_SAFETY_POINT
case 0x016:
return BACnetObjectType_LIFE_SAFETY_ZONE
case 0x017:
return BACnetObjectType_ACCUMULATOR
case 0x018:
return BACnetObjectType_PULSE_CONVERTER
case 0x019:
return BACnetObjectType_EVENT_LOG
case 0x01A:
return BACnetObjectType_GLOBAL_GROUP
case 0x01B:
return BACnetObjectType_TREND_LOG_MULTIPLE
case 0x01C:
return BACnetObjectType_LOAD_CONTROL
case 0x01D:
return BACnetObjectType_STRUCTURED_VIEW
case 0x01E:
return BACnetObjectType_ACCESS_DOOR
case 0x01F:
return BACnetObjectType_TIMER
case 0x020:
return BACnetObjectType_ACCESS_CREDENTIAL
case 0x021:
return BACnetObjectType_ACCESS_POINT
case 0x022:
return BACnetObjectType_ACCESS_RIGHTS
case 0x023:
return BACnetObjectType_ACCESS_USER
case 0x024:
return BACnetObjectType_ACCESS_ZONE
case 0x025:
return BACnetObjectType_CREDENTIAL_DATA_INPUT
case 0x026:
return BACnetObjectType_NETWORK_SECURITY
case 0x027:
return BACnetObjectType_BITSTRING_VALUE
case 0x028:
return BACnetObjectType_CHARACTERSTRING_VALUE
case 0x029:
return BACnetObjectType_DATEPATTERN_VALUE
case 0x02A:
return BACnetObjectType_DATE_VALUE
case 0x02B:
return BACnetObjectType_DATETIMEPATTERN_VALUE
case 0x02C:
return BACnetObjectType_DATETIME_VALUE
case 0x02D:
return BACnetObjectType_INTEGER_VALUE
case 0x02E:
return BACnetObjectType_LARGE_ANALOG_VALUE
case 0x02F:
return BACnetObjectType_OCTETSTRING_VALUE
case 0x030:
return BACnetObjectType_POSITIVE_INTEGER_VALUE
case 0x031:
return BACnetObjectType_TIMEPATTERN_VALUE
case 0x032:
return BACnetObjectType_TIME_VALUE
case 0x033:
return BACnetObjectType_NOTIFICATION_FORWARDER
case 0x034:
return BACnetObjectType_ALERT_ENROLLMENT
case 0x035:
return BACnetObjectType_CHANNEL
case 0x036:
return BACnetObjectType_LIGHTING_OUTPUT
case 0x037:
return BACnetObjectType_BINARY_LIGHTING_OUTPUT
case 0x038:
return BACnetObjectType_NETWORK_PORT
case 0x039:
return BACnetObjectType_ELEVATOR_GROUP
case 0x03A:
return BACnetObjectType_ESCALATOR
}
return 0
}
func BACnetObjectTypeByName(value string) BACnetObjectType |
func CastBACnetObjectType(structType interface{}) BACnetObjectType {
castFunc := func(typ interface{}) BACnetObjectType {
if sBACnetObjectType, ok := typ.(BACnetObjectType); ok {
return sBACnetObjectType
}
return 0
}
return castFunc(structType)
}
func (m BACnetObjectType) LengthInBits() uint16 {
return 10
}
func (m BACnetObjectType) LengthInBytes() uint16 {
return m.LengthInBits() / 8
}
func BACnetObjectTypeParse(io utils.ReadBuffer) (BACnetObjectType, error) {
val, err := io.ReadUint16("BACnetObjectType", 10)
if err != nil {
return 0, err
}
return BACnetObjectTypeByValue(val), nil
}
func (e BACnetObjectType) Serialize(io utils.WriteBuffer) error {
err := io.WriteUint16("BACnetObjectType", 10, uint16(e), utils.WithAdditionalStringRepresentation(e.name()))
return err
}
func (m *BACnetObjectType) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var token xml.Token
var err error
for {
token, err = d.Token()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
switch token.(type) {
case xml.CharData:
tok := token.(xml.CharData)
*m = BACnetObjectTypeByName(string(tok))
}
}
}
func (m BACnetObjectType) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if err := e.EncodeElement(m.String(), start); err != nil {
return err
}
return nil
}
func (e BACnetObjectType) name() string {
switch e {
case BACnetObjectType_ANALOG_INPUT:
return "ANALOG_INPUT"
case BACnetObjectType_ANALOG_OUTPUT:
return "ANALOG_OUTPUT"
case BACnetObjectType_ANALOG_VALUE:
return "ANALOG_VALUE"
case BACnetObjectType_BINARY_INPUT:
return "BINARY_INPUT"
case BACnetObjectType_BINARY_OUTPUT:
return "BINARY_OUTPUT"
case BACnetObjectType_BINARY_VALUE:
return "BINARY_VALUE"
case BACnetObjectType_CALENDAR:
return "CALENDAR"
case BACnetObjectType_COMMAND:
return "COMMAND"
case BACnetObjectType_DEVICE:
return "DEVICE"
case BACnetObjectType_EVENT_ENROLLMENT:
return "EVENT_ENROLLMENT"
case BACnetObjectType_FILE:
return "FILE"
case BACnetObjectType_GROUP:
return "GROUP"
case BACnetObjectType_LOOP:
return "LOOP"
case BACnetObjectType_MULTISTATE_INPUT:
return "MULTISTATE_INPUT"
case BACnetObjectType_MULTISTATE_OUTPUT:
return "MULTISTATE_OUTPUT"
case BACnetObjectType_NOTIFICATION_CLASS:
return "NOTIFICATION_CLASS"
case BACnetObjectType_PROGRAM:
return "PROGRAM"
case BACnetObjectType_SCHEDULE:
return "SCHEDULE"
case BACnetObjectType_AVERAGING:
return "AVERAGING"
case BACnetObjectType_MULTISTATE_VALUE:
return "MULTISTATE_VALUE"
case BACnetObjectType_TREND_LOG:
return "TREND_LOG"
case BACnetObjectType_LIFE_SAFETY_POINT:
return "LIFE_SAFETY_POINT"
case BACnetObjectType_LIFE_SAFETY_ZONE:
return "LIFE_SAFETY_ZONE"
case BACnetObjectType_ACCUMULATOR:
return "ACCUMULATOR"
case BACnetObjectType_PULSE_CONVERTER:
return "PULSE_CONVERTER"
case BACnetObjectType_EVENT_LOG:
return "EVENT_LOG"
case BACnetObjectType_GLOBAL_GROUP:
return "GLOBAL_GROUP"
case BACnetObjectType_TREND_LOG_MULTIPLE:
return "TREND_LOG_MULTIPLE"
case BACnetObjectType_LOAD_CONTROL:
return "LOAD_CONTROL"
case BACnetObjectType_STRUCTURED_VIEW:
return "STRUCTURED_VIEW"
case BACnetObjectType_ACCESS_DOOR:
return "ACCESS_DOOR"
case BACnetObjectType_TIMER:
return "TIMER"
case BACnetObjectType_ACCESS_CREDENTIAL:
return "ACCESS_CREDENTIAL"
case BACnetObjectType_ACCESS_POINT:
return "ACCESS_POINT"
case BACnetObjectType_ACCESS_RIGHTS:
return "ACCESS_RIGHTS"
case BACnetObjectType_ACCESS_USER:
return "ACCESS_USER"
case BACnetObjectType_ACCESS_ZONE:
return "ACCESS_ZONE"
case BACnetObjectType_CREDENTIAL_DATA_INPUT:
return "CREDENTIAL_DATA_INPUT"
case BACnetObjectType_NETWORK_SECURITY:
return "NETWORK_SECURITY"
case BACnetObjectType_BITSTRING_VALUE:
return "BITSTRING_VALUE"
case BACnetObjectType_CHARACTERSTRING_VALUE:
return "CHARACTERSTRING_VALUE"
case BACnetObjectType_DATEPATTERN_VALUE:
return "DATEPATTERN_VALUE"
case BACnetObjectType_DATE_VALUE:
return "DATE_VALUE"
case BACnetObjectType_DATETIMEPATTERN_VALUE:
return "DATETIMEPATTERN_VALUE"
case BACnetObjectType_DATETIME_VALUE:
return "DATETIME_VALUE"
case BACnetObjectType_INTEGER_VALUE:
return "INTEGER_VALUE"
case BACnetObjectType_LARGE_ANALOG_VALUE:
return "LARGE_ANALOG_VALUE"
case BACnetObjectType_OCTETSTRING_VALUE:
return "OCTETSTRING_VALUE"
case BACnetObjectType_POSITIVE_INTEGER_VALUE:
return "POSITIVE_INTEGER_VALUE"
case BACnetObjectType_TIMEPATTERN_VALUE:
return "TIMEPATTERN_VALUE"
case BACnetObjectType_TIME_VALUE:
return "TIME_VALUE"
case BACnetObjectType_NOTIFICATION_FORWARDER:
return "NOTIFICATION_FORWARDER"
case BACnetObjectType_ALERT_ENROLLMENT:
return "ALERT_ENROLLMENT"
case BACnetObjectType_CHANNEL:
return "CHANNEL"
case BACnetObjectType_LIGHTING_OUTPUT:
return "LIGHTING_OUTPUT"
case BACnetObjectType_BINARY_LIGHTING_OUTPUT:
return "BINARY_LIGHTING_OUTPUT"
case BACnetObjectType_NETWORK_PORT:
return "NETWORK_PORT"
case BACnetObjectType_ELEVATOR_GROUP:
return "ELEVATOR_GROUP"
case BACnetObjectType_ESCALATOR:
return "ESCALATOR"
}
return ""
}
func (e BACnetObjectType) String() string {
return e.name()
}
func (m BACnetObjectType) Box(s string, i int) utils.AsciiBox {
boxName := "BACnetObjectType"
if s != "" {
boxName += "/" + s
}
return utils.BoxString(boxName, fmt.Sprintf("%#0*x %s", 3, uint16(m), m.name()), -1)
}
| {
switch value {
case "ANALOG_INPUT":
return BACnetObjectType_ANALOG_INPUT
case "ANALOG_OUTPUT":
return BACnetObjectType_ANALOG_OUTPUT
case "ANALOG_VALUE":
return BACnetObjectType_ANALOG_VALUE
case "BINARY_INPUT":
return BACnetObjectType_BINARY_INPUT
case "BINARY_OUTPUT":
return BACnetObjectType_BINARY_OUTPUT
case "BINARY_VALUE":
return BACnetObjectType_BINARY_VALUE
case "CALENDAR":
return BACnetObjectType_CALENDAR
case "COMMAND":
return BACnetObjectType_COMMAND
case "DEVICE":
return BACnetObjectType_DEVICE
case "EVENT_ENROLLMENT":
return BACnetObjectType_EVENT_ENROLLMENT
case "FILE":
return BACnetObjectType_FILE
case "GROUP":
return BACnetObjectType_GROUP
case "LOOP":
return BACnetObjectType_LOOP
case "MULTISTATE_INPUT":
return BACnetObjectType_MULTISTATE_INPUT
case "MULTISTATE_OUTPUT":
return BACnetObjectType_MULTISTATE_OUTPUT
case "NOTIFICATION_CLASS":
return BACnetObjectType_NOTIFICATION_CLASS
case "PROGRAM":
return BACnetObjectType_PROGRAM
case "SCHEDULE":
return BACnetObjectType_SCHEDULE
case "AVERAGING":
return BACnetObjectType_AVERAGING
case "MULTISTATE_VALUE":
return BACnetObjectType_MULTISTATE_VALUE
case "TREND_LOG":
return BACnetObjectType_TREND_LOG
case "LIFE_SAFETY_POINT":
return BACnetObjectType_LIFE_SAFETY_POINT
case "LIFE_SAFETY_ZONE":
return BACnetObjectType_LIFE_SAFETY_ZONE
case "ACCUMULATOR":
return BACnetObjectType_ACCUMULATOR
case "PULSE_CONVERTER":
return BACnetObjectType_PULSE_CONVERTER
case "EVENT_LOG":
return BACnetObjectType_EVENT_LOG
case "GLOBAL_GROUP":
return BACnetObjectType_GLOBAL_GROUP
case "TREND_LOG_MULTIPLE":
return BACnetObjectType_TREND_LOG_MULTIPLE
case "LOAD_CONTROL":
return BACnetObjectType_LOAD_CONTROL
case "STRUCTURED_VIEW":
return BACnetObjectType_STRUCTURED_VIEW
case "ACCESS_DOOR":
return BACnetObjectType_ACCESS_DOOR
case "TIMER":
return BACnetObjectType_TIMER
case "ACCESS_CREDENTIAL":
return BACnetObjectType_ACCESS_CREDENTIAL
case "ACCESS_POINT":
return BACnetObjectType_ACCESS_POINT
case "ACCESS_RIGHTS":
return BACnetObjectType_ACCESS_RIGHTS
case "ACCESS_USER":
return BACnetObjectType_ACCESS_USER
case "ACCESS_ZONE":
return BACnetObjectType_ACCESS_ZONE
case "CREDENTIAL_DATA_INPUT":
return BACnetObjectType_CREDENTIAL_DATA_INPUT
case "NETWORK_SECURITY":
return BACnetObjectType_NETWORK_SECURITY
case "BITSTRING_VALUE":
return BACnetObjectType_BITSTRING_VALUE
case "CHARACTERSTRING_VALUE":
return BACnetObjectType_CHARACTERSTRING_VALUE
case "DATEPATTERN_VALUE":
return BACnetObjectType_DATEPATTERN_VALUE
case "DATE_VALUE":
return BACnetObjectType_DATE_VALUE
case "DATETIMEPATTERN_VALUE":
return BACnetObjectType_DATETIMEPATTERN_VALUE
case "DATETIME_VALUE":
return BACnetObjectType_DATETIME_VALUE
case "INTEGER_VALUE":
return BACnetObjectType_INTEGER_VALUE
case "LARGE_ANALOG_VALUE":
return BACnetObjectType_LARGE_ANALOG_VALUE
case "OCTETSTRING_VALUE":
return BACnetObjectType_OCTETSTRING_VALUE
case "POSITIVE_INTEGER_VALUE":
return BACnetObjectType_POSITIVE_INTEGER_VALUE
case "TIMEPATTERN_VALUE":
return BACnetObjectType_TIMEPATTERN_VALUE
case "TIME_VALUE":
return BACnetObjectType_TIME_VALUE
case "NOTIFICATION_FORWARDER":
return BACnetObjectType_NOTIFICATION_FORWARDER
case "ALERT_ENROLLMENT":
return BACnetObjectType_ALERT_ENROLLMENT
case "CHANNEL":
return BACnetObjectType_CHANNEL
case "LIGHTING_OUTPUT":
return BACnetObjectType_LIGHTING_OUTPUT
case "BINARY_LIGHTING_OUTPUT":
return BACnetObjectType_BINARY_LIGHTING_OUTPUT
case "NETWORK_PORT":
return BACnetObjectType_NETWORK_PORT
case "ELEVATOR_GROUP":
return BACnetObjectType_ELEVATOR_GROUP
case "ESCALATOR":
return BACnetObjectType_ESCALATOR
}
return 0
} |
agent_ddpg.py | import numpy as np
import random
import copy
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
from model_ddpg import Actor, Critic
from replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
BUFFER_SIZE = int(1e6) # replay buffer size
START_SIZE = 1024 # when to start training
BATCH_SIZE = 512 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 1e-3 # learning rate of the actor
LR_CRITIC = 1e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
TRAIN_EVERY = 5 # how often to train a batch
TRAIN_STEPS = 3 # how many training steps when a batch is trained
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, num_agents, state_size, action_size, random_seed, use_per=False):
"""Initialize an Agent object.
Params
======
num_agents (int): number of agents
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
use_per (bool): whether to use prioritized replay buffer
"""
self.num_agents = num_agents
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
self.use_per = use_per
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
# Replay memory
if use_per:
self.memory = PrioritizedReplayBuffer(BUFFER_SIZE, BATCH_SIZE)
else:
self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, random_seed)
# Initialize time step
self.t_step = 0
def get_critic_Q(self, states, actions, rewards, next_states, dones, gamma, is_train=True):
# Get max predicted Q values (for next states) from target model
if is_train:
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)
Q_expected = self.critic_local(states, actions)
else:
self.actor_local.eval()
self.actor_target.eval()
self.critic_local.eval()
self.critic_target.eval()
with torch.no_grad():
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)
Q_expected = self.critic_local(states, actions)
self.actor_local.train()
self.actor_target.train()
self.critic_local.train()
self.critic_target.train()
return Q_expected, Q_targets
def step(self, states, actions, rewards, next_states, dones):
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
if self.use_per:
# Convert numpy array to torch tensor
states = torch.from_numpy(states).float().to(device)
actions = torch.from_numpy(actions).float().to(device)
rewards = torch.from_numpy(np.array(rewards)).float().unsqueeze(1).to(device)
next_states = torch.from_numpy(next_states).float().to(device)
dones = torch.from_numpy(np.array(dones).astype(np.uint8)).float().unsqueeze(1).to(device)
# Get max predicted Q values (for next states) from target model
Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, GAMMA, is_train=False)
# Convert torch tensor to numpy array
states = states.cpu().data.numpy()
actions = actions.cpu().data.numpy()
rewards = rewards.cpu().data.numpy().squeeze(1).tolist()
next_states = next_states.cpu().data.numpy()
dones = dones.cpu().data.numpy().squeeze(1).astype(bool).tolist()
# Calculate error
errors = Q_expected - Q_targets
errors = errors.cpu().data.numpy().squeeze(1)
for i in range(self.num_agents):
self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i], errors[i])
else:
for i in range(self.num_agents):
self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i])
# Update time step
self.t_step += 1
# If enough samples are available in memory,
if len(self.memory) >= START_SIZE:
# Get random subset and learn every TRAIN_EVERY time steps,
if self.t_step % TRAIN_EVERY == 0:
for _ in range(TRAIN_STEPS):
if self.use_per:
experiences, idx_tree, is_weight = self.memory.sample()
self.learn(experiences, GAMMA, idx_tree, is_weight)
else:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, states, add_noise=True):
"""Returns epsilon-greedy actions for given state as per current policy."""
states = torch.from_numpy(states).float().to(device)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(states).cpu().data.numpy()
self.actor_local.train()
if add_noise:
actions += np.concatenate([np.expand_dims(self.noise.sample(), axis=0) for _ in range(self.num_agents)], axis=0)
return np.clip(actions, -1, 1)
def reset(self):
self.noise.reset()
def learn(self, experiences, gamma, idx_tree=None, is_weight=None):
|
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
"""Initialize parameters and noise process."""
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.seed = random.seed(seed)
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])
self.state = x + dx
return self.state | """Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> action
critic_target(state, action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models
Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, gamma, is_train=True)
# Compute critic loss
if self.use_per:
assert ((is_weight is not None) and (is_weight.size > 0))
is_weight = torch.from_numpy(is_weight).float().to(device)
critic_loss = (is_weight * F.smooth_l1_loss(Q_expected, Q_targets, reduction='none').squeeze()).mean()
else:
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.critic_optimizer.zero_grad()
critic_loss.backward()
# torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1) # use gradient norm clipping
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
# Compute actor loss
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean()
# Minimize the loss
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
# update priority
if self.use_per:
assert((idx_tree is not None) and (len(idx_tree) > 0))
errors = Q_expected - Q_targets
errors = errors.cpu().data.numpy().squeeze()
for i in range(self.memory.batch_size):
self.memory.update(idx_tree[i], errors[i])
|
client.rs | use crate::download::AsyncDownload;
use crate::download::DownloadClient;
use crate::traits::*;
use crate::uploadsession::UploadSessionClient;
use crate::url::GraphUrl;
use crate::{
GraphRequest, GraphResponse, HttpClient, Registry, RequestAttribute, RequestClient, RequestType,
};
use graph_core::resource::ResourceIdentity;
use graph_error::WithGraphErrorAsync;
use graph_error::{GraphFailure, GraphResult};
use handlebars::Handlebars;
use parking_lot::Mutex;
use reqwest::header::{HeaderMap, HeaderValue, IntoHeaderName, CONTENT_TYPE};
use reqwest::redirect::Policy;
use reqwest::Method;
use std::fmt::{Debug, Formatter};
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use url::Url;
pub(crate) type AsyncClient =
GraphRequest<reqwest::Client, reqwest::Body, reqwest::multipart::Form>;
impl AsyncClient {
pub fn new_async(url: GraphUrl) -> AsyncClient {
let mut headers = HeaderMap::default();
headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
AsyncClient {
token: Default::default(),
ident: Default::default(),
client: reqwest::Client::builder()
.redirect(Policy::limited(2))
.build()
.map_err(GraphFailure::from)
.unwrap(),
url,
method: Default::default(),
body: None,
headers,
upload_session_file: None,
download_dir: None,
form: None,
req_type: Default::default(),
registry: Handlebars::new(),
timeout: Duration::from_secs(30),
}
}
pub fn inner_client(&mut self) -> &mut reqwest::Client {
&mut self.client
}
pub fn download(&mut self) -> AsyncDownload {
let request = self.clone();
DownloadClient::new_async(request)
}
pub async fn upload_session(&mut self) -> GraphResult<UploadSessionClient<AsyncHttpClient>> {
let file = self
.upload_session_file
.take()
.ok_or_else(|| GraphFailure::invalid("file for upload session"))?;
let response = self.response().await?.with_graph_error().await?;
let upload_session: serde_json::Value = response.json().await?;
let mut session = UploadSessionClient::new_async(upload_session)?;
session.set_file(file).await?;
Ok(session)
}
pub fn build_upload_session(&mut self) -> (Option<PathBuf>, reqwest::RequestBuilder) {
let file = self.upload_session_file.take();
let builder = self.build();
(file, builder)
}
pub fn build(&mut self) -> reqwest::RequestBuilder {
let headers = self.headers.clone();
self.headers.clear();
self.headers
.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
let builder = self
.client
.request(self.method.clone(), self.url.as_str())
.timeout(self.timeout)
.headers(headers)
.bearer_auth(self.token.as_str());
match self.req_type {
RequestType::Basic | RequestType::Redirect => {
if self.body.is_some() {
builder.body(self.body.take().unwrap())
} else {
builder
}
}
RequestType::Multipart => builder.multipart(self.form.take().unwrap()),
}
}
/// Builds the request and sends it.
///
/// Requests that require a redirect are handled automatically, so we don't
/// need to do anything special for them.
pub async fn response(&mut self) -> GraphResult<reqwest::Response> {
let builder = self.build();
let response = builder.send().await?;
Ok(response)
}
/// Builds the request and sends it, converting to a GraphResponse and deserializing
/// the body.
pub async fn execute<T>(&mut self) -> GraphResult<GraphResponse<T>>
where
for<'de> T: serde::Deserialize<'de>,
{
let builder = self.build();
let response = builder.send().await?;
AsyncTryFrom::<reqwest::Response>::async_try_from(response).await
}
pub fn clone(&mut self) -> Self {
GraphRequest {
token: self.token.to_string(),
ident: self.ident,
client: reqwest::Client::builder()
.redirect(Policy::limited(2))
.build()
.map_err(GraphFailure::from)
.unwrap(),
url: self.url.clone(),
method: self.method.clone(),
body: self.body.take(),
headers: self.headers.clone(),
upload_session_file: self.upload_session_file.take(),
download_dir: self.download_dir.take(),
form: self.form.take(),
req_type: self.req_type,
registry: Handlebars::new(),
timeout: Duration::from_secs(30),
}
}
fn register_ident_helper(&mut self, resource_identity: ResourceIdentity) {
Registry::register_internal_helper(resource_identity, &mut self.registry);
}
}
impl Default for AsyncClient {
fn default() -> Self {
AsyncClient::new_async(GraphUrl::parse("https://graph.microsoft.com/v1.0").unwrap())
}
}
impl From<Url> for AsyncClient {
fn from(url: Url) -> Self {
AsyncClient::new_async(GraphUrl::from(url))
}
}
pub type AsyncHttpClient = HttpClient<Arc<Mutex<AsyncClient>>>;
impl AsyncHttpClient {
pub fn new(url: GraphUrl) -> AsyncHttpClient {
AsyncHttpClient {
client: Arc::new(Mutex::new(AsyncClient::new_async(url))),
}
}
pub fn clone_inner(&self) -> Arc<Mutex<AsyncClient>> {
Arc::clone(&self.client)
}
pub async fn download(&self) -> AsyncDownload {
self.client.lock().download()
}
pub async fn upload_session(&self) -> GraphResult<UploadSessionClient<AsyncHttpClient>> {
self.client.lock().upload_session().await
}
pub async fn build_upload_session(&self) -> (Option<PathBuf>, reqwest::RequestBuilder) {
self.client.lock().build_upload_session()
}
pub async fn build(&self) -> reqwest::RequestBuilder {
self.client.lock().build()
}
pub async fn response(&self) -> GraphResult<reqwest::Response> {
let mut client = self.client.lock();
let response = client.response();
response.await
}
pub async fn execute<T>(&self) -> GraphResult<GraphResponse<T>>
where
for<'de> T: serde::Deserialize<'de>,
{
self.client.lock().execute().await
}
}
impl RequestClient for AsyncHttpClient {
type Body = reqwest::Body;
type Form = reqwest::multipart::Form;
fn token(&self) -> String {
self.client.lock().token.clone()
}
fn set_token(&self, token: &str) {
self.client.lock().token = token.to_string();
}
fn ident(&self) -> ResourceIdentity {
self.client.lock().ident
}
fn set_ident(&self, ident: ResourceIdentity) {
self.client.lock().ident = ident;
}
fn url(&self) -> GraphUrl {
self.client.lock().url.clone()
}
fn | (&self) -> Url {
self.client.lock().url.to_url()
}
fn set_url(&self, url: GraphUrl) {
self.client.lock().url = url;
}
fn method(&self) -> Method {
self.client.lock().method.clone()
}
fn set_method(&self, method: Method) {
self.client.lock().method = method;
}
fn set_body<T: Into<reqwest::Body>>(&self, body: T) {
self.client.lock().body = Some(body.into());
}
fn set_body_with_file(&self, path: PathBuf) -> GraphResult<()> {
let mut file = File::open(path)?;
let mut buffer = String::new();
file.read_to_string(&mut buffer)?;
self.client.lock().body = Some(buffer.into());
Ok(())
}
fn header<T: IntoHeaderName>(&self, name: T, value: HeaderValue) {
self.client.lock().headers.insert(name, value);
}
fn set_header_map(&self, header_map: HeaderMap) {
self.client.lock().headers = header_map;
}
fn clear_headers(&self) {
self.client.lock().headers.clear();
}
fn set_download_dir(&self, dir: PathBuf) {
self.client.lock().download_dir = Some(dir);
}
fn set_upload_session(&self, file: PathBuf) {
self.client.lock().upload_session_file = Some(file);
}
fn set_form(&self, form: reqwest::multipart::Form) {
let mut client = self.client.lock();
client.form = Some(form);
client.req_type = RequestType::Multipart;
}
fn set_request_type(&self, req_type: RequestType) {
self.client.lock().req_type = req_type;
}
fn request_type(&self) -> RequestType {
self.client.lock().req_type
}
fn url_ref<F>(&self, f: F)
where
F: Fn(&GraphUrl) + Sync,
{
f(&self.client.lock().url)
}
fn url_mut<F>(&self, f: F)
where
F: Fn(&mut GraphUrl) + Sync,
{
f(&mut self.client.lock().url)
}
fn registry<F>(&self, f: F)
where
F: Fn(&mut Handlebars) + Sync,
{
f(&mut self.client.lock().registry)
}
fn render_template(&self, template: &str, json: &serde_json::Value) -> String {
self.client
.lock()
.registry
.render_template(template, json)
.unwrap()
}
fn register_ident_helper(&self, resource_identity: ResourceIdentity) {
self.client.lock().register_ident_helper(resource_identity);
}
fn extend_path(&self, path: &[&str]) {
self.client.lock().url.extend_path(path);
}
fn set_request(
&self,
req_att: Vec<RequestAttribute<reqwest::Body, reqwest::multipart::Form>>,
) -> GraphResult<()> {
for att in req_att {
let mut client = self.client.lock();
match att {
RequestAttribute::Token(token) => client.token = token,
RequestAttribute::Ident(ident) => client.ident = ident,
RequestAttribute::Url(url) => client.url = url,
RequestAttribute::Method(method) => client.method = method,
RequestAttribute::Body(body) => client.body = Some(body),
RequestAttribute::BodyFile(path) => {
let mut file = File::open(path)?;
let mut buffer = String::new();
file.read_to_string(&mut buffer)?;
client.body = Some(buffer.into());
}
RequestAttribute::Headers(headers) => client.headers = headers,
RequestAttribute::ClearHeaders => client.headers.clear(),
RequestAttribute::Download(path) => client.download_dir = Some(path),
RequestAttribute::Upload(path) => client.upload_session_file = Some(path),
RequestAttribute::Form(form) => client.form = Some(form),
RequestAttribute::RequestType(req_type) => client.req_type = req_type,
}
}
Ok(())
}
fn set_timeout(&self, duration: Duration) {
self.client.lock().timeout = duration;
}
}
impl Debug for AsyncHttpClient {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.client.lock().fmt(f)
}
}
impl From<AsyncClient> for AsyncHttpClient {
fn from(client: AsyncClient) -> Self {
AsyncHttpClient {
client: Arc::new(Mutex::new(client)),
}
}
}
| to_url |
parser_trait.rs | // Copyright (c) 2019, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use crate::lexer::{self, Lexer};
use crate::parser_env::ParserEnv;
use crate::smart_constructors::{NodeType, SmartConstructors, Token, Trivia};
use parser_core_types::{
lexable_token::LexableToken,
lexable_trivia::LexableTrivia,
syntax_error::{self as Errors, Error, SyntaxError},
token_factory::TokenFactory,
token_kind::TokenKind,
trivia_factory::TriviaFactory,
};
use stack_limit::StackLimit;
#[derive(PartialEq)]
pub enum SeparatedListKind {
NoTrailing,
TrailingAllowed,
ItemsOptional,
}
// This could be a set of token kinds, but it's part of the parser environment that is often cloned,
// so we try to keep it small.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ExpectedTokens {
Classish = 0b0001,
Semicolon = 0b0010,
RightParen = 0b0100,
Visibility = 0b1000,
}
const ET_COUNT: u32 = 4;
const ET_MASK: ETMask = (1 << ET_COUNT) - 1;
type ETMask = u16; // mask of bits in first ET_COUNT bits
impl ExpectedTokens {
pub fn contains(mask: ETMask, token: TokenKind) -> bool {
use ExpectedTokens::*;
let bit: ETMask = match token {
TokenKind::Class | TokenKind::Trait | TokenKind::Interface => Classish as ETMask,
TokenKind::Semicolon => Semicolon as ETMask,
TokenKind::RightParen => RightParen as ETMask,
TokenKind::Public | TokenKind::Protected | TokenKind::Private => Visibility as ETMask,
_ => 0_u16,
};
(bit & mask) != 0
}
fn from(bit: ETMask) -> ExpectedTokens {
// debug_assert!((bit & (!bit+1)) == bit, "unexpected multiple set bits in {:#b}", bit);
use ExpectedTokens::*;
match bit {
0b0001 => Classish,
0b0010 => Semicolon,
0b0100 => RightParen,
_ => Visibility,
}
}
}
#[derive(Debug, Clone)]
pub struct ExpectedTokenVec(Vec<ETMask>);
impl ExpectedTokenVec {
fn push(&mut self, et: ExpectedTokens) {
let last_mask = *self.0.last().unwrap_or(&0) & ET_MASK;
let bit = et as ETMask;
self.0.push(bit | last_mask | (bit << ET_COUNT));
}
fn pop(&mut self) -> Option<ExpectedTokens> {
self.0.pop().map(|x| ExpectedTokens::from(x >> ET_COUNT))
}
fn last_mask(&self) -> ETMask {
self.0.last().map_or(0, |x| x >> ET_COUNT)
}
fn any_mask(&self) -> ETMask {
self.0.last().map_or(0, |x| x & ET_MASK)
}
}
#[derive(Debug, Clone)]
pub struct Context<'a, T> {
pub expected: ExpectedTokenVec,
pub skipped_tokens: Vec<T>,
stack_limit: Option<&'a StackLimit>,
}
impl<'a, T> Context<'a, T> {
pub fn empty(stack_limit: Option<&'a StackLimit>) -> Self {
Self {
expected: ExpectedTokenVec(vec![]),
skipped_tokens: vec![],
stack_limit,
}
}
fn expect_in_new_scope(&mut self, expected: ExpectedTokens) {
self.expected.push(expected);
}
fn pop_scope(&mut self, expected: ExpectedTokens) {
let scope = self.expected.pop().unwrap();
assert_eq!(expected, scope)
}
fn expects(&self, token_kind: TokenKind) -> bool {
ExpectedTokens::contains(self.expected.any_mask(), token_kind)
}
fn expects_here(&self, token_kind: TokenKind) -> bool {
ExpectedTokens::contains(self.expected.last_mask(), token_kind)
}
}
pub trait ParserTrait<'a, S>: Clone
where
S: SmartConstructors,
<S as SmartConstructors>::R: NodeType,
{
fn make(
_: Lexer<'a, S::TF>,
_: ParserEnv,
_: Context<'a, Token<S>>,
_: Vec<SyntaxError>,
_: S,
) -> Self;
fn add_error(&mut self, _: SyntaxError);
fn into_parts(self) -> (Lexer<'a, S::TF>, Context<'a, Token<S>>, Vec<SyntaxError>, S);
fn lexer(&self) -> &Lexer<'a, S::TF>;
fn lexer_mut(&mut self) -> &mut Lexer<'a, S::TF>;
fn continue_from<P: ParserTrait<'a, S>>(&mut self, _: P);
fn env(&self) -> &ParserEnv;
fn sc_mut(&mut self) -> &mut S;
fn skipped_tokens(&self) -> &[Token<S>];
fn drain_skipped_tokens(&mut self) -> std::vec::Drain<'_, Token<S>>;
fn context_mut(&mut self) -> &mut Context<'a, Token<S>>;
fn context(&self) -> &Context<'a, Token<S>>;
fn pos(&self) -> usize {
self.lexer().offset()
}
fn | (&mut self, token: Token<S>) {
self.context_mut().skipped_tokens.push(token)
}
fn expects(&self, kind: TokenKind) -> bool {
self.context().expects(kind)
}
fn expects_here(&self, kind: TokenKind) -> bool {
self.context().expects_here(kind)
}
fn expect_in_new_scope(&mut self, expected: ExpectedTokens) {
self.context_mut().expect_in_new_scope(expected)
}
fn pop_scope(&mut self, expected: ExpectedTokens) {
self.context_mut().pop_scope(expected)
}
// This function reports an error starting at the current location of the
// parser. Setting on_whole_token=false will report the error only on trivia,
// which is useful in cases such as when "a semicolon is expected here" before
// the current node. However, setting on_whole_token=true will report the error
// only on the non-trivia text of the next token parsed, which is useful
// in cases like "flagging an entire token as an extra".
fn with_error_impl(&mut self, on_whole_token: bool, message: Error) {
let (start_offset, end_offset) = self.error_offsets(on_whole_token);
let error = SyntaxError::make(start_offset, end_offset, message);
self.add_error(error)
}
fn with_error(&mut self, message: Error) {
self.with_error_impl(false, message)
}
fn with_error_on_whole_token(&mut self, message: Error) {
self.with_error_impl(true, message)
}
fn next_token_with_tokenizer<F>(&mut self, tokenizer: F) -> Token<S>
where
F: Fn(&mut Lexer<'a, S::TF>) -> Token<S>,
{
let token = tokenizer(self.lexer_mut());
if !self.skipped_tokens().is_empty() {
let start = self.lexer().start();
let mut leading = self
.sc_mut()
.token_factory_mut()
.trivia_factory_mut()
.make();
for t in self.drain_skipped_tokens() {
let (t_leading, t_width, t_trailing) = t.into_trivia_and_width();
leading.extend(t_leading);
leading.push(Trivia::<S>::make_extra_token_error(start, t_width));
leading.extend(t_trailing);
}
leading.extend(token.clone_leading());
self.sc_mut()
.token_factory_mut()
.with_leading(token, leading)
} else {
token
}
}
fn next_token(&mut self) -> Token<S> {
self.next_token_with_tokenizer(|x| x.next_token())
}
fn next_token_no_trailing(&mut self) -> Token<S> {
self.lexer_mut().next_token_no_trailing()
}
fn next_docstring_header(&mut self) -> (Token<S>, &'a [u8]) {
self.lexer_mut().next_docstring_header()
}
fn next_token_in_string(&mut self, literal_kind: &lexer::StringLiteralKind) -> Token<S> {
self.lexer_mut().next_token_in_string(literal_kind)
}
fn next_xhp_class_name_or_other(&mut self) -> S::R {
let token = self.next_xhp_class_name_or_other_token();
match token.kind() {
TokenKind::Namespace | TokenKind::Name => {
let name_token = S!(make_token, self, token);
self.scan_remaining_qualified_name(name_token)
}
TokenKind::Backslash => {
let missing = S!(make_missing, self, self.pos());
let backslash = S!(make_token, self, token);
self.scan_qualified_name(missing, backslash)
}
_ => S!(make_token, self, token),
}
}
fn next_xhp_children_name_or_other(&mut self) -> Token<S> {
if self.is_next_xhp_category_name() {
self.next_xhp_category_name()
} else if self.env().enable_xhp_class_modifier {
self.next_xhp_modifier_class_name_or_other_token()
} else {
self.next_xhp_class_name_or_other_token()
}
}
// Used in conjunction with the following function. If you call next_token
// when the parser is at the <<<, it will scan the entire file looking for an
// ending to the heredoc, which could quickly get bad if there are many such
// declarations in a file.
fn peek_next_partial_token_is_triple_left_angle(&self) -> bool {
let mut lexer = self.lexer().clone();
lexer.scan_leading_php_trivia();
let tparam_open = lexer.peek_char(0);
let attr1 = lexer.peek_char(1);
let attr2 = lexer.peek_char(2);
tparam_open == '<' && attr1 == '<' && attr2 == '<'
}
// Type parameter/argument lists begin with < and can have attributes immediately
// afterwards, so this peeks a token kind at the beginning of such a list.
fn peek_token_kind_with_possible_attributized_type_list(&self) -> TokenKind {
if self.peek_next_partial_token_is_triple_left_angle() {
TokenKind::LessThan
} else {
self.peek_token_kind()
}
}
// In the case of attributes on generics, one could write
// function f<<<__Attr>> reify T, ...> or Awaitable<<<__Soft>> int>
// The triple left angle is currently lexed as a HeredocStringLiteral,
// but we can get around this by manually advancing the lexer one token
// and returning a LeftAngle. Then, the next token will be a LeftAngleLeftAngle
fn assert_left_angle_in_type_list_with_possible_attribute(&mut self) -> S::R {
let parser1 = self.clone();
let lexer = self.lexer_mut();
lexer.scan_leading_php_trivia();
let tparam_open = lexer.peek_char(0);
let attr1 = lexer.peek_char(1);
let attr2 = lexer.peek_char(2);
if tparam_open == '<' && attr1 == '<' && attr2 == '<' {
lexer.advance(1);
let start = lexer.start();
let token_factory = self.sc_mut().token_factory_mut();
let leading = token_factory.trivia_factory_mut().make();
let trailing = token_factory.trivia_factory_mut().make();
let token = token_factory.make(TokenKind::LessThan, start, 1, leading, trailing);
S!(make_token, self, token)
} else {
self.continue_from(parser1);
self.assert_token(TokenKind::LessThan)
}
}
fn assert_xhp_body_token(&mut self, kind: TokenKind) -> S::R {
self.assert_token_with_tokenizer(kind, |x: &mut Lexer<'a, S::TF>| x.next_xhp_body_token())
}
fn peek_token_with_lookahead(&self, lookahead: usize) -> Token<S> {
let mut lexer = self.lexer().clone();
let mut i = 0;
loop {
if i == lookahead {
// call peek_next_token instead of next_token for the last one to leverage
// lexer caching
return lexer.peek_next_token();
}
let _ = lexer.next_token();
i += 1
}
}
fn peek_token(&self) -> Token<S> {
self.lexer().peek_next_token()
}
fn peek_token_kind(&self) -> TokenKind {
self.peek_token().kind()
}
fn peek_token_kind_with_lookahead(&self, lookahead: usize) -> TokenKind {
self.peek_token_with_lookahead(lookahead).kind()
}
fn fetch_token(&mut self) -> S::R {
let token = self.lexer_mut().next_token();
S!(make_token, self, token)
}
fn assert_token_with_tokenizer<F>(&mut self, kind: TokenKind, tokenizer: F) -> S::R
where
F: Fn(&mut Lexer<'a, S::TF>) -> Token<S>,
{
let token = self.next_token_with_tokenizer(tokenizer);
if token.kind() != kind {
panic!(
"Expected {:?}, but got {:?}. This indicates a bug in the parser, regardless of how broken the input code is.",
kind,
token.kind()
)
}
S!(make_token, self, token)
}
fn assert_token(&mut self, kind: TokenKind) -> S::R {
self.assert_token_with_tokenizer(kind, |x: &mut Lexer<'_, S::TF>| x.next_token())
}
fn token_text(&self, token: &Token<S>) -> &'a str {
match token.leading_start_offset() {
None => "", // unavailable for minimal tokens
Some(leading_start_offset) => unsafe {
std::str::from_utf8_unchecked(
self.lexer()
.source()
.sub(leading_start_offset + token.leading_width(), token.width()),
)
},
}
}
fn current_token_text(&self) -> &'a str {
self.token_text(&self.peek_token())
}
// If the next token is a name or keyword, scan it as a name.
fn next_token_as_name(&mut self) -> Token<S> {
// TODO: This isn't right. Pass flags to the lexer.
self.lexer_mut().next_token_as_name()
}
fn optional_token(&mut self, kind: TokenKind) -> S::R {
if self.peek_token_kind() == kind {
let token = self.next_token();
S!(make_token, self, token)
} else {
S!(make_missing, self, self.pos())
}
}
fn scan_qualified_name_worker(
&mut self,
mut name_opt: Option<S::R>,
mut parts: Vec<S::R>,
mut has_backslash: bool,
) -> (Vec<S::R>, Option<S::R>, bool) {
loop {
let mut parser1 = self.clone();
let token = if parser1.is_next_xhp_class_name() {
parser1.next_xhp_class_name()
} else {
parser1.next_token_as_name()
};
match (name_opt.is_some(), token.kind()) {
(true, TokenKind::Backslash) => {
// found backslash, create item and recurse
self.continue_from(parser1);
let token = S!(make_token, self, token);
let part = S!(make_list_item, self, name_opt.unwrap(), token);
parts.push(part);
has_backslash = true;
name_opt = None;
}
(false, TokenKind::Name) => {
// found a name, recurse to look for backslash
self.continue_from(parser1);
let token = S!(make_token, self, token);
name_opt = Some(token);
has_backslash = false;
}
(true, _) if parts.is_empty() => {
// have not found anything - return [] to indicate failure
return (parts, name_opt, false);
}
(true, _) => {
// next token is not part of qualified name but we've consume some
// part of the input - create part for name with missing backslash
// and return accumulated result
let missing = S!(make_missing, self, self.pos());
let part = S!(make_list_item, self, name_opt.unwrap(), missing);
// TODO(T25649779)
parts.push(part);
return (parts, None, false);
}
_ => {
// next token is not part of qualified name - return accumulated result
return (parts, name_opt, has_backslash);
}
}
}
}
fn scan_remaining_qualified_name_extended(&mut self, name_token: S::R) -> (S::R, bool) {
let (parts, name_token_opt, is_backslash) =
self.scan_qualified_name_worker(Some(name_token), vec![], false);
if parts.is_empty() {
(name_token_opt.unwrap(), is_backslash)
} else {
let list_node = S!(make_list, self, parts, self.pos());
let name = S!(make_qualified_name, self, list_node);
(name, is_backslash)
}
}
fn scan_qualified_name_extended(&mut self, missing: S::R, backslash: S::R) -> (S::R, bool) {
let head = S!(make_list_item, self, missing, backslash);
let parts = vec![head];
let (parts, _, is_backslash) = self.scan_qualified_name_worker(None, parts, false);
let list_node = S!(make_list, self, parts, self.pos());
let name = S!(make_qualified_name, self, list_node);
(name, is_backslash)
}
fn scan_qualified_name(&mut self, missing: S::R, backslash: S::R) -> S::R {
let (name, _) = self.scan_qualified_name_extended(missing, backslash);
name
}
    // If the next token is a name or a non-reserved keyword, scan it as
    // a name; otherwise scan it as a keyword.
//
// NB: A "reserved" keyword is in practice a keyword that cannot be used
// as a class name or function name, for example, control flow keywords or
// declaration keywords are reserved.
fn next_token_non_reserved_as_name(&mut self) -> Token<S> {
self.next_token_with_tokenizer(|l| l.next_token_non_reserved_as_name())
}
fn scan_header(&mut self) -> (Option<Token<S>>, Option<(Token<S>, Option<Token<S>>)>) {
self.lexer_mut().scan_header()
}
fn error_offsets(&mut self, on_whole_token: bool /* = false */) -> (usize, usize) {
if on_whole_token {
let token = self.peek_token();
let start_offset = self.lexer().offset() + token.leading_width();
let end_offset = start_offset + token.width();
(start_offset, end_offset)
} else {
let start_offset = self.lexer().start();
let end_offset = self.lexer().offset();
(start_offset, end_offset)
}
}
fn scan_name_or_qualified_name(&mut self) -> S::R {
let mut parser1 = self.clone();
let token = parser1.next_token_non_reserved_as_name();
match token.kind() {
TokenKind::Namespace | TokenKind::Name => {
self.continue_from(parser1);
let token = S!(make_token, self, token);
self.scan_remaining_qualified_name(token)
}
TokenKind::Backslash => {
self.continue_from(parser1);
let missing = S!(make_missing, self, self.pos());
let token = S!(make_token, self, token);
self.scan_qualified_name(missing, token)
}
_ => S!(make_missing, self, self.pos()),
}
}
fn parse_alternate_if_block<F>(&mut self, parse_item: F) -> S::R
where
F: Fn(&mut Self) -> S::R,
{
let mut parser1 = self.clone();
let block = parser1.parse_list_while(parse_item, |x: &Self| match x.peek_token_kind() {
TokenKind::Elseif | TokenKind::Else | TokenKind::Endif => false,
_ => true,
});
if block.is_missing() {
let empty1 = S!(make_missing, self, self.pos());
let empty2 = S!(make_missing, self, self.pos());
let es = S!(make_expression_statement, self, empty1, empty2);
S!(make_list, self, vec![es], self.pos())
} else {
self.continue_from(parser1);
block
}
}
fn parse_separated_list<F>(
&mut self,
separator_kind: TokenKind,
allow_trailing: SeparatedListKind,
close_kind: TokenKind,
error: Error,
parse_item: F,
) -> (S::R, bool)
where
F: Fn(&mut Self) -> S::R,
{
let (x, y, _) = self.parse_separated_list_predicate(
|x| x == separator_kind,
allow_trailing,
|x| x == close_kind,
error,
parse_item,
);
(x, y)
}
fn require_qualified_name(&mut self) -> S::R {
let mut parser1 = self.clone();
let name = if parser1.is_next_xhp_class_name() {
parser1.next_xhp_class_name()
} else {
parser1.next_token_non_reserved_as_name()
};
match name.kind() {
TokenKind::Namespace | TokenKind::Name | TokenKind::XHPClassName => {
self.continue_from(parser1);
let token = S!(make_token, self, name);
self.scan_remaining_qualified_name(token)
}
TokenKind::Backslash => {
self.continue_from(parser1);
let missing = S!(make_missing, self, self.pos());
let backslash = S!(make_token, self, name);
self.scan_qualified_name(missing, backslash)
}
_ => {
self.with_error(Errors::error1004);
S!(make_missing, self, self.pos())
}
}
}
fn require_name(&mut self) -> S::R {
self.require_token(TokenKind::Name, Errors::error1004)
}
fn require_xhp_class_name(&mut self) -> S::R {
let token = self.next_xhp_modifier_class_name();
S!(make_token, self, token)
}
fn require_xhp_class_name_or_name(&mut self) -> S::R {
if self.is_next_xhp_class_name() {
let token = self.next_xhp_class_name();
S!(make_token, self, token)
} else {
self.require_token(TokenKind::Name, Errors::error1004)
}
}
fn require_class_name(&mut self) -> S::R {
if self.is_next_xhp_class_name() {
let token = self.next_xhp_class_name();
S!(make_token, self, token)
} else {
self.require_name_allow_non_reserved()
}
}
fn require_function(&mut self) -> S::R {
self.require_token(TokenKind::Function, Errors::error1003)
}
fn require_variable(&mut self) -> S::R {
self.require_token(TokenKind::Variable, Errors::error1008)
}
fn require_colon(&mut self) -> S::R {
self.require_token(TokenKind::Colon, Errors::error1020)
}
fn require_left_brace(&mut self) -> S::R {
self.require_token(TokenKind::LeftBrace, Errors::error1034)
}
fn require_slashgt(&mut self) -> S::R {
self.require_token(TokenKind::SlashGreaterThan, Errors::error1029)
}
fn require_right_brace(&mut self) -> S::R {
self.require_token(TokenKind::RightBrace, Errors::error1006)
}
fn require_left_paren(&mut self) -> S::R {
self.require_token(TokenKind::LeftParen, Errors::error1019)
}
fn require_left_angle(&mut self) -> S::R {
self.require_token(TokenKind::LessThan, Errors::error1021)
}
fn require_right_angle(&mut self) -> S::R {
self.require_token(TokenKind::GreaterThan, Errors::error1013)
}
fn require_comma(&mut self) -> S::R {
self.require_token(TokenKind::Comma, Errors::error1054)
}
fn require_right_bracket(&mut self) -> S::R {
self.require_token(TokenKind::RightBracket, Errors::error1032)
}
fn require_equal(&mut self) -> S::R {
self.require_token(TokenKind::Equal, Errors::error1036)
}
fn require_arrow(&mut self) -> S::R {
self.require_token(TokenKind::EqualGreaterThan, Errors::error1028)
}
fn require_lambda_arrow(&mut self) -> S::R {
self.require_token(TokenKind::EqualEqualGreaterThan, Errors::error1046)
}
fn require_as(&mut self) -> S::R {
self.require_token(TokenKind::As, Errors::error1023)
}
fn require_while(&mut self) -> S::R {
self.require_token(TokenKind::While, Errors::error1018)
}
fn require_coloncolon(&mut self) -> S::R {
self.require_token(TokenKind::ColonColon, Errors::error1047)
}
fn require_name_or_variable_or_error(&mut self, error: Error) -> S::R {
let mut parser1 = self.clone();
let token = parser1.next_token_as_name();
match token.kind() {
TokenKind::Namespace | TokenKind::Name => {
self.continue_from(parser1);
let token = S!(make_token, self, token);
self.scan_remaining_qualified_name(token)
}
TokenKind::Variable => {
self.continue_from(parser1);
S!(make_token, self, token)
}
_ => {
// ERROR RECOVERY: Create a missing token for the expected token,
// and continue on from the current token. Don't skip it.
self.with_error(error);
S!(make_missing, self, self.pos())
}
}
}
fn require_name_or_variable(&mut self) -> S::R {
self.require_name_or_variable_or_error(Errors::error1050)
}
fn require_xhp_class_name_or_name_or_variable(&mut self) -> S::R {
if self.is_next_xhp_class_name() {
let token = self.next_xhp_class_name();
S!(make_token, self, token)
} else {
self.require_name_or_variable()
}
}
fn require_name_allow_non_reserved(&mut self) -> S::R {
let mut parser1 = self.clone();
let token = parser1.next_token_non_reserved_as_name();
if token.kind() == TokenKind::Name {
self.continue_from(parser1);
S!(make_token, self, token)
} else {
// ERROR RECOVERY: Create a missing token for the expected token,
// and continue on from the current token. Don't skip it.
self.with_error(Errors::error1004);
S!(make_missing, self, self.pos())
}
}
fn next_xhp_category_name(&mut self) -> Token<S> {
self.lexer_mut().next_xhp_category_name()
}
// We have a number of issues involving xhp class names, which begin with
// a colon and may contain internal colons and dashes. These are some
// helper methods to deal with them.
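    //
    // For illustration only (the names are hypothetical, not taken from real
    // input): an XHP reference such as `:ui:button-group` starts with a colon
    // and mixes internal colons and dashes, so the generic `next_token` path
    // would split it into several tokens; the helpers below ask the lexer for
    // XHP-aware scans (e.g. `next_xhp_name`, `next_xhp_class_name`) instead.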
fn is_next_name(&mut self) -> bool {
self.lexer().is_next_name()
}
fn next_xhp_name(&mut self) -> Token<S> {
assert!(self.is_next_name());
self.lexer_mut().next_xhp_name()
}
fn next_xhp_class_name(&mut self) -> Token<S> {
assert!(self.is_next_xhp_class_name());
self.lexer_mut().next_xhp_class_name()
}
fn next_xhp_modifier_class_name(&mut self) -> Token<S> {
self.lexer_mut().next_xhp_modifier_class_name()
}
fn require_xhp_name(&mut self) -> S::R {
if self.is_next_name() {
let token = self.next_xhp_name();
S!(make_token, self, token)
} else {
// ERROR RECOVERY: Create a missing token for the expected token,
// and continue on from the current token. Don't skip it.
// TODO: Different error?
self.with_error(Errors::error1004);
S!(make_missing, self, self.pos())
}
}
fn is_next_xhp_category_name(&mut self) -> bool {
self.lexer().is_next_xhp_category_name()
}
fn parse_comma_list_allow_trailing<F>(
&mut self,
close_predicate: TokenKind,
error: Error,
parse_item: F,
) -> (S::R, bool)
where
F: Fn(&mut Self) -> S::R,
{
self.parse_separated_list(
TokenKind::Comma,
SeparatedListKind::TrailingAllowed,
close_predicate,
error,
parse_item,
)
}
fn parse_separated_list_predicate<P, SP, F>(
&mut self,
separator_predicate: SP,
list_kind: SeparatedListKind,
close_predicate: P,
error: Error,
parse_item: F,
) -> (S::R, bool, TokenKind)
where
P: Fn(TokenKind) -> bool,
SP: Fn(TokenKind) -> bool,
F: Fn(&mut Self) -> S::R,
{
let mut items = vec![];
// Set this when we first see a separator
let mut separator_kind = TokenKind::Empty;
loop {
// At this point we are expecting an item followed by a separator,
// a close, or, if trailing separators are allowed, both
let kind = self.peek_token_kind();
if close_predicate(kind) || kind == TokenKind::EndOfFile {
// ERROR RECOVERY: We expected an item but we found a close or
// the end of the file. Make the item and separator both
// "missing" and give an error.
//
// If items are optional and we found a close, the last item was
// omitted and there was no error.
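                //
                // Hypothetical example: parsing the argument list of `f( )` where at
                // least one argument is required - the parser expects an item but sees
                // `)`, so a missing item/separator pair is recorded and an error is
                // reported.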
if kind == TokenKind::EndOfFile || list_kind != SeparatedListKind::ItemsOptional {
self.with_error(error)
};
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
let list_item = S!(make_list_item, self, missing1, missing2);
// TODO(T25649779)
items.push(list_item);
break;
} else if separator_predicate(kind) {
if separator_kind == TokenKind::Empty {
separator_kind = kind;
} else if separator_kind != kind {
self.with_error(Errors::error1063);
}
// ERROR RECOVERY: We expected an item but we got a separator.
// Assume the item was missing, eat the separator, and move on.
//
// If items are optional, there was no error, so eat the separator and
// continue.
//
// TODO: This could be poor recovery. For example:
//
// function bar (Foo< , int blah)
//
// Plainly the type arg is missing, but the comma is not associated with
// the type argument list, it's associated with the formal
// parameter list.
let token = self.next_token();
if list_kind != SeparatedListKind::ItemsOptional {
self.with_error(error.clone())
}
let item = S!(make_missing, self, self.pos());
let separator = S!(make_token, self, token);
let list_item = S!(make_list_item, self, item, separator);
// TODO(T25649779)
items.push(list_item)
} else {
// We got neither a close nor a separator; hopefully we're going
// to parse an item followed by a close or separator.
let item = parse_item(self);
let kind = self.peek_token_kind();
if close_predicate(kind) {
let missing = S!(make_missing, self, self.pos());
let list_item = S!(make_list_item, self, item, missing);
// TODO(T25649779)
items.push(list_item);
break;
} else if separator_predicate(kind) {
if separator_kind == TokenKind::Empty {
separator_kind = kind;
} else if separator_kind != kind {
self.with_error(Errors::error1063);
}
let token = self.next_token();
let separator = S!(make_token, self, token);
let list_item = S!(make_list_item, self, item, separator);
// TODO(T25649779)
items.push(list_item);
let allow_trailing = list_kind != SeparatedListKind::NoTrailing;
// We got an item followed by a separator; what if the thing
// that comes next is a close?
if allow_trailing && close_predicate(self.peek_token_kind()) {
break;
}
} else {
// ERROR RECOVERY: We were expecting a close or separator, but
// got neither. Bail out. Caller will give an error.
let missing = S!(make_missing, self, self.pos());
let list_item = S!(make_list_item, self, item, missing);
// TODO(T25649779)
items.push(list_item);
break;
}
}
}
let no_arg_is_missing = items.iter().all(|x| !x.is_missing());
let item_list = S!(make_list, self, items, self.pos());
(item_list, no_arg_is_missing, separator_kind)
}
fn parse_list_until_none<F>(&mut self, parse_item: F) -> S::R
where
F: Fn(&mut Self) -> Option<S::R>,
{
let mut acc = vec![];
loop {
let maybe_item = parse_item(self);
match maybe_item {
None => break,
Some(item) => {
let is_missing = item.is_missing();
acc.push(item);
if self.peek_token_kind() == TokenKind::EndOfFile ||
// exit if parser did not make any progress
is_missing
{
break;
}
}
}
}
S!(make_list, self, acc, self.pos())
}
fn parse_separated_list_opt_predicate<P, F>(
&mut self,
separator_kind: TokenKind,
allow_trailing: SeparatedListKind,
close_predicate: P,
error: Error,
parse_item: F,
) -> S::R
where
P: Fn(TokenKind) -> bool,
F: Fn(&mut Self) -> S::R,
{
let kind = self.peek_token_kind();
if close_predicate(kind) {
S!(make_missing, self, self.pos())
} else {
let (items, _, _) = self.parse_separated_list_predicate(
|x| x == separator_kind,
allow_trailing,
close_predicate,
error,
parse_item,
);
items
}
}
fn is_next_xhp_class_name(&self) -> bool {
self.lexer().is_next_xhp_class_name()
}
fn next_xhp_modifier_class_name_or_other_token(&mut self) -> Token<S> {
if self.is_next_name() {
self.next_xhp_modifier_class_name()
} else {
self.next_token()
}
}
fn next_xhp_class_name_or_other_token(&mut self) -> Token<S> {
if self.is_next_xhp_class_name() {
self.next_xhp_class_name()
} else {
self.next_token()
}
}
fn parse_separated_list_opt<F>(
&mut self,
separator_kind: TokenKind,
allow_trailing: SeparatedListKind,
close_kind: TokenKind,
error: Error,
parse_item: F,
) -> S::R
where
F: Fn(&mut Self) -> S::R,
{
self.parse_separated_list_opt_predicate(
separator_kind,
allow_trailing,
|x| x == close_kind,
error,
parse_item,
)
}
fn parse_comma_list_opt_allow_trailing<F>(
&mut self,
close_kind: TokenKind,
error: Error,
parse_item: F,
) -> S::R
where
F: Fn(&mut Self) -> S::R,
{
self.parse_separated_list_opt(
TokenKind::Comma,
SeparatedListKind::TrailingAllowed,
close_kind,
error,
parse_item,
)
}
fn parse_comma_list_opt<F>(
&mut self,
close_kind: TokenKind,
error: Error,
parse_item: F,
) -> S::R
where
F: Fn(&mut Self) -> S::R,
{
self.parse_separated_list_opt(
TokenKind::Comma,
SeparatedListKind::NoTrailing,
close_kind,
error,
parse_item,
)
}
fn parse_comma_list_opt_items_opt<F>(
&mut self,
close_kind: TokenKind,
error: Error,
parse_item: F,
) -> S::R
where
F: Fn(&mut Self) -> S::R,
{
self.parse_separated_list_opt(
TokenKind::Comma,
SeparatedListKind::ItemsOptional,
close_kind,
error,
parse_item,
)
}
fn parse_comma_list_opt_allow_trailing_predicate<P, F>(
&mut self,
close_kind: P,
error: Error,
parse_item: F,
) -> S::R
where
P: Fn(TokenKind) -> bool,
F: Fn(&mut Self) -> S::R,
{
self.parse_separated_list_opt_predicate(
TokenKind::Comma,
SeparatedListKind::TrailingAllowed,
close_kind,
error,
parse_item,
)
}
fn parse_comma_list<F>(&mut self, close_kind: TokenKind, error: Error, parse_item: F) -> S::R
where
F: Fn(&mut Self) -> S::R,
{
let (items, _) = self.parse_separated_list(
TokenKind::Comma,
SeparatedListKind::NoTrailing,
close_kind,
error,
parse_item,
);
items
}
fn parse_delimited_list<P>(
&mut self,
left_kind: TokenKind,
left_error: Error,
right_kind: TokenKind,
right_error: Error,
parse_items: P,
) -> (S::R, S::R, S::R)
where
P: FnOnce(&mut Self) -> S::R,
{
let left = self.require_token(left_kind, left_error);
let items = parse_items(self);
let right = self.require_token(right_kind, right_error);
(left, items, right)
}
fn parse_braced_list<P>(&mut self, parse_items: P) -> (S::R, S::R, S::R)
where
P: FnOnce(&mut Self) -> S::R,
{
self.parse_delimited_list(
TokenKind::LeftBrace,
Errors::error1034,
TokenKind::RightBrace,
Errors::error1006,
parse_items,
)
}
fn parse_parenthesized_list<F>(&mut self, parse_items: F) -> (S::R, S::R, S::R)
where
F: FnOnce(&mut Self) -> S::R,
{
self.parse_delimited_list(
TokenKind::LeftParen,
Errors::error1019,
TokenKind::RightParen,
Errors::error1011,
parse_items,
)
}
fn parse_parenthesized_comma_list<F>(&mut self, parse_item: F) -> (S::R, S::R, S::R)
where
F: Fn(&mut Self) -> S::R,
{
let parse_items =
|x: &mut Self| x.parse_comma_list(TokenKind::RightParen, Errors::error1011, parse_item);
self.parse_parenthesized_list(parse_items)
}
fn parse_parenthesized_comma_list_opt_allow_trailing<F>(
&mut self,
parse_item: F,
) -> (S::R, S::R, S::R)
where
F: Fn(&mut Self) -> S::R,
{
let parse_items = |x: &mut Self| {
x.parse_comma_list_opt_allow_trailing(
TokenKind::RightParen,
Errors::error1011,
parse_item,
)
};
self.parse_parenthesized_list(parse_items)
}
fn parse_parenthesized_comma_list_opt_items_opt<F>(
&mut self,
parse_item: F,
) -> (S::R, S::R, S::R)
where
F: Fn(&mut Self) -> S::R,
{
let parse_items = |x: &mut Self| {
x.parse_comma_list_opt_items_opt(TokenKind::RightParen, Errors::error1011, parse_item)
};
self.parse_parenthesized_list(parse_items)
}
fn parse_braced_comma_list_opt_allow_trailing<F>(&mut self, parse_item: F) -> (S::R, S::R, S::R)
where
F: Fn(&mut Self) -> S::R,
{
let parse_items = |parser: &mut Self| {
parser.parse_comma_list_opt_allow_trailing(
TokenKind::RightBrace,
Errors::error1006,
parse_item,
)
};
self.parse_braced_list(parse_items)
}
fn parse_bracketted_list<F>(&mut self, parse_items: F) -> (S::R, S::R, S::R)
where
F: FnOnce(&mut Self) -> S::R,
{
self.parse_delimited_list(
TokenKind::LeftBracket,
Errors::error1026,
TokenKind::RightBracket,
Errors::error1031,
parse_items,
)
}
fn parse_bracketted_comma_list_opt_allow_trailing<F>(
&mut self,
parse_item: F,
) -> (S::R, S::R, S::R)
where
F: Fn(&mut Self) -> S::R,
{
let parse_items = |x: &mut Self| {
x.parse_comma_list_opt_allow_trailing(
TokenKind::RightBracket,
Errors::error1031,
parse_item,
)
};
self.parse_bracketted_list(parse_items)
}
fn parse_double_angled_list<F>(&mut self, parse_items: F) -> (S::R, S::R, S::R)
where
F: FnOnce(&mut Self) -> S::R,
{
self.parse_delimited_list(
TokenKind::LessThanLessThan,
Errors::error1029,
TokenKind::GreaterThanGreaterThan,
Errors::error1029,
parse_items,
)
}
fn parse_double_angled_comma_list_allow_trailing<F>(
&mut self,
parse_item: F,
) -> (S::R, S::R, S::R)
where
F: Fn(&mut Self) -> S::R,
{
let parse_items = |x: &mut Self| {
let (items, _) = x.parse_comma_list_allow_trailing(
TokenKind::GreaterThanGreaterThan,
Errors::error1029,
parse_item,
);
items
};
self.parse_double_angled_list(parse_items)
}
fn scan_remaining_qualified_name(&mut self, name_token: S::R) -> S::R {
let (name, _) = self.scan_remaining_qualified_name_extended(name_token);
name
}
// Parse with parse_item while a condition is met.
fn parse_list_while<F, P>(&mut self, parse_item: F, predicate: P) -> S::R
where
F: Fn(&mut Self) -> S::R,
P: Fn(&Self) -> bool,
{
let mut items = vec![];
loop {
if self.peek_token_kind() == TokenKind::EndOfFile || !predicate(self) {
break;
};
let lexer_before = self.lexer().clone();
let result = parse_item(self);
if result.is_missing() {
                // ERROR RECOVERY: If the item was parsed as 'missing', then it means
                // the parser bailed out of that scope. So, pass on whatever's been
                // accumulated so far, but with a 'Missing' SyntaxNode appended.
items.push(result);
break;
}
if lexer_before.start() == self.lexer().start()
&& lexer_before.offset() == self.lexer().offset()
{
// INFINITE LOOP PREVENTION: If parse_item does not actually make
// progress, just bail
items.push(result);
break;
}
// Or if nothing's wrong, continue.
items.push(result)
}
S!(make_list, self, items, self.pos())
}
fn parse_terminated_list<F>(&mut self, parse_item: F, terminator: TokenKind) -> S::R
where
F: Fn(&mut Self) -> S::R,
{
let predicate = |x: &Self| x.peek_token_kind() != terminator;
self.parse_list_while(parse_item, predicate)
}
fn skip_and_log_unexpected_token(&mut self, generate_error: bool) {
if generate_error {
let extra_str = &self.current_token_text();
self.with_error_on_whole_token(Errors::error1057(extra_str))
};
let token = self.next_token();
self.add_skipped_token(token)
}
    // Returns true if the two byte strings are of the same length and differ
    // in at most one character.
fn one_character_different<'b>(str1: &'b [u8], str2: &'b [u8]) -> bool {
if str1.len() != str2.len() {
false
} else {
// both strings have same length
let str_len = str1.len();
for i in 0..str_len {
if str1[i] != str2[i] {
// Allow only one mistake
return str1[i + 1..] == str2[i + 1..];
}
}
true
}
}
    // Compare the text of the token we have in hand to the text of the
    // anticipated kind. Note: this automatically returns false for any
    // TokenKind whose text is one character or shorter.
fn is_misspelled_kind(kind: TokenKind, token_str: &str) -> bool {
let tokenkind_str = kind.to_string().as_bytes();
let token_str = token_str.as_bytes();
if tokenkind_str.len() <= 1 {
false
} else {
Self::one_character_different(tokenkind_str, token_str)
}
}
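    // A rough sketch of how the heuristic above behaves (the token texts are
    // hypothetical, not taken from real input):
    //   "functian" vs "function" -> same length, one position differs -> misspelling
    //   "funtcion" vs "function" -> two positions differ              -> not a misspelling
    //   "fn"       vs "function" -> lengths differ                    -> not a misspelling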
fn is_misspelled_from<'b>(kind_list: &[TokenKind], token_str: &'b str) -> bool {
kind_list
.iter()
.any(|x| Self::is_misspelled_kind(*x, token_str))
}
// If token_str is a misspelling (by our narrow definition of misspelling)
// of a TokenKind from kind_list, return the TokenKind that token_str is a
// misspelling of. Otherwise, return None.
fn suggested_kind_from(kind_list: &[TokenKind], token_str: &str) -> Option<TokenKind> {
kind_list.iter().find_map(|x| {
if Self::is_misspelled_kind(*x, token_str) {
Some(*x)
} else {
None
}
})
}
fn skip_and_log_misspelled_token(&mut self, required_kind: TokenKind) {
let received_str = &self.current_token_text();
let required_str = required_kind.to_string();
self.with_error_on_whole_token(Errors::error1058(received_str, required_str));
self.skip_and_log_unexpected_token(/* generate_error:*/ false)
}
fn require_token_one_of(&mut self, kinds: &[TokenKind], error: Error) -> S::R {
let token_kind = self.peek_token_kind();
if kinds.iter().any(|x| *x == token_kind) {
let token = self.next_token();
S!(make_token, self, token)
} else {
// ERROR RECOVERY: Look at the next token after this. Is it the one we
// require? If so, process the current token as extra and return the next
// one. Otherwise, create a missing token for what we required,
// and continue on from the current token (don't skip it).
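            //
            // Hypothetical example of this recovery: the required token is `)` but the
            // input reads `; )`. The `;` is logged and skipped as an unexpected extra
            // token, and the `)` that follows is consumed as the required token.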
let next_kind = self.peek_token_kind_with_lookahead(1);
if kinds.iter().any(|x| *x == next_kind) {
self.skip_and_log_unexpected_token(true);
let token = self.next_token();
S!(make_token, self, token)
} else {
// ERROR RECOVERY: We know we didn't encounter an extra token.
// So, as a second line of defense, check if the current token
// is a misspelling, by our existing narrow definition of misspelling.
let is_misspelling =
|k: &&TokenKind| Self::is_misspelled_kind(**k, self.current_token_text());
let kind = kinds.iter().find(is_misspelling);
match kind {
Some(kind) => {
self.skip_and_log_misspelled_token(*kind);
S!(make_missing, self, self.pos())
}
None => {
self.with_error(error);
S!(make_missing, self, self.pos())
}
}
}
}
}
fn require_token(&mut self, kind: TokenKind, error: Error) -> S::R {
// Must behave as `require_token_one_of parser [kind] error`
if self.peek_token_kind() == kind {
let token = self.next_token();
S!(make_token, self, token)
} else {
// ERROR RECOVERY: Look at the next token after this. Is it the one we
// require? If so, process the current token as extra and return the next
// one. Otherwise, create a missing token for what we required,
// and continue on from the current token (don't skip it).
let next_kind = self.peek_token_kind_with_lookahead(1);
if next_kind == kind {
self.skip_and_log_unexpected_token(true);
let token = self.next_token();
S!(make_token, self, token)
} else {
// ERROR RECOVERY: We know we didn't encounter an extra token.
// So, as a second line of defense, check if the current token
// is a misspelling, by our existing narrow definition of misspelling.
if Self::is_misspelled_kind(kind, self.current_token_text()) {
self.skip_and_log_misspelled_token(kind);
S!(make_missing, self, self.pos())
} else {
self.with_error(error);
S!(make_missing, self, self.pos())
}
}
}
}
fn require_and_return_token(&mut self, kind: TokenKind, error: Error) -> Option<Token<S>> {
if self.peek_token_kind() == kind {
Some(self.next_token())
} else {
// ERROR RECOVERY: Look at the next token after this. Is it the one we
// require? If so, process the current token as extra and return the next
// one. Otherwise, create a missing token for what we required,
// and continue on from the current token (don't skip it).
let next_kind = self.peek_token_kind_with_lookahead(1);
if next_kind == kind {
self.skip_and_log_unexpected_token(true);
Some(self.next_token())
} else {
// ERROR RECOVERY: We know we didn't encounter an extra token.
// So, as a second line of defense, check if the current token
// is a misspelling, by our existing narrow definition of misspelling.
if Self::is_misspelled_kind(kind, self.current_token_text()) {
self.skip_and_log_misspelled_token(kind);
None
} else {
self.with_error(error);
None
}
}
}
}
fn require_name_allow_all_keywords(&mut self) -> S::R {
let mut parser1 = self.clone();
let token = parser1.next_token_as_name();
if token.kind() == TokenKind::Name {
self.continue_from(parser1);
S!(make_token, self, token)
} else {
// ERROR RECOVERY: Create a missing token for the expected token,
// and continue on from the current token. Don't skip it.
self.with_error(Errors::error1004);
S!(make_missing, self, self.pos())
}
}
fn require_right_paren(&mut self) -> S::R {
self.require_token(TokenKind::RightParen, Errors::error1011)
}
fn require_semicolon_token(&mut self, saw_type_name: bool) -> Option<Token<S>> {
match self.peek_token_kind() {
TokenKind::Variable if saw_type_name => self
.require_and_return_token(TokenKind::Semicolon, Errors::local_variable_with_type),
_ => self.require_and_return_token(TokenKind::Semicolon, Errors::error1010),
}
}
fn require_semicolon(&mut self) -> S::R {
self.require_token(TokenKind::Semicolon, Errors::error1010)
}
fn check_stack_limit(&self) {
if let Some(limit) = self.context().stack_limit.as_ref() {
limit.panic_if_exceeded()
}
}
}
| add_skipped_token |
args.ts | import * as yargs from 'yargs';
export const args: () => Arguments = function getArgs() {
const argv = yargs
.option('no-dependencies',
{
alias: 'no-dep',
describe: 'Don\'t show dependencies',
type: 'boolean'
}
)
.option('no-devDependencies',
{
alias: 'no-dev',
describe: 'Don\'t show devDependencies',
type: 'boolean'
}
)
.option('no-peerDependencies',
{
alias: 'no-peer',
describe: 'Don\'t show peerDependencies',
type: 'boolean'
}
)
.option('no-optionalDependencies',
{
alias: 'no-optional',
describe: 'Don\'t show optionalDependencies',
type: 'boolean'
}
)
.completion()
.recommendCommands()
.help()
.argv;
argv.packages = argv._; | export type Arguments = {
'no-dependencies'?: boolean,
'no-devDependencies'?: boolean,
'no-peerDependencies'?: boolean,
'no-optionalDependencies'?: boolean
noDependencies?: boolean,
noDevDependencies?: boolean,
noPeerDependencies?: boolean,
noOptionalDependencies?: boolean
packages: string[]
} |
return argv;
};
|
iid.go | package aws
import (
"context"
"crypto"
"crypto/rsa"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"math"
"os"
"regexp"
"sort"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/hcl"
"github.com/spiffe/go-spiffe/v2/spiffeid"
nodeattestorv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/server/nodeattestor/v1"
configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1"
"github.com/spiffe/spire/pkg/common/agentpathtemplate"
"github.com/spiffe/spire/pkg/common/catalog"
caws "github.com/spiffe/spire/pkg/common/plugin/aws"
nodeattestorbase "github.com/spiffe/spire/pkg/server/plugin/nodeattestor/base"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var (
awsTimeout = 5 * time.Second
instanceFilters = []*ec2.Filter{
{
Name: aws.String("instance-state-name"),
Values: []*string{
aws.String("pending"),
aws.String("running"),
},
},
}
)
const (
maxSecondsBetweenDeviceAttachments int64 = 60
// accessKeyIDVarName env var name for AWS access key ID
accessKeyIDVarName = "AWS_ACCESS_KEY_ID"
    // secretAccessKeyVarName env var name for AWS secret access key
secretAccessKeyVarName = "AWS_SECRET_ACCESS_KEY" //nolint: gosec // false positive
)
// BuiltIn creates a new built-in plugin
func BuiltIn() catalog.BuiltIn {
return builtin(New())
}
func builtin(p *IIDAttestorPlugin) catalog.BuiltIn {
return catalog.MakeBuiltIn(caws.PluginName,
nodeattestorv1.NodeAttestorPluginServer(p),
configv1.ConfigServiceServer(p),
)
}
// IIDAttestorPlugin implements node attestation for agents running in aws.
type IIDAttestorPlugin struct {
nodeattestorbase.Base
nodeattestorv1.UnsafeNodeAttestorServer
configv1.UnsafeConfigServer
config *IIDAttestorConfig
mtx sync.RWMutex
clients *clientsCache
// test hooks
hooks struct {
getAWSCAPublicKey func() (*rsa.PublicKey, error)
getenv func(string) string
}
log hclog.Logger
}
// IIDAttestorConfig holds hcl configuration for IID attestor plugin
type IIDAttestorConfig struct {
SessionConfig `hcl:",squash"`
SkipBlockDevice bool `hcl:"skip_block_device"`
DisableInstanceProfileSelectors bool `hcl:"disable_instance_profile_selectors"`
LocalValidAcctIDs []string `hcl:"account_ids_for_local_validation"`
AgentPathTemplate string `hcl:"agent_path_template"`
AssumeRole string `hcl:"assume_role"`
pathTemplate *agentpathtemplate.Template
trustDomain spiffeid.TrustDomain
awsCAPublicKey *rsa.PublicKey
}
// New creates a new IIDAttestorPlugin.
func New() *IIDAttestorPlugin |
// Attest implements the server side logic for the aws iid node attestation plugin.
func (p *IIDAttestorPlugin) Attest(stream nodeattestorv1.NodeAttestor_AttestServer) error {
req, err := stream.Recv()
if err != nil {
return err
}
payload := req.GetPayload()
if payload == nil {
return status.Error(codes.InvalidArgument, "missing attestation payload")
}
c, err := p.getConfig()
if err != nil {
return err
}
attestationData, err := unmarshalAndValidateIdentityDocument(payload, c.awsCAPublicKey)
if err != nil {
return err
}
inTrustAcctList := false
for _, id := range c.LocalValidAcctIDs {
if attestationData.AccountID == id {
inTrustAcctList = true
break
}
}
awsClient, err := p.clients.getClient(attestationData.Region, attestationData.AccountID)
if err != nil {
return status.Errorf(codes.Internal, "failed to get client: %v", err)
}
ctx, cancel := context.WithTimeout(stream.Context(), awsTimeout)
defer cancel()
instancesDesc, err := awsClient.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String(attestationData.InstanceID)},
Filters: instanceFilters,
})
if err != nil {
return status.Errorf(codes.Internal, "failed to describe instance: %v", err)
}
// Ideally we wouldn't do this work at all if the agent has already attested
// e.g. do it after the call to `p.IsAttested`, however, we may need
// the instance to construct tags used in the agent ID.
//
    // This overhead will only affect agents attempting to re-attest, which
// should be a very small portion of the overall server workload. This
// is a potential DoS vector.
shouldCheckBlockDevice := !inTrustAcctList && !c.SkipBlockDevice
var instance *ec2.Instance
var tags = make(instanceTags)
if strings.Contains(c.AgentPathTemplate, ".Tags") || shouldCheckBlockDevice {
var err error
instance, err = p.getEC2Instance(instancesDesc)
if err != nil {
return err
}
tags = tagsFromInstance(instance)
}
if shouldCheckBlockDevice {
if err = p.checkBlockDevice(instance); err != nil {
return status.Errorf(codes.Internal, "failed aws ec2 attestation: %v", err)
}
}
agentID, err := makeAgentID(c.trustDomain, c.pathTemplate, attestationData, tags)
if err != nil {
return status.Errorf(codes.Internal, "failed to create spiffe ID: %v", err)
}
attested, err := p.IsAttested(stream.Context(), agentID.String())
switch {
case err != nil:
return err
case attested:
return status.Error(codes.PermissionDenied, "IID has already been used to attest an agent")
}
selectorValues, err := p.resolveSelectors(stream.Context(), instancesDesc, awsClient)
if err != nil {
return err
}
return stream.Send(&nodeattestorv1.AttestResponse{
Response: &nodeattestorv1.AttestResponse_AgentAttributes{
AgentAttributes: &nodeattestorv1.AgentAttributes{
CanReattest: false,
SpiffeId: agentID.String(),
SelectorValues: selectorValues,
},
},
})
}
// Configure configures the IIDAttestorPlugin.
func (p *IIDAttestorPlugin) Configure(ctx context.Context, req *configv1.ConfigureRequest) (*configv1.ConfigureResponse, error) {
config := new(IIDAttestorConfig)
if err := hcl.Decode(config, req.HclConfiguration); err != nil {
return nil, status.Errorf(codes.InvalidArgument, "unable to decode configuration: %v", err)
}
// Get the AWS CA public key. We do this lazily on configure so deployments
// not using this plugin don't pay for parsing it on startup. This
// operation should not fail, but we check the return value just in case.
awsCAPublicKey, err := p.hooks.getAWSCAPublicKey()
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to load the AWS CA public key: %v", err)
}
config.awsCAPublicKey = awsCAPublicKey
if err := config.Validate(p.hooks.getenv(accessKeyIDVarName), p.hooks.getenv(secretAccessKeyVarName)); err != nil {
return nil, err
}
if req.CoreConfiguration == nil {
return nil, status.Error(codes.InvalidArgument, "core configuration is required")
}
config.trustDomain, err = spiffeid.TrustDomainFromString(req.CoreConfiguration.TrustDomain)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "core configuration has invalid trust domain: %v", err)
}
config.pathTemplate = defaultAgentPathTemplate
if len(config.AgentPathTemplate) > 0 {
tmpl, err := agentpathtemplate.Parse(config.AgentPathTemplate)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "failed to parse agent svid template: %q", config.AgentPathTemplate)
}
config.pathTemplate = tmpl
}
p.mtx.Lock()
defer p.mtx.Unlock()
p.config = config
p.clients.configure(config.SessionConfig)
return &configv1.ConfigureResponse{}, nil
}
// SetLogger sets this plugin's logger
func (p *IIDAttestorPlugin) SetLogger(log hclog.Logger) {
p.log = log
}
func (p *IIDAttestorPlugin) checkBlockDevice(instance *ec2.Instance) error {
ifaceZeroDeviceIndex := *instance.NetworkInterfaces[0].Attachment.DeviceIndex
if ifaceZeroDeviceIndex != 0 {
return fmt.Errorf("the EC2 instance network interface attachment device index must be zero (has %d)", ifaceZeroDeviceIndex)
}
ifaceZeroAttachTime := instance.NetworkInterfaces[0].Attachment.AttachTime
    // Skip the anti-tampering mechanism when RootDeviceType is instance-store.
    // Specifically, if the root device is persistent and it was attached more
    // than a threshold amount of time apart from network interface 0, fail
    // attestation.
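    //
    // Illustrative numbers (hypothetical, not taken from a real instance): if
    // network interface 0 was attached at t=100s and the root EBS volume at
    // t=170s, the disparity is 70s, which exceeds
    // maxSecondsBetweenDeviceAttachments (60s), so attestation fails.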
if *instance.RootDeviceType != ec2.DeviceTypeInstanceStore {
rootDeviceIndex := -1
for i, bdm := range instance.BlockDeviceMappings {
if *bdm.DeviceName == *instance.RootDeviceName {
rootDeviceIndex = i
break
}
}
if rootDeviceIndex == -1 {
return fmt.Errorf("failed to locate the root device block mapping with name %q", *instance.RootDeviceName)
}
rootDeviceAttachTime := instance.BlockDeviceMappings[rootDeviceIndex].Ebs.AttachTime
attachTimeDisparitySeconds := int64(math.Abs(float64(ifaceZeroAttachTime.Unix() - rootDeviceAttachTime.Unix())))
if attachTimeDisparitySeconds > maxSecondsBetweenDeviceAttachments {
return fmt.Errorf("failed checking the disparity device attach times, root BlockDeviceMapping and NetworkInterface[0] attach times differ by %d seconds", attachTimeDisparitySeconds)
}
}
return nil
}
func (p *IIDAttestorPlugin) getConfig() (*IIDAttestorConfig, error) {
p.mtx.RLock()
defer p.mtx.RUnlock()
if p.config == nil {
return nil, status.Error(codes.FailedPrecondition, "not configured")
}
return p.config, nil
}
func (p *IIDAttestorPlugin) getEC2Instance(instancesDesc *ec2.DescribeInstancesOutput) (*ec2.Instance, error) {
if len(instancesDesc.Reservations) < 1 {
return nil, status.Error(codes.Internal, "failed to query AWS via describe-instances: returned no reservations")
}
if len(instancesDesc.Reservations[0].Instances) < 1 {
return nil, status.Error(codes.Internal, "failed to query AWS via describe-instances: returned no instances")
}
return instancesDesc.Reservations[0].Instances[0], nil
}
func tagsFromInstance(instance *ec2.Instance) instanceTags {
tags := make(instanceTags, len(instance.Tags))
for _, tag := range instance.Tags {
if tag != nil && tag.Key != nil && tag.Value != nil {
tags[*tag.Key] = *tag.Value
}
}
return tags
}
func unmarshalAndValidateIdentityDocument(data []byte, pubKey *rsa.PublicKey) (ec2metadata.EC2InstanceIdentityDocument, error) {
var attestationData caws.IIDAttestationData
if err := json.Unmarshal(data, &attestationData); err != nil {
return ec2metadata.EC2InstanceIdentityDocument{}, status.Errorf(codes.InvalidArgument, "failed to unmarshal the attestation data: %v", err)
}
var doc ec2metadata.EC2InstanceIdentityDocument
if err := json.Unmarshal([]byte(attestationData.Document), &doc); err != nil {
return ec2metadata.EC2InstanceIdentityDocument{}, status.Errorf(codes.InvalidArgument, "failed to unmarshal the IID: %v", err)
}
docHash := sha256.Sum256([]byte(attestationData.Document))
sigBytes, err := base64.StdEncoding.DecodeString(attestationData.Signature)
if err != nil {
return ec2metadata.EC2InstanceIdentityDocument{}, status.Errorf(codes.InvalidArgument, "failed to decode the IID signature: %v", err)
}
if err := rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, docHash[:], sigBytes); err != nil {
return ec2metadata.EC2InstanceIdentityDocument{}, status.Errorf(codes.InvalidArgument, "failed to verify the cryptographic signature: %v", err)
}
return doc, nil
}
func (p *IIDAttestorPlugin) resolveSelectors(parent context.Context, instancesDesc *ec2.DescribeInstancesOutput, client Client) ([]string, error) {
selectorSet := map[string]bool{}
addSelectors := func(values []string) {
for _, value := range values {
selectorSet[value] = true
}
}
c, err := p.getConfig()
if err != nil {
return nil, err
}
for _, reservation := range instancesDesc.Reservations {
for _, instance := range reservation.Instances {
addSelectors(resolveTags(instance.Tags))
addSelectors(resolveSecurityGroups(instance.SecurityGroups))
if !c.DisableInstanceProfileSelectors && instance.IamInstanceProfile != nil && instance.IamInstanceProfile.Arn != nil {
instanceProfileName, err := instanceProfileNameFromArn(*instance.IamInstanceProfile.Arn)
if err != nil {
return nil, err
}
ctx, cancel := context.WithTimeout(parent, awsTimeout)
defer cancel()
output, err := client.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{
InstanceProfileName: aws.String(instanceProfileName),
})
if err != nil {
                    return nil, status.Errorf(codes.Internal, "failed to get instance profile: %v", err)
}
addSelectors(resolveInstanceProfile(output.InstanceProfile))
}
}
}
// build and sort selectors
selectors := []string{}
for value := range selectorSet {
selectors = append(selectors, value)
}
sort.Strings(selectors)
return selectors, nil
}
func resolveTags(tags []*ec2.Tag) []string {
values := make([]string, 0, len(tags))
for _, tag := range tags {
if tag != nil {
values = append(values, fmt.Sprintf("tag:%s:%s", aws.StringValue(tag.Key), aws.StringValue(tag.Value)))
}
}
return values
}
func resolveSecurityGroups(sgs []*ec2.GroupIdentifier) []string {
values := make([]string, 0, len(sgs)*2)
for _, sg := range sgs {
if sg != nil {
values = append(values,
fmt.Sprintf("sg:id:%s", aws.StringValue(sg.GroupId)),
fmt.Sprintf("sg:name:%s", aws.StringValue(sg.GroupName)),
)
}
}
return values
}
func resolveInstanceProfile(instanceProfile *iam.InstanceProfile) []string {
if instanceProfile == nil {
return nil
}
values := make([]string, 0, len(instanceProfile.Roles))
for _, role := range instanceProfile.Roles {
if role != nil && role.Arn != nil {
values = append(values, fmt.Sprintf("iamrole:%s", aws.StringValue(role.Arn)))
}
}
return values
}
var reInstanceProfileARNResource = regexp.MustCompile(`instance-profile[/:](.+)`)
func instanceProfileNameFromArn(profileArn string) (string, error) {
a, err := arn.Parse(profileArn)
if err != nil {
return "", status.Errorf(codes.Internal, "failed to parse %v", err)
}
m := reInstanceProfileARNResource.FindStringSubmatch(a.Resource)
if m == nil {
return "", status.Errorf(codes.Internal, "arn is not for an instance profile")
}
name := strings.Split(m[1], "/")
// only the last element is the profile name
return name[len(name)-1], nil
}
| {
p := &IIDAttestorPlugin{}
p.clients = newClientsCache(defaultNewClientCallback)
p.hooks.getAWSCAPublicKey = getAWSCAPublicKey
p.hooks.getenv = os.Getenv
return p
} |
cairo_back.rs | // allows e.g. raw_data[dst_off + x * 4 + 2] = buf[src_off + x * 4 + 0];
#![allow(clippy::identity_op)]
//! Support for piet Cairo back-end.
use cairo::{Context, Format, ImageSurface};
#[cfg(feature = "png")]
use png::{ColorType, Encoder};
#[cfg(feature = "png")]
use std::fs::File;
#[cfg(feature = "png")]
use std::io::BufWriter;
use std::marker::PhantomData;
use std::path::Path;
use piet::ImageFormat;
#[doc(hidden)]
pub use piet_cairo::*;
/// The `RenderContext` for the Cairo backend, which is selected.
pub type Piet<'a> = CairoRenderContext<'a>;
/// The associated brush type for this backend.
///
/// This type matches `RenderContext::Brush`
pub type Brush = piet_cairo::Brush;
/// The associated text factory for this backend.
///
/// This type matches `RenderContext::Text`
pub type PietText = CairoText;
/// The associated font type for this backend.
///
/// This type matches `RenderContext::Text::Font`
pub type PietFont = CairoFont;
/// The associated font builder for this backend.
///
/// This type matches `RenderContext::Text::FontBuilder`
pub type PietFontBuilder = CairoFontBuilder;
/// The associated text layout type for this backend.
///
/// This type matches `RenderContext::Text::TextLayout`
pub type PietTextLayout = CairoTextLayout;
/// The associated text layout builder for this backend.
///
/// This type matches `RenderContext::Text::TextLayoutBuilder`
pub type PietTextLayoutBuilder = CairoTextLayoutBuilder;
/// The associated image type for this backend.
///
/// This type matches `RenderContext::Image`
pub type Image = ImageSurface;
/// A struct that can be used to create bitmap render contexts.
///
/// In the case of Cairo, being a software renderer, no state is needed.
pub struct Device {
    // Since not all backends can support `Device: Sync`, make it non-Sync here too,
    // for fewer portability surprises.
marker: std::marker::PhantomData<*const ()>,
}
unsafe impl Send for Device {}
/// A struct that provides a `RenderContext` and can then have its bitmap extracted.
pub struct BitmapTarget<'a> {
surface: ImageSurface,
cr: Context,
phantom: PhantomData<&'a ()>,
}
impl Device {
/// Create a new device.
pub fn new() -> Result<Device, piet::Error> |
/// Create a new bitmap target.
pub fn bitmap_target(
&mut self,
width: usize,
height: usize,
pix_scale: f64,
) -> Result<BitmapTarget, piet::Error> {
let surface = ImageSurface::create(Format::ARgb32, width as i32, height as i32).unwrap();
let cr = Context::new(&surface);
cr.scale(pix_scale, pix_scale);
let phantom = Default::default();
Ok(BitmapTarget {
surface,
cr,
phantom,
})
}
}
impl<'a> BitmapTarget<'a> {
/// Get a piet `RenderContext` for the bitmap.
///
/// Note: caller is responsible for calling `finish` on the render
/// context at the end of rendering.
pub fn render_context(&mut self) -> CairoRenderContext {
CairoRenderContext::new(&self.cr)
}
/// Get raw RGBA pixels from the bitmap by copying them into `buf`. If all the pixels were
/// copied, returns the number of bytes written. If `buf` wasn't big enough, returns an error
/// and doesn't write anything.
pub fn copy_raw_pixels(
&mut self,
fmt: ImageFormat,
buf: &mut [u8],
) -> Result<usize, piet::Error> {
// TODO: convert other formats.
if fmt != ImageFormat::RgbaPremul {
return Err(piet::Error::NotSupported);
}
self.surface.flush();
let stride = self.surface.get_stride() as usize;
let width = self.surface.get_width() as usize;
let height = self.surface.get_height() as usize;
let size = width * height * 4;
if buf.len() < size {
return Err(piet::Error::InvalidInput);
}
unsafe {
// Cairo's rust wrapper has extra safety checks that we want to avoid: it won't let us
// get the data from an ImageSurface that's still referenced by a context. The C docs
// don't seem to think that's a problem, as long as we call flush (which we already
// did), and promise not to mutate anything.
// https://www.cairographics.org/manual/cairo-Image-Surfaces.html#cairo-image-surface-get-data
//
// TODO: we can simplify this once cairo makes a release containing
// https://github.com/gtk-rs/cairo/pull/330
let data_len = height.saturating_sub(1) * stride + width * 4;
let data = {
let data_ptr = cairo_sys::cairo_image_surface_get_data(self.surface.to_raw_none());
if data_ptr.is_null() {
let err = cairo::BorrowError::from(cairo::Status::SurfaceFinished);
return Err((Box::new(err) as Box<dyn std::error::Error>).into());
}
std::slice::from_raw_parts(data_ptr, data_len)
};
// A sanity check for all the unsafe indexing that follows.
assert!(data.get(data_len - 1).is_some());
assert!(buf.get(size - 1).is_some());
for y in 0..height {
let src_off = y * stride;
let dst_off = y * width * 4;
for x in 0..width {
// These unchecked indexes allow the autovectorizer to shine.
// Note that dst_off maxes out at (height - 1) * width * 4, and so
// dst_off + x * 4 + 3 maxes out at height * width * 4 - 1, which is size - 1.
// Also, src_off maxes out at (height - 1) * stride, and so
// src_off + x * 4 + 3 maxes out at (height - 1) * stride + width * 4 - 1,
// which is data_len - 1.
*buf.get_unchecked_mut(dst_off + x * 4 + 0) =
*data.get_unchecked(src_off + x * 4 + 2);
*buf.get_unchecked_mut(dst_off + x * 4 + 1) =
*data.get_unchecked(src_off + x * 4 + 1);
*buf.get_unchecked_mut(dst_off + x * 4 + 2) =
*data.get_unchecked(src_off + x * 4 + 0);
*buf.get_unchecked_mut(dst_off + x * 4 + 3) =
*data.get_unchecked(src_off + x * 4 + 3);
}
}
}
Ok(size)
}
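    // A minimal usage sketch of the pixel-copy path above (the sizes and the
    // draw step are assumptions for illustration, not taken from a real caller):
    //
    //     let mut device = Device::new()?;
    //     let mut target = device.bitmap_target(64, 64, 1.0)?;
    //     {
    //         let mut rc = target.render_context();
    //         // ... draw with `rc`, then ...
    //         rc.finish()?;
    //     }
    //     let mut buf = vec![0u8; 64 * 64 * 4];
    //     let written = target.copy_raw_pixels(ImageFormat::RgbaPremul, &mut buf)?;
    //     assert_eq!(written, buf.len());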
/// Get raw RGBA pixels from the bitmap.
pub fn raw_pixels(&mut self, fmt: ImageFormat) -> Result<Vec<u8>, piet::Error> {
let width = self.surface.get_width() as usize;
let height = self.surface.get_height() as usize;
let mut buf = vec![0; width * height * 4];
self.copy_raw_pixels(fmt, &mut buf)?;
Ok(buf)
}
/// Get raw RGBA pixels from the bitmap.
#[deprecated(since = "0.2.0", note = "use raw_pixels")]
pub fn into_raw_pixels(mut self, fmt: ImageFormat) -> Result<Vec<u8>, piet::Error> {
self.raw_pixels(fmt)
}
/// Save bitmap to RGBA PNG file
#[cfg(feature = "png")]
pub fn save_to_file<P: AsRef<Path>>(mut self, path: P) -> Result<(), piet::Error> {
let height = self.surface.get_height();
let width = self.surface.get_width();
let image = self.raw_pixels(ImageFormat::RgbaPremul)?;
let file = BufWriter::new(File::create(path).map_err(Into::<Box<_>>::into)?);
let mut encoder = Encoder::new(file, width as u32, height as u32);
encoder.set_color(ColorType::RGBA);
encoder
.write_header()
.map_err(Into::<Box<_>>::into)?
.write_image_data(&image)
.map_err(Into::<Box<_>>::into)?;
Ok(())
}
    /// Stub used when the `png` feature is missing.
#[cfg(not(feature = "png"))]
pub fn save_to_file<P: AsRef<Path>>(self, _path: P) -> Result<(), piet::Error> {
Err(piet::Error::MissingFeature)
}
}
| {
Ok(Device {
marker: std::marker::PhantomData,
})
} |
traceDetailsViewCommands.js | import { newLogger } from '@dbux/common/src/log/logger';
import { registerCommand } from './commandUtil';
import { showInformationMessage } from '../codeUtil/codeModals';
import { nextMode } from '../traceDetailsView/nodes/ExecutionsTDNodes';
import { NavigationMethods } from '../traceDetailsView/nodes/NavigationNode';
import { translate } from '../lang';
// eslint-disable-next-line no-unused-vars
const { log, debug, warn, error: logError } = newLogger('Commands');
export function initTraceDetailsViewCommands(context, traceDetailsViewController) {
registerCommand(context,
'dbuxTraceDetailsView.nextGroupingMode',
(/* node */) => {
nextMode();
traceDetailsViewController.refresh();
}
);
registerCommand(context,
'dbuxTraceDetailsView.expandNode',
async (node) => {
await node.treeNodeProvider.treeView.reveal(node, { select: false, expand: 2 });
}
);
registerCommand(context,
'dbuxTraceDetailsView.selectObject',
(node) => {
node.selectObject();
}
);
registerCommand(context,
'dbuxTraceDetailsView.valueRender',
(node) => {
node.valueRender();
}
);
for (let methodName of NavigationMethods) {
registerCommand(context,
`dbuxTraceDetailsView.navigation.${methodName}`,
(navigationNode) => {
navigationNode?.select(methodName);
}
);
}
registerCommand(context,
'dbuxTraceDetailsView.selectTraceAtCursor',
traceDetailsViewController.selectTraceAtCursor
);
registerCommand(context,
'dbuxTraceDetailsView.selectTraceAtCursor.empty',
() => showInformationMessage(translate('noTrace'))
);
registerCommand(context,
'dbuxTraceDetailsView.node.selectWriteTrace',
(node) => {
node.selectWriteTrace(); |
registerCommand(context,
'dbuxTraceDetailsView.node.selectValueCreation',
(node) => {
node.selectValueCreationTrace();
}
);
registerCommand(context,
'dbuxTraceDetailsView.node.selectForkParent',
(node) => {
node.selectForkParent();
}
);
registerCommand(context,
'dbuxTraceDetailsView.node.selectScheduler',
(node) => {
node.selectScheduler();
}
);
registerCommand(context,
'dbuxTraceDetailsView.context.showError',
() => traceDetailsViewController.showError()
);
registerCommand(context,
'dbuxTraceDetailsView.context.showError.disabled',
() => showInformationMessage('No error occurred.')
);
} | }
); |
gen_models.py | # flake8: noqa
import csv
def model_name(table_name):
if table_name in ["vtm", "vpi", "vmp", "vmpp", "amp", "ampp", "gtin"]:
return table_name.upper()
else:
return "".join(tok.title() for tok in table_name.split("_"))
def quote(s):
|
with open("schema.csv") as f:
lines = list(csv.DictReader(f))
print("from django.db import models")
table = None
for line in lines:
if line["table"] == "ccontent":
continue
if line["table"] != table:
table = line["table"]
print()
print()
print(f"class {model_name(table)}(models.Model):")
print("# class Meta:")
print('# verbose_name = "TODO"')
print()
if line["type"] == "retired":
continue
options = []
if line["primary_key"] == "True":
options.append(("primary_key", "True"))
if line["db_column"]:
options.append(("db_column", quote(line["db_column"])))
if line["type"] in ["ForeignKey", "OneToOneField"]:
options.append(("to", quote(model_name(line["to"]))))
options.append(("on_delete", "models.CASCADE"))
if "prevcd" in line["db_column"] or "uomcd" in line["db_column"]:
options.append(("related_name", quote("+")))
elif line["type"] == "CharField":
options.append(("max_length", line["max_length"]))
elif line["type"] == "DecimalField":
options.append(("max_digits", line["max_digits"]))
options.append(("decimal_places", line["decimal_places"]))
if line["optional"] == "Y":
if line["type"] != "BooleanField" and line["primary_key"] != "True":
options.append(("null", "True"))
options.append(("help_text", quote(line["descr"])))
print(f' {line["field"]} = models.{line["type"]}(')
for k, v in options:
print(f" {k}={v},")
print(" )")
| assert '"' not in s
return '"' + s + '"' |
idp_interface.py | import json
import logging
import random
import requests
from hashlib import sha1 as sha_constructor
from django.conf import settings
from gluu_ecommerce.connectors.uma_access import obtain_authorized_rpt_token
logger = logging.getLogger('idp')
SCIM_CREATE_USER_ENDPOINT = 'https://idp.gluu.org/identity/seam/resource/restv1/scim/v2/Users/'
SCIM_UPDATE_USER_ENDPOINT = 'https://idp.gluu.org/identity/seam/resource/restv1/scim/v2/Users/{}/'
def create_user(user, password, active=False):
headers = {'Content-Type': 'application/json'}
params = {}
payload = {
'schemas': ['urn:ietf:params:scim:schemas:core:2.0:User'],
'userName': sha_constructor(str(random.random())).hexdigest()[:12],
'name': {'givenName': user.first_name, 'familyName': user.last_name},
        'displayName': u'{} {}'.format(user.first_name, user.last_name),
'password': password,
'emails': [
{'value': user.email, 'primary': True, 'type': 'Work'}
],
'phoneNumbers': [
{'value': user.phone_number, 'primary': True, 'type': 'Work'}
],
}
if active:
payload['active'] = True
url = SCIM_CREATE_USER_ENDPOINT
if settings.SCIM_TEST_MODE:
params['access_token'] = settings.SCIM_TEST_MODE_ACCESS_TOKEN
else:
rpt = obtain_authorized_rpt_token(resource_uri=url)
headers['Authorization'] = 'Bearer {}'.format(rpt)
response = requests.post(
url,
data=json.dumps(payload),
verify=settings.VERIFY_SSL,
headers=headers,
params=params
)
if response.status_code != 201:
message = 'Error writing to idp: {} {}'.format(response.status_code, response.text)
logger.error(message)
raise Exception(message)
else:
response = response.json()
return response['id']
| headers = {'Content-Type': 'application/json'}
params = {}
url = SCIM_UPDATE_USER_ENDPOINT.format(user.idp_uuid)
if settings.SCIM_TEST_MODE:
params['access_token'] = settings.SCIM_TEST_MODE_ACCESS_TOKEN
else:
rpt = obtain_authorized_rpt_token(resource_uri=url)
headers['Authorization'] = 'Bearer {}'.format(rpt)
payload = {'active': True}
response = requests.put(
url,
data=json.dumps(payload),
verify=settings.VERIFY_SSL,
headers=headers,
params=params
)
if response.status_code != 200:
message = 'Error writing to idp: {} {}'.format(response.status_code, response.text)
logger.error(message)
raise Exception(message)
def update_user(user):
headers = {'Content-Type': 'application/json'}
params = {}
if not user.idp_uuid:
logger.error('Error writing to idp, missing uid: {}'.format(user.email))
return
url = SCIM_UPDATE_USER_ENDPOINT.format(user.idp_uuid)
if settings.SCIM_TEST_MODE:
params['access_token'] = settings.SCIM_TEST_MODE_ACCESS_TOKEN
else:
rpt = obtain_authorized_rpt_token(resource_uri=url)
headers['Authorization'] = 'Bearer {}'.format(rpt)
payload = {
'name': {'givenName': user.first_name, 'familyName': user.last_name},
        'displayName': u'{} {}'.format(user.first_name, user.last_name),
'phoneNumbers': [
{'value': user.mobile_number, 'primary': True, 'type': 'Work'}
],
'timezone': user.timezone,
'title': user.job_title
}
response = requests.put(
url,
data=json.dumps(payload),
verify=settings.VERIFY_SSL,
headers=headers,
params=params
)
if response.status_code != 200:
message = 'Error writing to idp: {} {}'.format(response.status_code, response.text)
logger.error(message)
raise Exception(message)
else:
logger.info('Successfully updated {}'.format(user.email))
def get_user(user):
if not user.idp_uuid:
logger.error('Error writing to idp, missing uid: {}'.format(user.email))
return
headers = {'Content-Type': 'application/json'}
params = {}
url = SCIM_UPDATE_USER_ENDPOINT.format(user.idp_uuid)
if settings.SCIM_TEST_MODE:
params['access_token'] = settings.SCIM_TEST_MODE_ACCESS_TOKEN
else:
rpt = obtain_authorized_rpt_token(resource_uri=url)
headers['Authorization'] = 'Bearer {}'.format(rpt)
response = requests.get(url, verify=settings.VERIFY_SSL, headers=headers)
if response.status_code != 200:
message = 'Error retrieving idp: {} {}'.format(response.status_code, response.text)
logger.error(message)
raise Exception(message)
else:
return response.json()
def email_exists(email):
headers = {'Content-Type': 'application/json'}
url = SCIM_CREATE_USER_ENDPOINT
params = {'filter': 'emails.value eq "{}"'.format(email)}
if settings.SCIM_TEST_MODE:
params['access_token'] = settings.SCIM_TEST_MODE_ACCESS_TOKEN
else:
rpt = obtain_authorized_rpt_token(resource_uri=url)
headers['Authorization'] = 'Bearer {}'.format(rpt)
response = requests.get(url, verify=settings.VERIFY_SSL, headers=headers, params=params)
if response.status_code != 200:
message = 'Error retrieving from idp: {} {}'.format(response.status_code, response.text)
logger.error(message)
raise Exception(message)
else:
no_records = int(response.json()['totalResults'])
if no_records not in [0, 1]:
            message = 'Unexpected number of records found for {}'.format(email)
logger.error(message)
raise Exception(message)
return no_records == 1 |
def activate_user(user):
|
models.py | import base64
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
import string
import random
| DEFAULT_ENVIRONMENT_ID = 1
class Flavor(models.Model):
name = models.CharField(max_length=512)
slug = models.CharField(max_length=512)
cpu = models.TextField(blank=True, null=True)
mem = models.TextField(blank=True, null=True)
gpu = models.TextField(blank=True, null=True)
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.name)
class Environment(models.Model):
name = models.CharField(max_length=512)
slug = models.CharField(max_length=512, blank=True, null=True)
image = models.CharField(max_length=512)
dockerfile = models.TextField(default='FROM jupyter/base-notebook')
startup = models.TextField(null=True, blank=True)
teardown = models.TextField(null=True, blank=True)
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.name)
class ProjectManager(models.Manager):
def generate_passkey(self, length=20):
import secrets
import string
alphabet = string.ascii_letters + string.digits
password = ''.join(secrets.choice(alphabet) for _ in range(length))
# Base64-encode the key (encoding, not encryption)
password = password.encode('ascii')
base64_bytes = base64.b64encode(password)
password = base64_bytes.decode('ascii')
return password
def create_project(self, name, owner, description, repository):
letters = string.ascii_lowercase
slug = name.replace(" ","-").replace("_","-")
from .helpers import urlify
slug = urlify(slug)
slug_extension = ''.join(random.choice(letters) for i in range(3))
slug = '{}-{}'.format(slugify(slug), slug_extension)
key = self.generate_passkey()
secret = self.generate_passkey(40)
project = self.create(name=name, owner=owner, slug=slug, project_key=key, project_secret=secret,
description=description, repository=repository,
repository_imported=False)
return project
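# --- Usage sketch (editor addition, not part of the original models) ---
# Shows a typical call to the custom manager method defined above; the owner
# and literal values are placeholders.
#
#   project = Project.objects.create_project(
#       name='My Analysis',
#       owner=some_user,                     # a django.contrib.auth User
#       description='Example project',
#       repository='https://example.com/repo.git',
#   )
#   assert project.slug.startswith('my-analysis-')   # slug plus 3 random letters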
class Project(models.Model):
objects = ProjectManager()
name = models.CharField(max_length=512, unique=True)
description = models.TextField(null=True, blank=True)
slug = models.CharField(max_length=512, unique=True)
owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='owner')
authorized = models.ManyToManyField(User, blank=True)
image = models.CharField(max_length=2048, blank=True, null=True)
project_key = models.CharField(max_length=512)
project_secret = models.CharField(max_length=512)
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
repository = models.CharField(max_length=512, null=True, blank=True)
repository_imported = models.BooleanField(default=False)
def __str__(self):
return "Name: {} Description: {}".format(self.name, self.description)
environment = models.ForeignKey('projects.Environment', on_delete=models.DO_NOTHING, default=DEFAULT_ENVIRONMENT_ID)
clone_url = models.CharField(max_length=512, null=True, blank=True) | |
ptr.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Conveniences for working with unsafe pointers, the `*T` and `*mut T` types.
//!
//! Working with unsafe pointers in Rust is fairly uncommon,
//! and often limited to some narrow use cases: holding
//! an unsafe pointer when safe pointers are unsuitable;
//! checking for null; and converting back to safe pointers.
//! As a result, there is not yet an abundance of library code
//! for working with unsafe pointers, and in particular,
//! since pointer math is fairly uncommon in Rust, it is not
//! all that convenient.
//!
//! Use the [`null` function](fn.null.html) to create null pointers,
//! the [`is_null`](trait.RawPtr.html#tymethod.is_null)
//! and [`is_not_null`](trait.RawPtr.html#method.is_not_null)
//! methods of the [`RawPtr` trait](trait.RawPtr.html) to check for null.
//! The `RawPtr` trait is imported by the prelude, so `is_null` etc.
//! work everywhere.
//!
//! # Common ways to create unsafe pointers
//!
//! ## 1. Coerce a reference (`&T`) or mutable reference (`&mut T`).
//!
//! ```
//! let my_num: int = 10;
//! let my_num_ptr: *int = &my_num;
//! let mut my_speed: int = 88;
//! let my_speed_ptr: *mut int = &mut my_speed;
//! ```
//!
//! This does not take ownership of the original allocation
//! and requires no resource management later,
//! but you must not use the pointer after its lifetime.
//!
//! ## 2. Transmute an owned box (`~T`).
//!
//! The `transmute` function takes, by value, whatever it's given
//! and returns it as whatever type is requested, as long as the
//! types are the same size. Because `~T` and `*T` have the same
//! representation they can be trivially,
//! though unsafely, transformed from one type to the other.
//!
//! ```
//! use std::cast;
//!
//! unsafe {
//! let my_num: ~int = ~10;
//! let my_num: *int = cast::transmute(my_num);
//! let my_speed: ~int = ~88;
//! let my_speed: *mut int = cast::transmute(my_speed);
//!
//! // By taking ownership of the original `~T` though
//! // we are obligated to transmute it back later to be destroyed.
//! drop(cast::transmute::<_, ~int>(my_speed));
//! drop(cast::transmute::<_, ~int>(my_num));
//! }
//! ```
//!
//! Note that here the call to `drop` is for clarity - it indicates
//! that we are done with the given value and it should be destroyed.
//!
//! ## 3. Get it from C.
//!
//! ```
//! extern crate libc;
//!
//! use std::mem;
//!
//! fn main() {
//! unsafe {
//! let my_num: *mut int = libc::malloc(mem::size_of::<int>() as libc::size_t) as *mut int;
//! if my_num.is_null() {
//! fail!("failed to allocate memory");
//! }
//! libc::free(my_num as *mut libc::c_void);
//! }
//! }
//! ```
//!
//! Usually you wouldn't literally use `malloc` and `free` from Rust,
//! but C APIs hand out a lot of pointers generally, so are a common source
//! of unsafe pointers in Rust.
use cast;
use clone::Clone;
#[cfg(not(test))]
use cmp::Equiv;
use iter::{range, Iterator};
use mem;
use option::{Option, Some, None};
use intrinsics;
#[cfg(not(test))] use cmp::{Eq, TotalEq, Ord};
/// Return the offset of the first null pointer in `buf`.
#[inline]
pub unsafe fn buf_len<T>(buf: **T) -> uint {
position(buf, |i| *i == null())
}
impl<T> Clone for *T {
#[inline]
fn clone(&self) -> *T {
*self
}
}
impl<T> Clone for *mut T {
#[inline]
fn clone(&self) -> *mut T {
*self
}
}
/// Return the first offset `i` such that `f(buf[i]) == true`.
#[inline]
pub unsafe fn position<T>(buf: *T, f: |&T| -> bool) -> uint {
let mut i = 0;
loop {
if f(&(*buf.offset(i as int))) { return i; }
else { i += 1; }
}
}
/// Create a null pointer.
///
/// # Example
///
/// ```
/// use std::ptr;
///
/// let p: *int = ptr::null();
/// assert!(p.is_null());
/// ```
#[inline]
pub fn null<T>() -> *T { 0 as *T }
/// Create an unsafe mutable null pointer.
///
/// # Example
///
/// ```
/// use std::ptr;
///
/// let p: *mut int = ptr::mut_null();
/// assert!(p.is_null());
/// ```
#[inline]
pub fn mut_null<T>() -> *mut T { 0 as *mut T }
/// Copies data from one location to another.
///
/// Copies `count` elements (not bytes) from `src` to `dst`. The source
/// and destination may overlap.
///
/// `copy_memory` is semantically equivalent to C's `memmove`.
///
/// # Example
///
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// use std::ptr;
///
/// unsafe fn from_buf_raw<T>(ptr: *T, elts: uint) -> Vec<T> {
/// let mut dst = Vec::with_capacity(elts);
/// dst.set_len(elts);
/// ptr::copy_memory(dst.as_mut_ptr(), ptr, elts);
/// dst
/// }
/// ```
///
#[inline]
pub unsafe fn copy_memory<T>(dst: *mut T, src: *T, count: uint) {
intrinsics::copy_memory(dst, src, count)
}
/// Copies data from one location to another.
///
/// Copies `count` elements (not bytes) from `src` to `dst`. The source
/// and destination may *not* overlap.
///
/// `copy_nonoverlapping_memory` is semantically equivalent to C's `memcpy`.
///
/// # Example
///
/// A safe swap function:
///
/// ```
/// use std::cast;
/// use std::mem;
/// use std::ptr;
///
/// fn swap<T>(x: &mut T, y: &mut T) {
/// unsafe {
/// // Give ourselves some scratch space to work with
/// let mut t: T = mem::uninit();
///
/// // Perform the swap, `&mut` pointers never alias
/// ptr::copy_nonoverlapping_memory(&mut t, &*x, 1);
/// ptr::copy_nonoverlapping_memory(x, &*y, 1);
/// ptr::copy_nonoverlapping_memory(y, &t, 1);
///
/// // y and t now point to the same thing, but we need to completely forget `tmp`
/// // because it's no longer relevant.
/// cast::forget(t);
/// }
/// }
/// ```
///
/// # Safety Note
///
/// If the source and destination overlap then the behavior of this
/// function is undefined.
#[inline]
pub unsafe fn copy_nonoverlapping_memory<T>(dst: *mut T,
src: *T,
count: uint) {
intrinsics::copy_nonoverlapping_memory(dst, src, count)
}
/// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
/// bytes of memory starting at `dst` to `c`.
#[inline]
pub unsafe fn set_memory<T>(dst: *mut T, c: u8, count: uint) {
intrinsics::set_memory(dst, c, count)
}
/// Zeroes out `count * size_of::<T>()` bytes of memory at `dst`.
#[inline]
pub unsafe fn zero_memory<T>(dst: *mut T, count: uint) {
set_memory(dst, 0, count);
}
/// Swap the values at two mutable locations of the same type, without
/// deinitialising either. They may overlap.
#[inline]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with
let mut tmp: T = mem::uninit();
let t: *mut T = &mut tmp;
// Perform the swap
copy_nonoverlapping_memory(t, &*x, 1);
copy_memory(x, &*y, 1); // `x` and `y` may overlap
copy_nonoverlapping_memory(y, &*t, 1);
// y and t now point to the same thing, but we need to completely forget `tmp`
// because it's no longer relevant.
cast::forget(tmp);
}
/// Replace the value at a mutable location with a new one, returning the old
/// value, without deinitialising either.
#[inline]
pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
mem::swap(cast::transmute(dest), &mut src); // cannot overlap
src
}
/// Reads the value from `*src` and returns it.
#[inline(always)]
pub unsafe fn read<T>(src: *T) -> T {
let mut tmp: T = mem::uninit();
copy_nonoverlapping_memory(&mut tmp, src, 1);
tmp
}
/// Reads the value from `*dest` and nulls it out.
/// This currently prevents destructors from executing.
#[inline(always)]
pub unsafe fn read_and_zero<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);
// Now zero out `dest`:
zero_memory(dest, 1);
tmp
}
/// Given a **T (pointer to an array of pointers),
/// iterate through each *T, up to the provided `len`,
/// passing each element to the provided callback function.
pub unsafe fn array_each_with_len<T>(arr: **T, len: uint, cb: |*T|) {
if arr.is_null() {
fail!("ptr::array_each_with_len failure: arr input is null pointer");
}
//let start_ptr = *arr;
for e in range(0, len) {
let n = arr.offset(e as int);
cb(*n);
}
}
/// Given a null-pointer-terminated **T (pointer to
/// an array of pointers), iterate through each *T,
/// passing each element to the provided callback function.
///
/// # Safety Note
///
/// This will only work with a null-terminated
/// pointer array.
pub unsafe fn array_each<T>(arr: **T, cb: |*T|) {
if arr.is_null() {
fail!("ptr::array_each_with_len failure: arr input is null pointer");
}
let len = buf_len(arr);
array_each_with_len(arr, len, cb);
}
/// Extension methods for raw pointers.
pub trait RawPtr<T> {
/// Returns the null pointer.
fn null() -> Self;
/// Returns true if the pointer is equal to the null pointer.
fn is_null(&self) -> bool;
/// Returns true if the pointer is not equal to the null pointer.
fn is_not_null(&self) -> bool { !self.is_null() }
/// Returns the value of this pointer (i.e., the address it points to).
fn to_uint(&self) -> uint;
/// Returns `None` if the pointer is null, or else returns the value wrapped
/// in `Some`.
///
/// # Safety Notes
///
/// While this method is useful for null-safety, it is important to note
/// that this is still an unsafe operation because the returned value could
/// be pointing to invalid memory.
unsafe fn to_option(&self) -> Option<&T>;
/// Calculates the offset from a pointer. The offset *must* be in-bounds of
/// the object, or one-byte-past-the-end. `count` is in units of T; e.g. a
/// `count` of 3 represents a pointer offset of `3 * size_of::<T>()` bytes.
unsafe fn offset(self, count: int) -> Self;
}
impl<T> RawPtr<T> for *T {
#[inline]
fn null() -> *T { null() }
#[inline]
fn is_null(&self) -> bool { *self == RawPtr::null() }
#[inline]
fn to_uint(&self) -> uint { *self as uint }
#[inline]
unsafe fn offset(self, count: int) -> *T { intrinsics::offset(self, count) }
#[inline]
unsafe fn to_option(&self) -> Option<&T> {
if self.is_null() {
None
} else {
Some(cast::transmute(*self))
}
}
}
impl<T> RawPtr<T> for *mut T {
#[inline]
fn null() -> *mut T { mut_null() }
#[inline]
fn is_null(&self) -> bool { *self == RawPtr::null() }
#[inline]
fn to_uint(&self) -> uint { *self as uint }
#[inline]
unsafe fn offset(self, count: int) -> *mut T { intrinsics::offset(self as *T, count) as *mut T }
#[inline]
unsafe fn to_option(&self) -> Option<&T> {
if self.is_null() {
None
} else {
Some(cast::transmute(*self))
}
}
}
// Equality for pointers
#[cfg(not(test))]
impl<T> Eq for *T {
#[inline]
fn eq(&self, other: &*T) -> bool {
*self == *other
}
#[inline]
fn ne(&self, other: &*T) -> bool { !self.eq(other) }
}
#[cfg(not(test))]
impl<T> TotalEq for *T {}
#[cfg(not(test))]
impl<T> Eq for *mut T {
#[inline]
fn eq(&self, other: &*mut T) -> bool {
*self == *other
}
#[inline]
fn ne(&self, other: &*mut T) -> bool { !self.eq(other) }
}
#[cfg(not(test))]
impl<T> TotalEq for *mut T {}
// Equivalence for pointers
#[cfg(not(test))]
impl<T> Equiv<*mut T> for *T {
fn equiv(&self, other: &*mut T) -> bool {
self.to_uint() == other.to_uint()
}
}
#[cfg(not(test))]
impl<T> Equiv<*T> for *mut T {
fn equiv(&self, other: &*T) -> bool {
self.to_uint() == other.to_uint()
}
}
// Equality for extern "C" fn pointers
#[cfg(not(test))]
mod externfnpointers {
use cast;
use cmp::Eq;
impl<_R> Eq for extern "C" fn() -> _R {
#[inline]
fn eq(&self, other: &extern "C" fn() -> _R) -> bool {
let self_: *() = unsafe { cast::transmute(*self) };
let other_: *() = unsafe { cast::transmute(*other) };
self_ == other_
}
#[inline]
fn ne(&self, other: &extern "C" fn() -> _R) -> bool {
!self.eq(other)
}
}
macro_rules! fnptreq(
($($p:ident),*) => {
impl<_R,$($p),*> Eq for extern "C" fn($($p),*) -> _R {
#[inline]
fn eq(&self, other: &extern "C" fn($($p),*) -> _R) -> bool {
let self_: *() = unsafe { cast::transmute(*self) };
let other_: *() = unsafe { cast::transmute(*other) };
self_ == other_
}
#[inline]
fn ne(&self, other: &extern "C" fn($($p),*) -> _R) -> bool {
!self.eq(other)
}
}
}
)
fnptreq!(A)
fnptreq!(A,B)
fnptreq!(A,B,C)
fnptreq!(A,B,C,D)
fnptreq!(A,B,C,D,E)
}
// Comparison for pointers
#[cfg(not(test))]
impl<T> Ord for *T {
#[inline]
fn lt(&self, other: &*T) -> bool {
*self < *other
}
#[inline]
fn le(&self, other: &*T) -> bool {
*self <= *other
}
#[inline]
fn ge(&self, other: &*T) -> bool {
*self >= *other
}
#[inline]
fn gt(&self, other: &*T) -> bool {
*self > *other
}
}
#[cfg(not(test))]
impl<T> Ord for *mut T {
#[inline]
fn lt(&self, other: &*mut T) -> bool {
*self < *other
}
#[inline]
fn le(&self, other: &*mut T) -> bool {
*self <= *other
}
#[inline]
fn ge(&self, other: &*mut T) -> bool {
*self >= *other
}
#[inline]
fn gt(&self, other: &*mut T) -> bool {
*self > *other
}
}
#[cfg(test)]
pub mod ptr_tests {
use super::*;
use prelude::*;
use c_str::ToCStr;
use cast;
use libc;
use str;
use slice::{ImmutableVector, MutableVector};
#[test]
fn | () {
unsafe {
struct Pair {
fst: int,
snd: int
};
let mut p = Pair {fst: 10, snd: 20};
let pptr: *mut Pair = &mut p;
let iptr: *mut int = cast::transmute(pptr);
assert_eq!(*iptr, 10);
*iptr = 30;
assert_eq!(*iptr, 30);
assert_eq!(p.fst, 30);
*pptr = Pair {fst: 50, snd: 60};
assert_eq!(*iptr, 50);
assert_eq!(p.fst, 50);
assert_eq!(p.snd, 60);
let v0 = ~[32000u16, 32001u16, 32002u16];
let mut v1 = ~[0u16, 0u16, 0u16];
copy_memory(v1.as_mut_ptr().offset(1),
v0.as_ptr().offset(1), 1);
assert!((v1[0] == 0u16 && v1[1] == 32001u16 && v1[2] == 0u16));
copy_memory(v1.as_mut_ptr(),
v0.as_ptr().offset(2), 1);
assert!((v1[0] == 32002u16 && v1[1] == 32001u16 &&
v1[2] == 0u16));
copy_memory(v1.as_mut_ptr().offset(2),
v0.as_ptr(), 1u);
assert!((v1[0] == 32002u16 && v1[1] == 32001u16 &&
v1[2] == 32000u16));
}
}
#[test]
fn test_position() {
use libc::c_char;
"hello".with_c_str(|p| {
unsafe {
assert!(2u == position(p, |c| *c == 'l' as c_char));
assert!(4u == position(p, |c| *c == 'o' as c_char));
assert!(5u == position(p, |c| *c == 0 as c_char));
}
})
}
#[test]
fn test_buf_len() {
"hello".with_c_str(|p0| {
"there".with_c_str(|p1| {
"thing".with_c_str(|p2| {
let v = ~[p0, p1, p2, null()];
unsafe {
assert_eq!(buf_len(v.as_ptr()), 3u);
}
})
})
})
}
#[test]
fn test_is_null() {
let p: *int = null();
assert!(p.is_null());
assert!(!p.is_not_null());
let q = unsafe { p.offset(1) };
assert!(!q.is_null());
assert!(q.is_not_null());
let mp: *mut int = mut_null();
assert!(mp.is_null());
assert!(!mp.is_not_null());
let mq = unsafe { mp.offset(1) };
assert!(!mq.is_null());
assert!(mq.is_not_null());
}
#[test]
fn test_to_option() {
unsafe {
let p: *int = null();
assert_eq!(p.to_option(), None);
let q: *int = &2;
assert_eq!(q.to_option().unwrap(), &2);
let p: *mut int = mut_null();
assert_eq!(p.to_option(), None);
let q: *mut int = &mut 2;
assert_eq!(q.to_option().unwrap(), &2);
}
}
#[test]
fn test_ptr_addition() {
unsafe {
let xs = ~[5, ..16];
let mut ptr = xs.as_ptr();
let end = ptr.offset(16);
while ptr < end {
assert_eq!(*ptr, 5);
ptr = ptr.offset(1);
}
let mut xs_mut = xs.clone();
let mut m_ptr = xs_mut.as_mut_ptr();
let m_end = m_ptr.offset(16);
while m_ptr < m_end {
*m_ptr += 5;
m_ptr = m_ptr.offset(1);
}
assert_eq!(xs_mut, ~[10, ..16]);
}
}
#[test]
fn test_ptr_subtraction() {
unsafe {
let xs = ~[0,1,2,3,4,5,6,7,8,9];
let mut idx = 9i8;
let ptr = xs.as_ptr();
while idx >= 0i8 {
assert_eq!(*(ptr.offset(idx as int)), idx as int);
idx = idx - 1i8;
}
let mut xs_mut = xs.clone();
let m_start = xs_mut.as_mut_ptr();
let mut m_ptr = m_start.offset(9);
while m_ptr >= m_start {
*m_ptr += *m_ptr;
m_ptr = m_ptr.offset(-1);
}
assert_eq!(xs_mut, ~[0,2,4,6,8,10,12,14,16,18]);
}
}
#[test]
fn test_ptr_array_each_with_len() {
unsafe {
let one = "oneOne".to_c_str();
let two = "twoTwo".to_c_str();
let three = "threeThree".to_c_str();
let arr = ~[
one.with_ref(|buf| buf),
two.with_ref(|buf| buf),
three.with_ref(|buf| buf),
];
let expected_arr = [
one, two, three
];
let mut ctr = 0;
let mut iteration_count = 0;
array_each_with_len(arr.as_ptr(), arr.len(), |e| {
let actual = str::raw::from_c_str(e);
let expected = expected_arr[ctr].with_ref(|buf| {
str::raw::from_c_str(buf)
});
debug!(
"test_ptr_array_each_with_len e: {}, a: {}",
expected, actual);
assert_eq!(actual, expected);
ctr += 1;
iteration_count += 1;
});
assert_eq!(iteration_count, 3u);
}
}
#[test]
fn test_ptr_array_each() {
unsafe {
let one = "oneOne".to_c_str();
let two = "twoTwo".to_c_str();
let three = "threeThree".to_c_str();
let arr = ~[
one.with_ref(|buf| buf),
two.with_ref(|buf| buf),
three.with_ref(|buf| buf),
// fake a null terminator
null(),
];
let expected_arr = [
one, two, three
];
let arr_ptr = arr.as_ptr();
let mut ctr = 0;
let mut iteration_count = 0;
array_each(arr_ptr, |e| {
let actual = str::raw::from_c_str(e);
let expected = expected_arr[ctr].with_ref(|buf| {
str::raw::from_c_str(buf)
});
debug!(
"test_ptr_array_each e: {}, a: {}",
expected, actual);
assert_eq!(actual, expected);
ctr += 1;
iteration_count += 1;
});
assert_eq!(iteration_count, 3);
}
}
#[test]
#[should_fail]
fn test_ptr_array_each_with_len_null_ptr() {
unsafe {
array_each_with_len(0 as **libc::c_char, 1, |e| {
str::raw::from_c_str(e);
});
}
}
#[test]
#[should_fail]
fn test_ptr_array_each_null_ptr() {
unsafe {
array_each(0 as **libc::c_char, |e| {
str::raw::from_c_str(e);
});
}
}
#[test]
fn test_set_memory() {
let mut xs = [0u8, ..20];
let ptr = xs.as_mut_ptr();
unsafe { set_memory(ptr, 5u8, xs.len()); }
assert!(xs == [5u8, ..20]);
}
}
| test |
game.rs | #![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unused_assignments)]
use std::alloc::{alloc, Layout};
use std::mem::{align_of, size_of};
extern "C" {
fn mylog(v: isize);
}
pub struct Game {
wid: isize,
hei: isize,
income_reader: IOHelper,
outgo_writer: IOHelper,
}
impl Game {
pub const fn new(wid: isize, hei: isize, io_buf_cap: usize) -> Self {
let income_reader = IOHelper::new(income_buf_ptr, income_cursor_w_ptr, io_buf_cap);
let outgo_writer = IOHelper::new(outgo_buf_ptr, outgo_cursor_w_ptr, io_buf_cap);
Self {
wid,
hei,
income_reader,
outgo_writer,
}
}
pub fn get_args_usize_ptr(&self) -> usize |
fn get_canvas_buf_ptr(&self) -> usize {
self.args_usize[0]
}
fn get_income_buf_ptr(&self) -> usize {
self.args_usize[1]
}
fn get_outgo_buf_ptr(&self) -> usize {
self.args_usize[2]
}
fn set_pixel_color(&self, x: isize, y: isize, r: u8, g: u8, b: u8, a: u8) {
let offset = ((x + y * self.wid) * 4) as usize;
let canvas_buf_ptr: usize = self.get_canvas_buf_ptr();
unsafe {
*((canvas_buf_ptr + offset + 0) as *mut u8) = r;
*((canvas_buf_ptr + offset + 1) as *mut u8) = g;
*((canvas_buf_ptr + offset + 2) as *mut u8) = b;
*((canvas_buf_ptr + offset + 3) as *mut u8) = a;
}
}
pub fn boot(&self) -> usize {
self.set_pixel_color(self.x, self.y, 255, 0, 0, 255);
0
}
pub fn ever(&mut self) -> usize {
self.income_reader.read();
self.recv();
// logic
self.send();
self.render();
0
}
pub fn halt(&self) -> usize {
0
}
pub fn send(&self) {}
pub fn recv(&self) {}
pub fn render(&mut self) {
// let bytes = include_bytes!("../art/fire.rgba");
// for x in 0..96_usize {
// for y in 0..128_usize {
// self.set_pixel_color(
// x as isize,
// y as isize,
// bytes[(x + y * 96) * 4 + 0],
// bytes[(x + y * 96) * 4 + 1],
// bytes[(x + y * 96) * 4 + 2],
// bytes[(x + y * 96) * 4 + 3],
// );
// }
// }
}
}
| {
&(self.args_usize) as *const [usize; PTR_ARGS_COUNT] as *const u8 as usize
} |
test_invoices.py | import csv
import decimal
from datetime import date, datetime
from decimal import Decimal
import random
import json
from django.http import QueryDict
from pytest import mark
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils import timezone
from django_factory_boy import auth as auth_factories
from freezegun import freeze_time
import responses
from assopy.models import Invoice, Order, Vat
from tests.factories import AssopyUserFactory, FareFactory, OrderFactory
from conference.models import AttendeeProfile, Fare, Conference
from conference.invoicing import (
ACPYSS_16,
PYTHON_ITALIA_17,
EPS_18,
CSV_2018_REPORT_COLUMNS,
)
from conference.currencies import (
DAILY_ECB_URL,
EXAMPLE_ECB_DAILY_XML,
EXAMPLE_ECB_DATE,
fetch_and_store_latest_ecb_exrates,
)
from conference.fares import (
pre_create_typical_fares_for_conference,
)
from email_template.models import Email
from tests.common_tools import template_used, make_user
def _prepare_invoice_for_basic_test(order_code, invoice_code):
# default password is 'password123' per django_factory_boy
user = make_user()
# FYI(artcz): Order.objects.create is overloaded method on
# OrderManager, that sets up a lot of unused stuff, going with manual
# .save().
order = Order(user=user.assopy_user, code=order_code)
order.save()
# create some random Vat instance so the invoice creation works
vat_10 = Vat.objects.create(value=10)
return Invoice.objects.create(
code=invoice_code,
order=order,
emit_date=date.today(),
price=Decimal(1337),
vat=vat_10,
html="<html>Here goes full html</html>",
exchange_rate_date=date.today(),
)
@mark.django_db
def test_invoice_html(client):
# invoice_code must be validated via ASSOPY_IS_REAL_INVOICE
invoice_code, order_code = "I123", "asdf"
_prepare_invoice_for_basic_test(order_code, invoice_code)
client.login(email="[email protected]", password="password123")
invoice_url = reverse(
"assopy-invoice-html",
kwargs={"order_code": order_code, "code": invoice_code},
)
response = client.get(invoice_url)
assert (
response.content.decode("utf-8") == "<html>Here goes full html</html>"
)
@mark.django_db
def test_invoice_pdf(client):
# invoice_code must be validated via ASSOPY_IS_REAL_INVOICE
invoice_code, order_code = "I123", "asdf"
_prepare_invoice_for_basic_test(order_code, invoice_code)
client.login(email="[email protected]", password="password123")
invoice_url = reverse(
"assopy-invoice-pdf",
kwargs={"order_code": order_code, "code": invoice_code},
)
response = client.get(invoice_url)
assert response.status_code == 200
assert response["Content-type"] == "application/pdf"
def create_order_and_invoice(assopy_user, fare):
order = OrderFactory(user=assopy_user, items=[(fare, {"qty": 1})])
with responses.RequestsMock() as rsps:
# mocking responses for the invoice VAT exchange rate feature
rsps.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
order.confirm_order(timezone.now())
# confirm_order by default creates placeholders, but for most of the tests
# we can upgrade them to proper invoices anyway.
invoice = Invoice.objects.get(order=order)
return invoice
@mark.django_db
def test_if_invoice_stores_information_about_the_seller(client):
"""
Testing #591
https://github.com/EuroPython/epcon/issues/591
"""
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
# need this email to generate invoices/orders
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user()
def invoice_url(invoice):
return reverse(
"assopy-invoice-html",
kwargs={"code": invoice.code, "order_code": invoice.order.code},
)
with freeze_time("2016-01-01"):
# We need to log in again after every time travel, just in case.
client.login(email="[email protected]", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/16.0001"
assert invoice.emit_date == date(2016, 1, 1)
assert invoice.issuer == ACPYSS_16
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert ACPYSS_16 in response.content.decode("utf-8")
with freeze_time("2017-01-01"):
# We need to log in again after every time travel, just in case.
client.login(email="[email protected]", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/17.0001"
assert invoice.emit_date == date(2017, 1, 1)
assert invoice.issuer == PYTHON_ITALIA_17
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert PYTHON_ITALIA_17 in response.content.decode("utf-8")
with freeze_time("2018-01-01"):
# We need to log in again after every time travel, just in case.
client.login(email="[email protected]", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/18.0001"
assert invoice.emit_date == date(2018, 1, 1)
assert invoice.issuer == EPS_18
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert EPS_18 in response.content.decode("utf-8")
@mark.django_db
@responses.activate
def test_vat_in_GBP_for_2018(client):
"""
https://github.com/EuroPython/epcon/issues/617
"""
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user()
with freeze_time("2018-05-05"):
client.login(email="[email protected]", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.html.startswith("<!DOCTYPE")
assert invoice.vat_value() == Decimal("1.67")
assert invoice.vat_in_local_currency == Decimal("1.49")
assert invoice.local_currency == "GBP"
assert invoice.exchange_rate == Decimal("0.89165")
assert invoice.exchange_rate_date == EXAMPLE_ECB_DATE
response = client.get(invoice.get_html_url())
content = response.content.decode("utf-8")
# The wording used to be different, so we had both checks in one line,
# but because of the template change we had to separate them
assert 'local-currency="GBP"' in content
assert 'total-vat-in-local-currency="1.49"' in content
# we're going to use whatever date was received/cached from the ECB XML;
# it doesn't matter what the emit date is
assert (
"ECB rate used for VAT is 0.89165 GBP/EUR from 2018-03-06"
in content
)
response = client.get(invoice.get_absolute_url())
assert response["Content-Type"] == "application/pdf"
with freeze_time("2017-05-05"):
client.login(email="[email protected]", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.html.startswith("<!DOCTYPE")
assert invoice.vat_value() == Decimal("1.67")
assert invoice.vat_in_local_currency == Decimal("1.67")
assert invoice.local_currency == "EUR"
assert invoice.exchange_rate == Decimal("1.0")
assert invoice.exchange_rate_date == date(2017, 5, 5)
response = client.get(invoice.get_html_url())
content = response.content.decode("utf-8")
# not showing any VAT conversion because in 2017 we had just EUR
assert "EUR" in content
assert "Total VAT is" not in content
assert "ECB rate" not in content
response = client.get(invoice.get_absolute_url())
assert response["Content-Type"] == "application/pdf"
@mark.django_db
@responses.activate
@freeze_time("2018-05-05")
def test_create_invoice_with_many_items(client):
"""
This test is meant to be used to test invoice template design.
It creates a lot of different items on the invoice, and after that we can
use serve(content) to easily check in the browser how the Invoice looks
like.
Freezing it at 2018 so we can easily check EP2018 invoices.
"""
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
user = make_user()
vat_rate_20, _ = Vat.objects.get_or_create(value=20)
CONFERENCE = settings.CONFERENCE_CONFERENCE
pre_create_typical_fares_for_conference(CONFERENCE, vat_rate_20)
# Don't need to set dates for this test.
# set_early_bird_fare_dates(CONFERENCE, yesterday, tomorrow)
# set_regular_fare_dates(CONFERENCE, yesterday, tomorrow)
random_fares = random.sample(list(Fare.objects.all()), 3)
order = OrderFactory(
user=user.assopy_user,
items=[(fare, {"qty": i}) for i, fare in enumerate(random_fares, 1)],
)
with responses.RequestsMock() as rsps:
# mocking responses for the invoice VAT exchange rate feature
rsps.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
order.confirm_order(timezone.now())
@mark.django_db
@responses.activate
def test_export_invoice_csv(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report_csv")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"] == "text/csv"
invoice_reader = csv.reader(response.content.decode("utf-8").splitlines())
next(invoice_reader) # skip header
invoice = next(invoice_reader)
iter_column = iter(invoice)
assert next(iter_column) == invoice1.code
assert next(iter_column) == "2018-05-05"
assert next(iter_column) == invoice1.order.user.user.get_full_name()
assert next(iter_column) == invoice1.order.card_name
next(iter_column) # ignore the address
assert next(iter_column) == invoice1.order.country.name
assert next(iter_column) == invoice1.order.vat_number
next(iter_column) # ignore the currency
assert (
decimal.Decimal(next(iter_column))
== invoice1.net_price_in_local_currency
)
assert decimal.Decimal(next(iter_column)) == invoice1.vat_in_local_currency
assert (
decimal.Decimal(next(iter_column)) == invoice1.price_in_local_currency
)
@mark.django_db
@responses.activate
def test_export_invoice_csv_before_period(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-04-05"):
create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 5, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report_csv")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"] == "text/csv"
invoice_reader = csv.reader(response.content.decode("utf-8").splitlines())
header = next(invoice_reader)
assert header == CSV_2018_REPORT_COLUMNS
assert next(invoice_reader, None) is None
@mark.django_db
@responses.activate
def test_export_invoice(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"].startswith("text/html")
assert '<tr id="invoice_{0}">'.format(
invoice1.id
) in response.content.decode("utf-8")
@mark.django_db
@responses.activate
def test_export_invoice_accounting_json(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_payment_reconciliation_json")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"].startswith("application/json")
data = json.loads(response.content)["invoices"]
assert len(data) == 1
assert data[0]["ID"] == invoice1.code
assert decimal.Decimal(data[0]["net"]) == invoice1.net_price()
assert decimal.Decimal(data[0]["vat"]) == invoice1.vat_value()
assert decimal.Decimal(data[0]["gross"]) == invoice1.price
assert data[0]["order"] == invoice1.order.code
assert data[0]["stripe"] == invoice1.order.stripe_charge_id
def test_reissue_invoice(admin_client):
| Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
invoice_code, order_code = "I123", "asdf"
invoice = _prepare_invoice_for_basic_test(order_code, invoice_code)
NEW_CUSTOMER = "NEW CUSTOMER"
assert Invoice.objects.all().count() == 1
assert NEW_CUSTOMER not in Invoice.objects.latest("id").html
url = reverse("debug_panel_reissue_invoice", args=[invoice.id])
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url, {"emit_date": "2018-01-01", "customer": NEW_CUSTOMER}
)
assert response.status_code == 302
assert Invoice.objects.all().count() == 2
assert NEW_CUSTOMER in Invoice.objects.latest("id").html |
|
FlorenceExceptions.py | class JacobianError(ArithmeticError):
def __init__(self,value=None):
self.value = value
def __str__(self):
if self.value is None:
self.value = 'Jacobian of mapping is close to zero'
return repr(self.value)
class IllConditionedError(ArithmeticError):
def __init__(self,value=None):
self.value = value
def __str__(self):
if self.value is None: | self.value = 'Matrix is ill conditioned'
return repr(self.value) | |
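# --- Usage sketch (editor addition) ---
# Intended raise/catch pattern for the exception classes above; `det_J`,
# `solve`, `K` and `f` are illustrative names only.
#
#   if abs(det_J) < 1e-14:
#       raise JacobianError()        # str() -> "'Jacobian of mapping is close to zero'"
#
#   try:
#       solve(K, f)
#   except IllConditionedError as err:
#       print(err)                   # "'Matrix is ill conditioned'"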
lib.rs | // Copyright 2020-2021, The Tremor Team
//
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Influx line protocol decoder to `simd_json::Value`
//!
//! The translation is done to the following schema:
//!
//! ```json
//! {
//! "measurement": "<name>",
//! "tags": {
//! "tag1": "<value>"
//! },
//! "fields": {
//! "field1": 123
//! },
//! "timestamp": 456
//! }
//! ```
#![deny(missing_docs)]
#![recursion_limit = "1024"]
#![deny(
clippy::all,
clippy::unwrap_used,
clippy::unnecessary_unwrap,
clippy::pedantic
)]
mod decoder;
mod encoder;
/// Errors
pub mod errors;
pub use decoder::decode;
pub use encoder::encode;
pub use errors::*;
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use simd_json::{json, BorrowedValue};
#[test]
fn unparse_test() {
let s = "weather,location=us-midwest temperature=82 1465839830100400200";
let d = decode(s, 0)
.expect("failed to parse")
.expect("failed to parse");
// This is a bit ugly, but to make a sensible comparison we have to convert the data
// from an object to JSON and back to an object
let j: BorrowedValue = d;
let e: BorrowedValue = json!({
"measurement": "weather",
"tags": {"location": "us-midwest"},
"fields": {"temperature": 82.0},
"timestamp": 1_465_839_830_100_400_200_i64
})
.into();
assert_eq!(e, j)
}
#[test]
fn unparse_empty() {
let s = "";
let d: Option<BorrowedValue> = decode(s, 0).expect("failed to parse");
assert!(d.is_none());
let s = " ";
let d: Option<BorrowedValue> = decode(s, 0).expect("failed to parse");
assert!(d.is_none());
let s = " \n";
let d: Option<BorrowedValue> = decode(s, 0).expect("failed to parse");
assert!(d.is_none());
let s = " \t \n";
let d: Option<BorrowedValue> = decode(s, 0).expect("failed to parse");
assert!(d.is_none());
}
#[test]
fn unparse_comment() {
let s = "# bla";
let d: Option<BorrowedValue> = decode(s, 0).expect("failed to parse");
assert!(d.is_none());
let s = " # bla";
let d: Option<BorrowedValue> = decode(s, 0).expect("failed to parse");
assert!(d.is_none());
let s = " \t \n# bla";
let d: Option<BorrowedValue> = decode(s, 0).expect("failed to parse");
assert!(d.is_none());
}
#[test]
fn parse_simple() {
let s = "weather,location=us-midwest temperature=82 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature": 82.0
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_simple2() {
let s = "weather,location=us-midwest,season=summer temperature=82 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest",
"season": "summer"
},
"fields": {
"temperature": 82.0
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_example() {
let mut s = br#"{"measurement":"swap","tags":{"host":"56a6f1b85709","window":"10secs"},"fields":{"count_free":2,"min_free":2139095040,"max_free":2147483647,"mean_free":2143289344.0,"stdev_free":0.0,"var_free":0.0,"p50_free":2147483647,"p90_free":2147483647,"p99_free":2147483647,"p99.9_free":2147483647},"timestamp":1465839830100400200}"#.to_vec();
let v = simd_json::borrowed::to_value(s.as_mut_slice()).unwrap();
encode(&v).unwrap();
}
#[test]
fn parse_simple3() {
let s =
"weather,location=us-midwest temperature=82,bug_concentration=98 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature": 82.0,
"bug_concentration": 98.0,
},
"timestamp": 1_465_839_830_100_400_200u64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_no_timestamp() {
let s = "weather temperature=82i";
let parsed = decode(s, 1_465_839_830_100_400_200u64).expect("failed to parse");
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {},
"fields": {
"temperature": 82
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Some(r), parsed);
}
#[test]
fn parse_float_value() {
let s = "weather temperature=82 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {},
"fields": {
"temperature": 82.0
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_int_value() {
let s = "weather temperature=82i 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {},
"fields": {
"temperature": 82
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_str_value() {
let s = "weather,location=us-midwest temperature=\"too warm\" 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature": "too warm"
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_true_value() {
let sarr = &[
"weather,location=us-midwest too_hot=true 1465839830100400200",
"weather,location=us-midwest too_hot=True 1465839830100400200",
"weather,location=us-midwest too_hot=TRUE 1465839830100400200",
"weather,location=us-midwest too_hot=t 1465839830100400200",
"weather,location=us-midwest too_hot=T 1465839830100400200",
];
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"too_hot": true
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
for s in sarr {
assert_eq!(Ok(Some(r.clone())), decode(s, 0));
}
}
#[test]
fn parse_false_value() {
let sarr = &[
"weather,location=us-midwest too_hot=false 1465839830100400200",
"weather,location=us-midwest too_hot=False 1465839830100400200",
"weather,location=us-midwest too_hot=FALSE 1465839830100400200",
"weather,location=us-midwest too_hot=f 1465839830100400200",
"weather,location=us-midwest too_hot=F 1465839830100400200",
];
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"too_hot": false
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
for s in sarr {
assert_eq!(Ok(Some(r.clone())), decode(s, 0));
}
}
#[test]
fn parse_escape01() {
let s = "weather,location=us\\,midwest temperature=82 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us,midwest"
},
"fields": {
"temperature": 82.0
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_escape02() {
let s = "weather,location=us-midwest temp\\=rature=82 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temp=rature": 82.0
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_escape03() {
let s = "weather,location\\ place=us-midwest temperature=82 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location place": "us-midwest"
},
"fields": {
"temperature": 82.0
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_escape04() {
let s = "wea\\,ther,location=us-midwest temperature=82 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "wea,ther",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature": 82.0
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_escape05() {
let s = "wea\\ ther,location=us-midwest temperature=82 1465839830100400200";
let r: BorrowedValue = json!({
"measurement": "wea ther",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature": 82.0
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_escape06() {
let s = r#"weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200"#;
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature": r#"too"hot""#
},
"timestamp": 1_465_839_830_100_400_200i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn | () {
let s = "weather,location=us-midwest temperature_str=\"too hot/cold\" 1465839830100400201";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature_str": "too hot/cold"
},
"timestamp": 1_465_839_830_100_400_201i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0));
}
#[test]
fn parse_escape08() {
let s = "weather,location=us-midwest temperature_str=\"too hot\\cold\" 1465839830100400202";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature_str": "too hot\\cold"
},
"timestamp": 1_465_839_830_100_400_202i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_escape09() {
let s =
"weather,location=us-midwest temperature_str=\"too hot\\\\cold\" 1465839830100400203";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature_str": "too hot\\cold"
},
"timestamp": 1_465_839_830_100_400_203i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_escape10() {
let s =
"weather,location=us-midwest temperature_str=\"too hot\\\\\\cold\" 1465839830100400204";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature_str": "too hot\\\\cold"
},
"timestamp": 1_465_839_830_100_400_204i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_escape11() {
let s = "weather,location=us-midwest temperature_str=\"too hot\\\\\\\\cold\" 1465839830100400205";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature_str": "too hot\\\\cold"
},
"timestamp": 1_465_839_830_100_400_205i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
#[test]
fn parse_escape12() {
let s = "weather,location=us-midwest temperature_str=\"too hot\\\\\\\\\\cold\" 1465839830100400206";
let r: BorrowedValue = json!({
"measurement": "weather",
"tags": {
"location": "us-midwest"
},
"fields": {
"temperature_str": "too hot\\\\\\cold"
},
"timestamp": 1_465_839_830_100_400_206i64,
})
.into();
assert_eq!(Ok(Some(r)), decode(s, 0))
}
}
| parse_escape07 |
encoder.py | from generative_playground.models.encoder.basic_cnn import SimpleCNNEncoder
from generative_playground.models.encoder.basic_rnn import SimpleRNN
from generative_playground.models.heads.attention_aggregating_head import AttentionAggregatingHead
from generative_playground.models.transformer.Models import TransformerEncoder
from generative_playground.utils.gpu_utils import to_gpu
def get_encoder(feature_len=12,
max_seq_length=15,
cnn_encoder_params={'kernel_sizes': (2, 3, 4),
'filters': (2, 3, 4),
'dense_size': 100},
drop_rate=0.0,
encoder_type='cnn',
rnn_encoder_hidden_n=200):
if encoder_type == 'rnn':
rnn_model = SimpleRNN(hidden_n=rnn_encoder_hidden_n, |
elif encoder_type == 'cnn':
encoder = to_gpu(SimpleCNNEncoder(params=cnn_encoder_params,
max_seq_length=max_seq_length,
feature_len=feature_len,
drop_rate=drop_rate))
elif encoder_type == 'attention':
encoder = to_gpu(AttentionAggregatingHead(TransformerEncoder(feature_len,
max_seq_length,
dropout=drop_rate,
padding_idx=feature_len - 1),
drop_rate=drop_rate))
else:
raise NotImplementedError()
return encoder | feature_len=feature_len,
drop_rate=drop_rate)
encoder = to_gpu(AttentionAggregatingHead(rnn_model, drop_rate=drop_rate)) |
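# --- Usage sketch (editor addition, not part of the original module) ---
# A minimal call to get_encoder(); the argument values are illustrative and
# assume a 12-symbol vocabulary with sequences up to length 15, matching the
# defaults above.
#
#   encoder = get_encoder(feature_len=12,
#                         max_seq_length=15,
#                         encoder_type='attention',
#                         drop_rate=0.1)
#   # `encoder` is a torch.nn.Module, moved to the GPU by to_gpu() when available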
server.py |
# RESTful API using the Falcon framework
from wsgiref import simple_server
import falcon
import json
# Resource endpoint imports
from cartsResource import *
from productsResource import *
# Check that client has application/json in Accept header
# and Content-Type, if request has body
class RequireJSON(object):
def process_request(self, req, resp):
if not req.client_accepts_json:
raise falcon.HTTPNotAcceptable(
'This API only supports responses encoded as JSON.',
href='http://docs.examples.com/api/json')
if req.method in ('POST', 'PUT'):
|
# JSON builder for both incoming and outgoing messages
class JSONBuilder(object):
def process_request(self, req, resp):
if req.content_length in (None, 0):
# Nothing to do
return
body = req.stream.read()
if not body:
raise falcon.HTTPBadRequest('Empty request body',
'A valid JSON document is required.')
try:
req.context['doc'] = json.loads(body.decode('utf-8'))
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPError(falcon.HTTP_753,
'Malformed JSON',
'Could not decode the request body. The '
'JSON was incorrect or not encoded as '
'UTF-8.')
def process_response(self, req, resp, resource):
if 'result' not in req.context:
return
resp.body = json.dumps(req.context['result'])
api = falcon.API(middleware=[
RequireJSON(),
JSONBuilder()
])
#Add api endpoints
products = ProductsResource()
api.add_route('/api/products', products)
product = ProductResource()
api.add_route('/api/products/{productId}', product)
shopcart = ShopCartResource()
api.add_route('/api/shopcarts/{userId}', shopcart)
shopcartProducts = ShopCartProductsResource()
api.add_route('/api/shopcarts/{userId}/products', shopcartProducts)
#Start the server
if __name__ == '__main__':
print("Staring server in 127.0.0.1:8000")
httpd = simple_server.make_server('127.0.0.1', 8000, api)
httpd.serve_forever()
| if 'application/json' not in req.content_type:
raise falcon.HTTPUnsupportedMediaType(
'This API only supports requests encoded as JSON.',
href='http://docs.examples.com/api/json') |
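# --- Client-side sketch (editor addition, not part of the server) ---
# Shows the headers the RequireJSON middleware expects; the endpoint path
# matches the routes above, but the payload keys are illustrative only.
#
#   import requests
#   resp = requests.post(
#       'http://127.0.0.1:8000/api/shopcarts/42/products',
#       json={'productId': 1, 'quantity': 2},   # sends Content-Type: application/json
#       headers={'Accept': 'application/json'},
#   )
#   print(resp.status_code, resp.json())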
job-overview-drawer-flamegraph.component.ts | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Component, OnInit, ChangeDetectionStrategy, OnDestroy, ChangeDetectorRef } from '@angular/core';
import { Subject } from 'rxjs';
import { flatMap, takeUntil, tap } from 'rxjs/operators';
import { JobFlameGraphInterface, NodesItemCorrectInterface } from 'interfaces';
import { JobService } from 'services';
@Component({
selector: 'flink-job-overview-drawer-flamegraph',
templateUrl: './job-overview-drawer-flamegraph.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
styleUrls: ['./job-overview-drawer-flamegraph.component.less']
})
export class | implements OnInit, OnDestroy {
destroy$ = new Subject();
isLoading = true;
now = Date.now();
selectedVertex: NodesItemCorrectInterface | null;
flameGraph = {} as JobFlameGraphInterface;
graphType = 'on_cpu';
constructor(private jobService: JobService, private cdr: ChangeDetectorRef) {}
ngOnInit() {
this.requestFlameGraph();
}
private requestFlameGraph() {
this.jobService.jobWithVertex$
.pipe(
takeUntil(this.destroy$),
tap(data => (this.selectedVertex = data.vertex)),
flatMap(data => this.jobService.loadOperatorFlameGraph(data.job.jid, data.vertex!.id, this.graphType))
)
.subscribe(
data => {
this.now = Date.now();
if (this.flameGraph.endTimestamp !== data['endTimestamp']) {
this.isLoading = false;
this.flameGraph = data;
this.flameGraph.graphType = this.graphType;
}
this.cdr.markForCheck();
},
() => {
this.isLoading = false;
this.cdr.markForCheck();
}
);
}
selectFrameGraphType() {
this.destroy$.next();
this.destroy$.complete();
this.destroy$ = new Subject();
this.flameGraph = {} as JobFlameGraphInterface;
this.requestFlameGraph();
}
ngOnDestroy() {
this.destroy$.next();
this.destroy$.complete();
}
}
| JobOverviewDrawerFlameGraphComponent |
proc_macro_harness.rs | use std::mem;
use rustc_ast::attr;
use rustc_ast::ptr::P;
use rustc_ast::visit::{self, Visitor};
use rustc_ast::{self as ast, NodeId};
use rustc_ast_pretty::pprust;
use rustc_expand::base::{parse_macro_name_and_helper_attrs, ExtCtxt, ResolverExpand};
use rustc_expand::expand::{AstFragment, ExpansionConfig};
use rustc_session::Session;
use rustc_span::hygiene::AstPass;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
use smallvec::smallvec;
use std::cell::RefCell;
struct ProcMacroDerive {
id: NodeId,
trait_name: Symbol,
function_name: Ident,
span: Span,
attrs: Vec<Symbol>,
}
enum ProcMacroDefType {
Attr,
Bang,
}
struct ProcMacroDef {
id: NodeId,
function_name: Ident,
span: Span,
def_type: ProcMacroDefType,
}
enum ProcMacro {
Derive(ProcMacroDerive),
Def(ProcMacroDef),
}
struct CollectProcMacros<'a> {
sess: &'a Session,
macros: Vec<ProcMacro>,
in_root: bool,
handler: &'a rustc_errors::Handler,
source_map: &'a SourceMap,
is_proc_macro_crate: bool,
is_test_crate: bool,
}
pub fn inject(
sess: &Session,
resolver: &mut dyn ResolverExpand,
mut krate: ast::Crate,
is_proc_macro_crate: bool,
has_proc_macro_decls: bool,
is_test_crate: bool,
num_crate_types: usize,
handler: &rustc_errors::Handler,
) -> ast::Crate {
let ecfg = ExpansionConfig::default("proc_macro".to_string());
let mut cx = ExtCtxt::new(sess, ecfg, resolver, None);
let mut collect = CollectProcMacros {
sess,
macros: Vec::new(),
in_root: true,
handler,
source_map: sess.source_map(),
is_proc_macro_crate,
is_test_crate,
};
if has_proc_macro_decls || is_proc_macro_crate {
visit::walk_crate(&mut collect, &krate);
}
let macros = collect.macros;
if !is_proc_macro_crate {
return krate;
}
if num_crate_types > 1 {
handler.err("cannot mix `proc-macro` crate type with others");
}
if is_test_crate {
return krate;
}
let decls = mk_decls(&mut krate, &mut cx, ¯os);
krate.items.push(decls);
krate
}
impl<'a> CollectProcMacros<'a> {
fn check_not_pub_in_root(&self, vis: &ast::Visibility, sp: Span) {
if self.is_proc_macro_crate && self.in_root && vis.kind.is_pub() {
self.handler.span_err(
sp,
"`proc-macro` crate types currently cannot export any items other \
than functions tagged with `#[proc_macro]`, `#[proc_macro_derive]`, \
or `#[proc_macro_attribute]`",
);
}
}
fn collect_custom_derive(&mut self, item: &'a ast::Item, attr: &'a ast::Attribute) {
let (trait_name, proc_attrs) =
match parse_macro_name_and_helper_attrs(self.handler, attr, "derive") {
Some(name_and_attrs) => name_and_attrs,
None => return,
};
if self.in_root && item.vis.kind.is_pub() {
self.macros.push(ProcMacro::Derive(ProcMacroDerive {
id: item.id,
span: item.span,
trait_name,
function_name: item.ident,
attrs: proc_attrs,
}));
} else {
let msg = if !self.in_root {
"functions tagged with `#[proc_macro_derive]` must \
currently reside in the root of the crate"
} else {
"functions tagged with `#[proc_macro_derive]` must be `pub`"
};
self.handler.span_err(self.source_map.guess_head_span(item.span), msg);
}
}
fn collect_attr_proc_macro(&mut self, item: &'a ast::Item) {
if self.in_root && item.vis.kind.is_pub() {
self.macros.push(ProcMacro::Def(ProcMacroDef {
id: item.id,
span: item.span,
function_name: item.ident,
def_type: ProcMacroDefType::Attr,
}));
} else {
let msg = if !self.in_root {
"functions tagged with `#[proc_macro_attribute]` must \
currently reside in the root of the crate"
} else {
"functions tagged with `#[proc_macro_attribute]` must be `pub`"
};
self.handler.span_err(self.source_map.guess_head_span(item.span), msg);
}
}
fn collect_bang_proc_macro(&mut self, item: &'a ast::Item) {
if self.in_root && item.vis.kind.is_pub() {
self.macros.push(ProcMacro::Def(ProcMacroDef {
id: item.id,
span: item.span,
function_name: item.ident,
def_type: ProcMacroDefType::Bang,
}));
} else {
let msg = if !self.in_root {
"functions tagged with `#[proc_macro]` must \
currently reside in the root of the crate"
} else {
"functions tagged with `#[proc_macro]` must be `pub`"
};
self.handler.span_err(self.source_map.guess_head_span(item.span), msg);
}
}
}
impl<'a> Visitor<'a> for CollectProcMacros<'a> {
fn visit_item(&mut self, item: &'a ast::Item) {
if let ast::ItemKind::MacroDef(..) = item.kind {
if self.is_proc_macro_crate && self.sess.contains_name(&item.attrs, sym::macro_export) {
let msg =
"cannot export macro_rules! macros from a `proc-macro` crate type currently";
self.handler.span_err(self.source_map.guess_head_span(item.span), msg);
}
}
// First up, make sure we're checking a bare function. If we're not then
// we're just not interested in this item.
//
// If we find one, try to locate a `#[proc_macro_derive]` attribute on it.
let is_fn = matches!(item.kind, ast::ItemKind::Fn(..));
let mut found_attr: Option<&'a ast::Attribute> = None;
for attr in &item.attrs {
if self.sess.is_proc_macro_attr(&attr) {
if let Some(prev_attr) = found_attr {
let prev_item = prev_attr.get_normal_item();
let item = attr.get_normal_item();
let path_str = pprust::path_to_string(&item.path);
let msg = if item.path.segments[0].ident.name
== prev_item.path.segments[0].ident.name
{
format!(
"only one `#[{}]` attribute is allowed on any given function",
path_str,
)
} else {
format!(
"`#[{}]` and `#[{}]` attributes cannot both be applied
to the same function",
path_str,
pprust::path_to_string(&prev_item.path),
)
};
self.handler
.struct_span_err(attr.span, &msg)
.span_label(prev_attr.span, "previous attribute here")
.emit();
return;
}
found_attr = Some(attr);
}
}
let attr = match found_attr {
None => {
self.check_not_pub_in_root(&item.vis, self.source_map.guess_head_span(item.span));
let prev_in_root = mem::replace(&mut self.in_root, false);
visit::walk_item(self, item);
self.in_root = prev_in_root;
return;
}
Some(attr) => attr,
};
if !is_fn {
let msg = format!(
"the `#[{}]` attribute may only be used on bare functions",
pprust::path_to_string(&attr.get_normal_item().path),
);
self.handler.span_err(attr.span, &msg);
return;
}
if self.is_test_crate {
return;
}
if !self.is_proc_macro_crate {
let msg = format!(
"the `#[{}]` attribute is only usable with crates of the `proc-macro` crate type",
pprust::path_to_string(&attr.get_normal_item().path),
);
self.handler.span_err(attr.span, &msg);
return;
}
if attr.has_name(sym::proc_macro_derive) {
self.collect_custom_derive(item, attr);
} else if attr.has_name(sym::proc_macro_attribute) {
self.collect_attr_proc_macro(item);
} else if attr.has_name(sym::proc_macro) {
self.collect_bang_proc_macro(item);
};
let prev_in_root = mem::replace(&mut self.in_root, false); | }
}
// Creates a new module which looks like:
//
// const _: () = {
// extern crate proc_macro;
//
// use proc_macro::bridge::client::ProcMacro;
//
// #[rustc_proc_macro_decls]
// #[allow(deprecated)]
// static DECLS: &[ProcMacro] = &[
// ProcMacro::custom_derive($name_trait1, &[], ::$name1);
// ProcMacro::custom_derive($name_trait2, &["attribute_name"], ::$name2);
// // ...
// ];
// }
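// (Not shown above: attribute and bang proc macros are registered in the same
// slice; judging from the expression-building code below, their entries take
// the rough form `ProcMacro::attr($name_str, ::$name)` and
// `ProcMacro::bang($name_str, ::$name)`, where $name_str is the handler
// function's name as a string literal.)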
fn mk_decls(
ast_krate: &mut ast::Crate,
cx: &mut ExtCtxt<'_>,
macros: &[ProcMacro],
) -> P<ast::Item> {
// We're the ones filling in this Vec,
// so it should be empty to start with
assert!(ast_krate.proc_macros.is_empty());
let expn_id = cx.resolver.expansion_for_ast_pass(
DUMMY_SP,
AstPass::ProcMacroHarness,
&[sym::rustc_attrs, sym::proc_macro_internals],
None,
);
let span = DUMMY_SP.with_def_site_ctxt(expn_id.to_expn_id());
let proc_macro = Ident::new(sym::proc_macro, span);
let krate = cx.item(span, proc_macro, Vec::new(), ast::ItemKind::ExternCrate(None));
let bridge = Ident::new(sym::bridge, span);
let client = Ident::new(sym::client, span);
let proc_macro_ty = Ident::new(sym::ProcMacro, span);
let custom_derive = Ident::new(sym::custom_derive, span);
let attr = Ident::new(sym::attr, span);
let bang = Ident::new(sym::bang, span);
let krate_ref = RefCell::new(ast_krate);
// We add NodeIds to 'krate.proc_macros' in the order
// that we generate expressions. The position of each NodeId
// in the 'proc_macros' Vec corresponds to its position
// in the static array that will be generated
let decls = {
let local_path =
|sp: Span, name| cx.expr_path(cx.path(sp.with_ctxt(span.ctxt()), vec![name]));
let proc_macro_ty_method_path = |method| {
cx.expr_path(cx.path(span, vec![proc_macro, bridge, client, proc_macro_ty, method]))
};
macros
.iter()
.map(|m| match m {
ProcMacro::Derive(cd) => {
krate_ref.borrow_mut().proc_macros.push(cd.id);
cx.expr_call(
span,
proc_macro_ty_method_path(custom_derive),
vec![
cx.expr_str(cd.span, cd.trait_name),
cx.expr_vec_slice(
span,
cd.attrs
.iter()
.map(|&s| cx.expr_str(cd.span, s))
.collect::<Vec<_>>(),
),
local_path(cd.span, cd.function_name),
],
)
}
ProcMacro::Def(ca) => {
krate_ref.borrow_mut().proc_macros.push(ca.id);
let ident = match ca.def_type {
ProcMacroDefType::Attr => attr,
ProcMacroDefType::Bang => bang,
};
cx.expr_call(
span,
proc_macro_ty_method_path(ident),
vec![
cx.expr_str(ca.span, ca.function_name.name),
local_path(ca.span, ca.function_name),
],
)
}
})
.collect()
};
let decls_static = cx
.item_static(
span,
Ident::new(sym::_DECLS, span),
cx.ty_rptr(
span,
cx.ty(
span,
ast::TyKind::Slice(
cx.ty_path(cx.path(span, vec![proc_macro, bridge, client, proc_macro_ty])),
),
),
None,
ast::Mutability::Not,
),
ast::Mutability::Not,
cx.expr_vec_slice(span, decls),
)
.map(|mut i| {
let attr = cx.meta_word(span, sym::rustc_proc_macro_decls);
i.attrs.push(cx.attribute(attr));
let deprecated_attr = attr::mk_nested_word_item(Ident::new(sym::deprecated, span));
let allow_deprecated_attr =
attr::mk_list_item(Ident::new(sym::allow, span), vec![deprecated_attr]);
i.attrs.push(cx.attribute(allow_deprecated_attr));
i
});
let block = cx.expr_block(
cx.block(span, vec![cx.stmt_item(span, krate), cx.stmt_item(span, decls_static)]),
);
let anon_constant = cx.item_const(
span,
Ident::new(kw::Underscore, span),
cx.ty(span, ast::TyKind::Tup(Vec::new())),
block,
);
// Integrate the new item into existing module structures.
let items = AstFragment::Items(smallvec![anon_constant]);
cx.monotonic_expander().fully_expand_fragment(items).make_items().pop().unwrap()
} | visit::walk_item(self, item);
self.in_root = prev_in_root; |
__init__.py |
from torch.utils.ffi import _wrap_function
from ._rod_align import lib as _lib, ffi as _ffi
__all__ = []
def | (locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
if callable(fn):
locals[symbol] = _wrap_function(fn, _ffi)
else:
locals[symbol] = fn
__all__.append(symbol)
_import_symbols(locals())
| _import_symbols |
tests.rs | use super::lexer::{Lexer, MultiStringToken, NormalToken, StringToken, Token};
use crate::error::ParseError;
use crate::identifier::Ident;
use crate::parser::error::ParseError as InternalParseError;
use crate::term::make as mk_term;
use crate::term::Term::*;
use crate::term::{BinaryOp, RichTerm, StrChunk, UnaryOp};
use crate::{mk_app, mk_switch};
use assert_matches::assert_matches;
use codespan::Files;
fn parse(s: &str) -> Result<RichTerm, ParseError> {
let id = Files::new().add("<test>", String::from(s));
super::grammar::TermParser::new()
.parse_term(id, Lexer::new(&s))
.map_err(|errs| errs.errors.first().unwrap().clone())
}
fn parse_without_pos(s: &str) -> RichTerm {
parse(s).unwrap().without_pos()
}
fn lex(s: &str) -> Result<Vec<(usize, Token, usize)>, InternalParseError> {
Lexer::new(s).collect()
}
fn lex_without_pos(s: &str) -> Result<Vec<Token>, InternalParseError> {
lex(s).map(|v| v.into_iter().map(|(_, tok, _)| tok).collect())
}
/// Wrap a single string literal in a `StrChunks`.
fn mk_single_chunk(s: &str) -> RichTerm {
StrChunks(vec![StrChunk::Literal(String::from(s))]).into()
}
#[test]
fn numbers() {
assert_eq!(parse_without_pos("22"), Num(22.0).into());
assert_eq!(parse_without_pos("22.0"), Num(22.0).into());
assert_eq!(parse_without_pos("22.22"), Num(22.22).into());
assert_eq!(parse_without_pos("(22)"), Num(22.0).into());
assert_eq!(parse_without_pos("((22))"), Num(22.0).into());
}
#[test]
fn strings() {
assert_eq!(
parse_without_pos("\"hello world\""),
mk_single_chunk("hello world"),
);
assert_eq!(
parse_without_pos("\"hello \nworld\""),
mk_single_chunk("hello \nworld")
);
assert_eq!(
parse_without_pos("\"hello Dimension C-132!\""),
mk_single_chunk("hello Dimension C-132!")
);
assert_eq!(
parse_without_pos("\"hello\" ++ \"World\" ++ \"!!\" "),
Op2(
BinaryOp::StrConcat(),
Op2(
BinaryOp::StrConcat(),
mk_single_chunk("hello"),
mk_single_chunk("World"),
)
.into(),
mk_single_chunk("!!"),
)
.into()
)
}
#[test]
fn | () {
assert_eq!(
parse_without_pos("3 + 4"),
Op2(BinaryOp::Plus(), Num(3.0).into(), Num(4.).into()).into()
);
assert_eq!(
parse_without_pos("(true + false) + 4"),
Op2(
BinaryOp::Plus(),
Op2(BinaryOp::Plus(), Bool(true).into(), Bool(false).into()).into(),
Num(4.).into(),
)
.into()
);
}
#[test]
fn booleans() {
assert_eq!(parse_without_pos("true"), Bool(true).into());
assert_eq!(parse_without_pos("false"), Bool(false).into());
}
#[test]
fn ite() {
assert_eq!(
parse_without_pos("if true then 3 else 4"),
mk_app!(mk_term::op1(UnaryOp::Ite(), Bool(true)), Num(3.0), Num(4.0))
);
}
#[test]
fn applications() {
assert_eq!(
parse_without_pos("1 true 2"),
mk_app!(Num(1.0), Bool(true), Num(2.0))
);
assert_eq!(
parse_without_pos("1 (2 3) 4"),
mk_app!(Num(1.0), mk_app!(Num(2.0), Num(3.0)), Num(4.0))
);
}
#[test]
fn variables() {
assert!(parse("x1_x_").is_ok());
}
#[test]
fn functions() {
assert_eq!(
crate::transform::desugar_destructuring::desugar_fun(parse_without_pos("fun x => x")),
mk_term::id()
);
}
#[test]
fn lets() {
assert_matches!(parse("let x1 = x2 in x3"), Ok(..));
assert_matches!(parse("x (let x1 = x2 in x3) y"), Ok(..));
}
#[test]
fn unary_op() {
assert_eq!(
parse_without_pos("%is_num% x"),
mk_term::op1(UnaryOp::IsNum(), mk_term::var("x"))
);
assert_eq!(
parse_without_pos("%is_num% x y"),
mk_app!(
mk_term::op1(UnaryOp::IsNum(), mk_term::var("x")),
mk_term::var("y")
),
);
}
#[test]
fn enum_terms() {
assert_eq!(parse_without_pos("`foo"), Enum(Ident::from("foo")).into(),);
assert_eq!(
parse_without_pos("`\"foo:bar\""),
Enum(Ident::from("foo:bar")).into(),
);
assert_eq!(
parse_without_pos("switch { `foo => true, `bar => false, _ => 456, } 123"),
mk_switch!(Num(123.), ("foo", Bool(true)), ("bar", Bool(false)) ; Num(456.))
)
}
#[test]
fn record_terms() {
assert_eq!(
parse_without_pos("{ a = 1, b = 2, c = 3}"),
RecRecord(
vec![
(Ident::from("a"), Num(1.).into()),
(Ident::from("b"), Num(2.).into()),
(Ident::from("c"), Num(3.).into()),
]
.into_iter()
.collect(),
Vec::new(),
Default::default()
)
.into()
);
assert_eq!(
parse_without_pos("{ a = 1, \"#{123}\" = (if 4 then 5 else 6), d = 42}"),
RecRecord(
vec![
(Ident::from("a"), Num(1.).into()),
(Ident::from("d"), Num(42.).into()),
]
.into_iter()
.collect(),
vec![(
StrChunks(vec![StrChunk::expr(RichTerm::from(Num(123.)))]).into(),
mk_app!(mk_term::op1(UnaryOp::Ite(), Num(4.)), Num(5.), Num(6.))
)],
Default::default(),
)
.into()
);
assert_eq!(
parse_without_pos("{ a = 1, \"\\\"#}#\" = 2}"),
RecRecord(
vec![
(Ident::from("a"), Num(1.).into()),
(Ident::from("\"#}#"), Num(2.).into()),
]
.into_iter()
.collect(),
Vec::new(),
Default::default(),
)
.into()
);
}
#[test]
fn string_lexing() {
assert_eq!(
lex_without_pos("\"Good\" \"strings\""),
Ok(vec![
Token::Normal(NormalToken::DoubleQuote),
Token::Str(StringToken::Literal("Good")),
Token::Normal(NormalToken::DoubleQuote),
Token::Normal(NormalToken::DoubleQuote),
Token::Str(StringToken::Literal("strings")),
Token::Normal(NormalToken::DoubleQuote),
])
);
assert_eq!(
lex_without_pos("\"Good\\nEscape\\t\\\"\""),
Ok(vec![
Token::Normal(NormalToken::DoubleQuote),
Token::Str(StringToken::Literal("Good")),
Token::Str(StringToken::EscapedChar('\n')),
Token::Str(StringToken::Literal("Escape")),
Token::Str(StringToken::EscapedChar('\t')),
Token::Str(StringToken::EscapedChar('\"')),
Token::Normal(NormalToken::DoubleQuote),
])
);
assert_eq!(
lex_without_pos("\"1 + #{ 1 } + 2\""),
Ok(vec![
Token::Normal(NormalToken::DoubleQuote),
Token::Str(StringToken::Literal("1 + ")),
Token::Str(StringToken::HashBrace),
Token::Normal(NormalToken::NumLiteral(1.0)),
Token::Normal(NormalToken::RBrace),
Token::Str(StringToken::Literal(" + 2")),
Token::Normal(NormalToken::DoubleQuote),
])
);
assert_eq!(
lex_without_pos("\"1 + #{ \"#{ 1 }\" } + 2\""),
Ok(vec![
Token::Normal(NormalToken::DoubleQuote),
Token::Str(StringToken::Literal("1 + ")),
Token::Str(StringToken::HashBrace),
Token::Normal(NormalToken::DoubleQuote),
Token::Str(StringToken::HashBrace),
Token::Normal(NormalToken::NumLiteral(1.0)),
Token::Normal(NormalToken::RBrace),
Token::Normal(NormalToken::DoubleQuote),
Token::Normal(NormalToken::RBrace),
Token::Str(StringToken::Literal(" + 2")),
Token::Normal(NormalToken::DoubleQuote),
])
);
assert_eq!(
lex_without_pos(r##"m#""#"#m"##),
Ok(vec![
Token::Normal(NormalToken::MultiStringStart(3)),
Token::MultiStr(MultiStringToken::Literal("\"")),
Token::MultiStr(MultiStringToken::Literal("#")),
Token::MultiStr(MultiStringToken::End),
])
);
}
#[test]
fn str_escape() {
assert_matches!(
parse("\"bad escape \\g\""),
Err(ParseError::InvalidEscapeSequence(..))
);
assert_eq!(
parse_without_pos(r#""str\twith\nescapes""#),
mk_single_chunk("str\twith\nescapes"),
);
assert_eq!(
parse_without_pos("\"\\#\\#{ }\\#\""),
mk_single_chunk("##{ }#"),
);
assert_eq!(
parse_without_pos("\"#a#b#c\\#{d#\""),
mk_single_chunk("#a#b#c#{d#"),
);
}
#[test]
fn ascii_escape() {
assert_matches!(
parse("\"\\x[f\""),
Err(ParseError::InvalidEscapeSequence(..))
);
assert_matches!(
parse("\"\\x0\""),
Err(ParseError::InvalidEscapeSequence(..))
);
assert_matches!(
parse("\"\\x0z\""),
Err(ParseError::InvalidEscapeSequence(..))
);
assert_matches!(
parse("\"\\x80\""),
Err(ParseError::InvalidAsciiEscapeCode(..))
);
assert_matches!(
parse("\"\\xab\""),
Err(ParseError::InvalidAsciiEscapeCode(..))
);
assert_matches!(
parse("\"\\xFF\""),
Err(ParseError::InvalidAsciiEscapeCode(..))
);
assert_eq!(parse_without_pos("\"\\x00\""), mk_single_chunk("\x00"));
assert_eq!(parse_without_pos("\"\\x08\""), mk_single_chunk("\x08"));
assert_eq!(parse_without_pos("\"\\x7F\""), mk_single_chunk("\x7F"));
assert_eq!(parse_without_pos("m#\"\\x[f\"#m"), mk_single_chunk("\\x[f"));
assert_eq!(parse_without_pos("m#\"\\x0\"#m"), mk_single_chunk("\\x0"));
assert_eq!(parse_without_pos("m#\"\\x0z\"#m"), mk_single_chunk("\\x0z"));
assert_eq!(parse_without_pos("m#\"\\x00\"#m"), mk_single_chunk("\\x00"));
assert_eq!(parse_without_pos("m#\"\\x08\"#m"), mk_single_chunk("\\x08"));
assert_eq!(parse_without_pos("m#\"\\x7F\"#m"), mk_single_chunk("\\x7F"));
}
/// Regression test for [#230](https://github.com/tweag/nickel/issues/230).
#[test]
fn multiline_str_escape() {
assert_eq!(
parse_without_pos(r##"m#"#Hel##lo###"#m"##),
mk_single_chunk("#Hel##lo###"),
);
assert_eq!(
parse_without_pos(r##"m#"#Hel##{lo###{"#m"##),
mk_single_chunk("#Hel##{lo###{"),
);
}
#[test]
fn line_comments() {
assert_eq!(
parse_without_pos("// 1 +\n1 + 1// + 3\n//+ 2"),
parse_without_pos("1 + 1")
);
assert_eq!(
parse_without_pos(
"{ // Some comment
field = foo, // Some description
} // Some other"
),
parse_without_pos("{field = foo}")
);
}
#[test]
fn unbound_type_variables() {
// should fail, "a" is unbound
assert_matches!(
parse("1 | a"),
Err(ParseError::UnboundTypeVariables(unbound_vars, _)) if (unbound_vars.contains(&Ident::from("a")) && unbound_vars.len() == 1)
);
// should fail, "d" is unbound
assert_matches!(
parse("null: forall a b c. a -> (b -> List c) -> {foo : List {_ : d}, bar: b | Dyn}"),
Err(ParseError::UnboundTypeVariables(unbound_vars, _)) if (unbound_vars.contains(&Ident::from("d")) && unbound_vars.len() == 1)
);
// should fail, "e" is unbound
assert_matches!(
parse("null: forall a b c. a -> (b -> List c) -> {foo : List {_ : a}, bar: b | e}"),
Err(ParseError::UnboundTypeVariables(unbound_vars, _)) if (unbound_vars.contains(&Ident::from("e")) && unbound_vars.len() == 1)
);
// should fail, "a" is unbound
assert_matches!(
parse("null: a -> (forall a. a -> a)"),
Err(ParseError::UnboundTypeVariables(unbound_vars, _)) if (unbound_vars.contains(&Ident::from("a")) && unbound_vars.len() == 1)
);
}
| plus |
scaled_font.rs | use ffi;
#[cfg(feature = "use_glib")]
use glib::translate::*;
use std::ffi::CString;
use std::ptr;
use enums::FontType;
use ffi::{FontExtents, Glyph, TextCluster, TextExtents};
use matrices::Matrix;
use utils::status_to_result;
use super::{FontFace, FontOptions};
#[cfg(feature = "use_glib")]
glib_wrapper! {
#[derive(Debug)]
pub struct ScaledFont(Shared<ffi::cairo_scaled_font_t>);
match fn {
ref => |ptr| ffi::cairo_scaled_font_reference(ptr),
unref => |ptr| ffi::cairo_scaled_font_destroy(ptr),
get_type => || ffi::gobject::cairo_gobject_scaled_font_get_type(),
}
}
#[cfg(not(feature = "use_glib"))]
#[derive(Debug)]
pub struct ScaledFont(ptr::NonNull<ffi::cairo_scaled_font_t>);
impl ScaledFont {
pub fn new(
font_face: &FontFace,
font_matrix: &Matrix,
ctm: &Matrix,
options: &FontOptions,
) -> ScaledFont {
let scaled_font: ScaledFont = unsafe {
ScaledFont::from_raw_full(ffi::cairo_scaled_font_create(
font_face.to_raw_none(),
font_matrix.ptr(),
ctm.ptr(),
options.to_raw_none(),
))
};
let status = unsafe { ffi::cairo_scaled_font_status(scaled_font.to_raw_none()) };
status_to_result(status).expect("Failed to create a scaled font");
scaled_font
}
#[cfg(feature = "use_glib")]
pub fn to_raw_none(&self) -> *mut ffi::cairo_scaled_font_t {
self.to_glib_none().0
}
#[cfg(not(feature = "use_glib"))]
pub fn to_raw_none(&self) -> *mut ffi::cairo_scaled_font_t {
self.0.as_ptr()
}
#[cfg(not(feature = "use_glib"))]
pub unsafe fn from_raw_full(ptr: *mut ffi::cairo_scaled_font_t) -> ScaledFont {
assert!(!ptr.is_null());
ScaledFont(ptr::NonNull::new_unchecked(ptr))
}
#[cfg(feature = "use_glib")]
pub unsafe fn from_raw_full(ptr: *mut ffi::cairo_scaled_font_t) -> ScaledFont {
from_glib_full(ptr)
}
#[cfg(feature = "use_glib")]
pub unsafe fn from_raw_none(ptr: *mut ffi::cairo_scaled_font_t) -> ScaledFont {
from_glib_none(ptr)
}
#[cfg(not(feature = "use_glib"))]
pub unsafe fn from_raw_none(ptr: *mut ffi::cairo_scaled_font_t) -> ScaledFont {
assert!(!ptr.is_null());
ffi::cairo_scaled_font_reference(ptr);
ScaledFont(ptr::NonNull::new_unchecked(ptr))
}
pub fn get_type(&self) -> FontType {
unsafe { FontType::from(ffi::cairo_scaled_font_get_type(self.to_raw_none())) }
}
pub fn get_reference_count(&self) -> usize {
unsafe { ffi::cairo_scaled_font_get_reference_count(self.to_raw_none()) as usize }
}
pub fn extents(&self) -> FontExtents {
let mut extents = FontExtents {
ascent: 0.0,
descent: 0.0,
height: 0.0,
max_x_advance: 0.0,
max_y_advance: 0.0,
};
unsafe { ffi::cairo_scaled_font_extents(self.to_raw_none(), &mut extents) }
extents
}
pub fn text_extents(&self, text: &str) -> TextExtents {
let mut extents = TextExtents {
x_bearing: 0.0,
y_bearing: 0.0,
width: 0.0,
height: 0.0,
x_advance: 0.0,
y_advance: 0.0,
};
let text = CString::new(text).unwrap();
unsafe {
ffi::cairo_scaled_font_text_extents(self.to_raw_none(), text.as_ptr(), &mut extents)
}
extents
}
pub fn glyph_extents(&self, glyphs: &[Glyph]) -> TextExtents {
let mut extents = TextExtents {
x_bearing: 0.0,
y_bearing: 0.0,
width: 0.0,
height: 0.0,
x_advance: 0.0,
y_advance: 0.0,
};
unsafe {
ffi::cairo_scaled_font_glyph_extents(
self.to_raw_none(), | glyphs.len() as i32,
&mut extents,
)
}
extents
}
pub fn text_to_glyphs(&self, x: f64, y: f64, text: &str) -> (Vec<Glyph>, Vec<TextCluster>) {
// This large unsafe block is due to the FFI function returning two specially allocated
// (cairo_{glyph,text_cluster}_allocate) pointers that need to be copied into Vec<T>
// types before they're of any use to Rust code.
unsafe {
let mut glyphs_ptr: *mut Glyph = ptr::null_mut();
let mut glyph_count = 0i32;
let mut clusters_ptr: *mut TextCluster = ptr::null_mut();
let mut cluster_count = 0i32;
let mut cluster_flags = 0i32;
let text_length = text.len() as i32;
let text = CString::new(text).unwrap();
let status = ffi::cairo_scaled_font_text_to_glyphs(
self.to_raw_none(),
x,
y,
text.as_ptr(),
text_length,
&mut glyphs_ptr,
&mut glyph_count,
&mut clusters_ptr,
&mut cluster_count,
&mut cluster_flags,
);
status_to_result(status).expect("Failed to convert text to glyphs");
let glyph_count = glyph_count as usize;
let glyphs: Vec<Glyph> = {
let mut glyphs: Vec<Glyph> = Vec::with_capacity(glyph_count);
glyphs.set_len(glyph_count);
ptr::copy(glyphs_ptr, glyphs.as_mut_ptr(), glyph_count);
glyphs
};
let cluster_count = cluster_count as usize;
let clusters: Vec<TextCluster> = {
let mut clusters = Vec::with_capacity(cluster_count);
clusters.set_len(cluster_count);
ptr::copy(clusters_ptr, clusters.as_mut_ptr(), cluster_count);
clusters
};
ffi::cairo_glyph_free(glyphs_ptr);
ffi::cairo_text_cluster_free(clusters_ptr);
(glyphs, clusters)
}
}
pub fn get_font_face(&self) -> FontFace {
unsafe { FontFace::from_raw_none(ffi::cairo_scaled_font_get_font_face(self.to_raw_none())) }
}
pub fn get_font_options(&self) -> FontOptions {
let options = FontOptions::new();
unsafe {
ffi::cairo_scaled_font_get_font_options(self.to_raw_none(), options.to_raw_none())
}
options
}
pub fn get_font_matrix(&self) -> Matrix {
let mut matrix = Matrix::null();
unsafe { ffi::cairo_scaled_font_get_font_matrix(self.to_raw_none(), matrix.mut_ptr()) }
matrix
}
pub fn get_ctm(&self) -> Matrix {
let mut matrix = Matrix::null();
unsafe { ffi::cairo_scaled_font_get_ctm(self.to_raw_none(), matrix.mut_ptr()) }
matrix
}
pub fn get_scale_matrix(&self) -> Matrix {
let mut matrix = Matrix::null();
unsafe { ffi::cairo_scaled_font_get_scale_matrix(self.to_raw_none(), matrix.mut_ptr()) }
matrix
}
user_data_methods! {
ffi::cairo_scaled_font_get_user_data,
ffi::cairo_scaled_font_set_user_data,
}
}
#[cfg(not(feature = "use_glib"))]
impl Drop for ScaledFont {
fn drop(&mut self) {
unsafe {
ffi::cairo_scaled_font_destroy(self.to_raw_none());
}
}
}
#[cfg(not(feature = "use_glib"))]
impl Clone for ScaledFont {
fn clone(&self) -> ScaledFont {
unsafe { ScaledFont::from_raw_none(self.to_raw_none()) }
}
} | glyphs.as_ptr(), |
views.py | from django.shortcuts import render, redirect
from django.utils import translation, timezone
from django.http import HttpResponseRedirect
from django.urls import reverse
from product.models import Product, Product_type
from .forms import ProductForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
# This is main site 'index.html' view
def main(request):
userLanguage = 'en'
products = Product.objects.all().order_by('product_detail_type__product_type')
print("**********")
print(products)
site="main"
if translation.LANGUAGE_SESSION_KEY in request.session:
userLanguage = request.session[translation.LANGUAGE_SESSION_KEY]
context = {'site':site, 'products':products}
translation.activate(userLanguage)
return render(request, 'index.html', context)
def transEn(request):
if translation.LANGUAGE_SESSION_KEY in request.session:
del request.session[translation.LANGUAGE_SESSION_KEY]
userLanguage = 'en'
request.session[translation.LANGUAGE_SESSION_KEY] = userLanguage
return redirect(reverse('home'))
def transKo(request):
|
# file upload for Product
def model_form_upload(request):
if request.method == 'POST':
form = ProductForm(request.POST, request.FILES)
if form.is_valid():
form.save()
else:
form = ProductForm()
products = Product.objects.all()
context = {'form': form, 'products': products}
print("**********")
print(products)
return render(request, 'mainsite/product_upload.html', context)
| if translation.LANGUAGE_SESSION_KEY in request.session:
del request.session[translation.LANGUAGE_SESSION_KEY]
userLanguage = 'ko'
request.session[translation.LANGUAGE_SESSION_KEY] = userLanguage
return redirect(reverse('home')) |
reset.password.component.ts | import { Component, ChangeDetectorRef, OnInit, Inject } from '@angular/core';
import { ActivatedRoute, Router } from '@angular/router';
import { NbAuthService, NB_AUTH_OPTIONS, NbResetPasswordComponent } from '@nebular/auth';
@Component({
selector: 'ngx-reset',
templateUrl: 'reset-password.component.html',
})
export class ResetPasswordComponent extends NbResetPasswordComponent implements OnInit {
token = '';
constructor(
@Inject(NB_AUTH_OPTIONS) protected options: {},
protected service: NbAuthService,
protected cd: ChangeDetectorRef,
protected router: Router,
private route: ActivatedRoute,
) {
super(service, options, cd, router);
}
ngOnInit() {
this.route.queryParams.subscribe(
params => {
this.user.token = params['token']; | } | },
);
} |
options.go | package gorocksdb
// #include "rocksdb/c.h"
// #include "gorocksdb.h"
import "C"
import "unsafe"
// CompressionType specifies the block compression.
// DB contents are stored in a set of blocks, each of which holds a
// sequence of key,value pairs. Each block may be compressed before
// being stored in a file. The following enum describes which
// compression method (if any) is used to compress a block.
type CompressionType uint
// Compression types.
const (
NoCompression = CompressionType(C.rocksdb_no_compression)
SnappyCompression = CompressionType(C.rocksdb_snappy_compression)
ZLibCompression = CompressionType(C.rocksdb_zlib_compression)
Bz2Compression = CompressionType(C.rocksdb_bz2_compression)
LZ4Compression = CompressionType(C.rocksdb_lz4_compression)
LZ4HCCompression = CompressionType(C.rocksdb_lz4hc_compression)
XpressCompression = CompressionType(C.rocksdb_xpress_compression)
ZSTDCompression = CompressionType(C.rocksdb_zstd_compression)
)
// CompactionStyle specifies the compaction style.
type CompactionStyle uint
// Compaction styles.
const (
LevelCompactionStyle = CompactionStyle(C.rocksdb_level_compaction)
UniversalCompactionStyle = CompactionStyle(C.rocksdb_universal_compaction)
FIFOCompactionStyle = CompactionStyle(C.rocksdb_fifo_compaction)
)
// CompactionAccessPattern specifies the access pattern in compaction.
type CompactionAccessPattern uint
// Access patterns for compaction.
const (
NoneCompactionAccessPattern = CompactionAccessPattern(0)
NormalCompactionAccessPattern = CompactionAccessPattern(1)
SequentialCompactionAccessPattern = CompactionAccessPattern(2)
WillneedCompactionAccessPattern = CompactionAccessPattern(3)
)
// InfoLogLevel describes the log level.
type InfoLogLevel uint
// Log levels.
const (
DebugInfoLogLevel = InfoLogLevel(0)
InfoInfoLogLevel = InfoLogLevel(1)
WarnInfoLogLevel = InfoLogLevel(2)
ErrorInfoLogLevel = InfoLogLevel(3)
FatalInfoLogLevel = InfoLogLevel(4)
)
// Options represent all of the available options when opening a database with Open.
type Options struct {
c *C.rocksdb_options_t
// Hold references for GC.
env *Env
bbto *BlockBasedTableOptions
// We keep these so we can free their memory in Destroy.
ccmp *C.rocksdb_comparator_t
cmo *C.rocksdb_mergeoperator_t
cst *C.rocksdb_slicetransform_t
ccf *C.rocksdb_compactionfilter_t
}
// NewDefaultOptions creates the default Options.
func NewDefaultOptions() *Options |
// NewNativeOptions creates an Options object.
func NewNativeOptions(c *C.rocksdb_options_t) *Options {
return &Options{c: c}
}
// -------------------
// Parameters that affect behavior
// SetCompactionFilter sets the specified compaction filter
// which will be applied on compactions.
// Default: nil
func (opts *Options) SetCompactionFilter(value CompactionFilter) {
if nc, ok := value.(nativeCompactionFilter); ok {
opts.ccf = nc.c
} else {
idx := registerCompactionFilter(value)
opts.ccf = C.gorocksdb_compactionfilter_create(C.uintptr_t(idx))
}
C.rocksdb_options_set_compaction_filter(opts.c, opts.ccf)
}
// SetComparator sets the comparator which defines the order of keys in the table.
// Default: a comparator that uses lexicographic byte-wise ordering
func (opts *Options) SetComparator(value Comparator) {
if nc, ok := value.(nativeComparator); ok {
opts.ccmp = nc.c
} else {
idx := registerComperator(value)
opts.ccmp = C.gorocksdb_comparator_create(C.uintptr_t(idx))
}
C.rocksdb_options_set_comparator(opts.c, opts.ccmp)
}
// SetMergeOperator sets the merge operator which will be called
// if merge operations are used.
// Default: nil
func (opts *Options) SetMergeOperator(value MergeOperator) {
if nmo, ok := value.(nativeMergeOperator); ok {
opts.cmo = nmo.c
} else {
idx := registerMergeOperator(value)
opts.cmo = C.gorocksdb_mergeoperator_create(C.uintptr_t(idx))
}
C.rocksdb_options_set_merge_operator(opts.c, opts.cmo)
}
// A single CompactionFilter instance to call into during compaction.
// Allows an application to modify/delete a key-value during background
// compaction.
//
// If the client requires a new compaction filter to be used for different
// compaction runs, it can specify compaction_filter_factory instead of this
// option. The client should specify only one of the two.
// compaction_filter takes precedence over compaction_filter_factory if
// client specifies both.
//
// If multithreaded compaction is being used, the supplied CompactionFilter
// instance may be used from different threads concurrently and so should be
// thread-safe.
//
// Default: nil
// TODO: implement in C
//func (opts *Options) SetCompactionFilter(value *CompactionFilter) {
// C.rocksdb_options_set_compaction_filter(opts.c, value.filter)
//}
// This is a factory that provides compaction filter objects which allow
// an application to modify/delete a key-value during background compaction.
//
// A new filter will be created on each compaction run. If multithreaded
// compaction is being used, each created CompactionFilter will only be used
// from a single thread and so does not need to be thread-safe.
//
// Default: a factory that doesn't provide any object
// std::shared_ptr<CompactionFilterFactory> compaction_filter_factory;
// TODO: implement in C and Go
// Version TWO of the compaction_filter_factory
// It supports rolling compaction
//
// Default: a factory that doesn't provide any object
// std::shared_ptr<CompactionFilterFactoryV2> compaction_filter_factory_v2;
// TODO: implement in C and Go
// SetCreateIfMissing specifies whether the database
// should be created if it is missing.
// Default: false
func (opts *Options) SetCreateIfMissing(value bool) {
C.rocksdb_options_set_create_if_missing(opts.c, boolToChar(value))
}
// SetErrorIfExists specifies whether an error should be raised
// if the database already exists.
// Default: false
func (opts *Options) SetErrorIfExists(value bool) {
C.rocksdb_options_set_error_if_exists(opts.c, boolToChar(value))
}
// SetParanoidChecks enable/disable paranoid checks.
//
// If true, the implementation will do aggressive checking of the
// data it is processing and will stop early if it detects any
// errors. This may have unforeseen ramifications: for example, a
// corruption of one DB entry may cause a large number of entries to
// become unreadable or for the entire DB to become unopenable.
// If any of the writes to the database fails (Put, Delete, Merge, Write),
// the database will switch to read-only mode and fail all other
// Write operations.
// Default: false
func (opts *Options) SetParanoidChecks(value bool) {
C.rocksdb_options_set_paranoid_checks(opts.c, boolToChar(value))
}
// SetDBPaths sets the DBPaths of the options.
//
// A list of paths where SST files can be put into, with its target size.
// Newer data is placed into paths specified earlier in the vector while
// older data gradually moves to paths specified later in the vector.
//
// For example, you have a flash device with 10GB allocated for the DB,
// as well as a hard drive of 2TB, you should config it to be:
// [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
//
// The system will try to guarantee data under each path is close to but
// not larger than the target size. But current and future file sizes used
// when determining where to place a file are based on best-effort estimation,
// which means there is a chance that the actual size under the directory
// is slightly more than target size under some workloads. User should give
// some buffer room for those cases.
//
// If none of the paths has sufficient room to place a file, the file will
// be placed to the last path anyway, despite to the target size.
//
// Placing newer data to earlier paths is also best-effort. User should
// expect user files to be placed in higher levels in some extreme cases.
//
// If left empty, only one path will be used, which is db_name passed when
// opening the DB.
// Default: empty
func (opts *Options) SetDBPaths(dbpaths []*DBPath) {
l := len(dbpaths)
cDbpaths := make([]*C.rocksdb_dbpath_t, l)
for i, v := range dbpaths {
cDbpaths[i] = v.c
}
C.rocksdb_options_set_db_paths(opts.c, &cDbpaths[0], C.size_t(l))
}
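// exampleDBPaths is an illustrative sketch (not part of the original bindings)
// of the flash + hard-drive layout described above; the *DBPath values would be
// built with this package's DBPath constructor.
func exampleDBPaths(opts *Options, flash, hdd *DBPath) {
	// Newer data goes to the first (flash) path; older data gradually moves to
	// the hard drive, per the placement rules documented above.
	opts.SetDBPaths([]*DBPath{flash, hdd})
}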
// SetEnv sets the specified object to interact with the environment,
// e.g. to read/write files, schedule background work, etc.
// Default: DefaultEnv
func (opts *Options) SetEnv(value *Env) {
opts.env = value
C.rocksdb_options_set_env(opts.c, value.c)
}
// SetInfoLogLevel sets the info log level.
// Default: InfoInfoLogLevel
func (opts *Options) SetInfoLogLevel(value InfoLogLevel) {
C.rocksdb_options_set_info_log_level(opts.c, C.int(value))
}
// IncreaseParallelism sets the parallelism.
//
// By default, RocksDB uses only one background thread for flush and
// compaction. Calling this function will set it up such that a total of
// `total_threads` is used. A good value for `total_threads` is the number of
// cores. You almost definitely want to call this function if your system is
// bottlenecked by RocksDB.
func (opts *Options) IncreaseParallelism(total_threads int) {
C.rocksdb_options_increase_parallelism(opts.c, C.int(total_threads))
}
// OptimizeForPointLookup optimizes the DB for point lookups.
//
// Use this if you don't need to keep the data sorted, i.e. you'll never use
// an iterator, only Put() and Get() API calls
//
// If you use this with rocksdb >= 5.0.2, you must call `SetAllowConcurrentMemtableWrites(false)`
// to avoid an assertion error immediately on opening the db.
func (opts *Options) OptimizeForPointLookup(block_cache_size_mb uint64) {
C.rocksdb_options_optimize_for_point_lookup(opts.c, C.uint64_t(block_cache_size_mb))
}
// Set whether to allow concurrent memtable writes. Concurrent writes are
// not supported by all memtable factories (currently only SkipList memtables).
// As of rocksdb 5.0.2 you must call `SetAllowConcurrentMemtableWrites(false)`
// if you use `OptimizeForPointLookup`.
func (opts *Options) SetAllowConcurrentMemtableWrites(allow bool) {
C.rocksdb_options_set_allow_concurrent_memtable_write(opts.c, boolToChar(allow))
}
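// examplePointLookupOptions is an illustrative sketch of the pairing described
// above: point-lookup tuning plus disabling concurrent memtable writes for
// rocksdb >= 5.0.2.
func examplePointLookupOptions() *Options {
	opts := NewDefaultOptions()
	opts.OptimizeForPointLookup(64) // 64MB block cache
	opts.SetAllowConcurrentMemtableWrites(false)
	return opts
}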
// OptimizeLevelStyleCompaction optimizes the DB for level-style compaction.
//
// Default values for some parameters in ColumnFamilyOptions are not
// optimized for heavy workloads and big datasets, which means you might
// observe write stalls under some conditions. As a starting point for tuning
// RocksDB options, use the following two functions:
// * OptimizeLevelStyleCompaction -- optimizes level style compaction
// * OptimizeUniversalStyleCompaction -- optimizes universal style compaction
// Universal style compaction is focused on reducing Write Amplification
// Factor for big data sets, but increases Space Amplification. You can learn
// more about the different styles here:
// https://github.com/facebook/rocksdb/wiki/Rocksdb-Architecture-Guide
// Make sure to also call IncreaseParallelism(), which will provide the
// biggest performance gains.
// Note: we might use more memory than memtable_memory_budget during high
// write rate period
func (opts *Options) OptimizeLevelStyleCompaction(memtable_memory_budget uint64) {
C.rocksdb_options_optimize_level_style_compaction(opts.c, C.uint64_t(memtable_memory_budget))
}
// OptimizeUniversalStyleCompaction optimizes the DB for universal compaction.
// See note on OptimizeLevelStyleCompaction.
func (opts *Options) OptimizeUniversalStyleCompaction(memtable_memory_budget uint64) {
C.rocksdb_options_optimize_universal_style_compaction(opts.c, C.uint64_t(memtable_memory_budget))
}
// SetWriteBufferSize sets the amount of data to build up in memory
// (backed by an unsorted log on disk) before converting to a sorted on-disk file.
//
// Larger values increase performance, especially during bulk loads.
// Up to max_write_buffer_number write buffers may be held in memory
// at the same time,
// so you may wish to adjust this parameter to control memory usage.
// Also, a larger write buffer will result in a longer recovery time
// the next time the database is opened.
// Default: 4MB
func (opts *Options) SetWriteBufferSize(value int) {
C.rocksdb_options_set_write_buffer_size(opts.c, C.size_t(value))
}
// SetMaxWriteBufferNumber sets the maximum number of write buffers
// that are built up in memory.
//
// The default is 2, so that when 1 write buffer is being flushed to
// storage, new writes can continue to the other write buffer.
// Default: 2
func (opts *Options) SetMaxWriteBufferNumber(value int) {
C.rocksdb_options_set_max_write_buffer_number(opts.c, C.int(value))
}
// SetMinWriteBufferNumberToMerge sets the minimum number of write buffers
// that will be merged together before writing to storage.
//
// If set to 1, then all write buffers are flushed to L0 as individual files
// and this increases read amplification because a get request has to check
// in all of these files. Also, an in-memory merge may result in writing less
// data to storage if there are duplicate records in each of these
// individual write buffers.
// Default: 1
func (opts *Options) SetMinWriteBufferNumberToMerge(value int) {
C.rocksdb_options_set_min_write_buffer_number_to_merge(opts.c, C.int(value))
}
// SetMaxOpenFiles sets the number of open files that can be used by the DB.
//
// You may need to increase this if your database has a large working set
// (budget one open file per 2MB of working set).
// Default: 1000
func (opts *Options) SetMaxOpenFiles(value int) {
C.rocksdb_options_set_max_open_files(opts.c, C.int(value))
}
// SetMaxFileOpeningThreads sets the maximum number of file opening threads.
// If max_open_files is -1, DB will open all files on DB::Open(). You can
// use this option to increase the number of threads used to open the files.
// Default: 16
func (opts *Options) SetMaxFileOpeningThreads(value int) {
C.rocksdb_options_set_max_file_opening_threads(opts.c, C.int(value))
}
// SetMaxTotalWalSize sets the maximum total wal size in bytes.
// Once write-ahead logs exceed this size, we will start forcing the flush of
// column families whose memtables are backed by the oldest live WAL file
// (i.e. the ones that are causing all the space amplification). If set to 0
// (default), we will dynamically choose the WAL size limit to be
// [sum of all write_buffer_size * max_write_buffer_number] * 4
// Default: 0
func (opts *Options) SetMaxTotalWalSize(value uint64) {
C.rocksdb_options_set_max_total_wal_size(opts.c, C.uint64_t(value))
}
// SetCompression sets the compression algorithm.
// Default: SnappyCompression, which gives lightweight but fast
// compression.
func (opts *Options) SetCompression(value CompressionType) {
C.rocksdb_options_set_compression(opts.c, C.int(value))
}
// SetCompressionPerLevel sets different compression algorithm per level.
//
// Different levels can have different compression policies. There
// are cases where the lower levels would like to use a quick compression
// algorithm while the higher levels (which have more data) use
// compression algorithms that have better compression but could
// be slower. This array should have an entry for
// each level of the database. This array overrides the
// value specified in the previous field 'compression'.
func (opts *Options) SetCompressionPerLevel(value []CompressionType) {
cLevels := make([]C.int, len(value))
for i, v := range value {
cLevels[i] = C.int(v)
}
C.rocksdb_options_set_compression_per_level(opts.c, &cLevels[0], C.size_t(len(value)))
}
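// exampleCompressionPerLevel is an illustrative sketch: quick compression on
// the small lower levels, stronger compression on the larger, colder levels of
// a 7-level DB.
func exampleCompressionPerLevel(opts *Options) {
	opts.SetCompressionPerLevel([]CompressionType{
		NoCompression, NoCompression, SnappyCompression, SnappyCompression,
		ZSTDCompression, ZSTDCompression, ZSTDCompression,
	})
}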
// SetMinLevelToCompress sets the start level to use compression.
func (opts *Options) SetMinLevelToCompress(value int) {
C.rocksdb_options_set_min_level_to_compress(opts.c, C.int(value))
}
// SetCompressionOptions sets different options for compression algorithms.
// Default: nil
func (opts *Options) SetCompressionOptions(value *CompressionOptions) {
C.rocksdb_options_set_compression_options(opts.c, C.int(value.WindowBits), C.int(value.Level), C.int(value.Strategy), C.int(value.MaxDictBytes))
}
// SetPrefixExtractor sets the prefix extractor.
//
// If set, use the specified function to determine the
// prefixes for keys. These prefixes will be placed in the filter.
// Depending on the workload, this can reduce the read-IOP
// cost for scans when a prefix is passed via ReadOptions to
// db.NewIterator().
// Default: nil
func (opts *Options) SetPrefixExtractor(value SliceTransform) {
if nst, ok := value.(nativeSliceTransform); ok {
opts.cst = nst.c
} else {
idx := registerSliceTransform(value)
opts.cst = C.gorocksdb_slicetransform_create(C.uintptr_t(idx))
}
C.rocksdb_options_set_prefix_extractor(opts.c, opts.cst)
}
// SetNumLevels sets the number of levels for this database.
// Default: 7
func (opts *Options) SetNumLevels(value int) {
C.rocksdb_options_set_num_levels(opts.c, C.int(value))
}
// SetLevel0FileNumCompactionTrigger sets the number of files
// to trigger level-0 compaction.
//
// A value <0 means that level-0 compaction will not be
// triggered by number of files at all.
// Default: 4
func (opts *Options) SetLevel0FileNumCompactionTrigger(value int) {
C.rocksdb_options_set_level0_file_num_compaction_trigger(opts.c, C.int(value))
}
// SetLevel0SlowdownWritesTrigger sets the soft limit on number of level-0 files.
//
// We start slowing down writes at this point.
// A value <0 means that no write slowdown will be triggered by
// number of files in level-0.
// Default: 8
func (opts *Options) SetLevel0SlowdownWritesTrigger(value int) {
C.rocksdb_options_set_level0_slowdown_writes_trigger(opts.c, C.int(value))
}
// SetLevel0StopWritesTrigger sets the maximum number of level-0 files.
// We stop writes at this point.
// Default: 12
func (opts *Options) SetLevel0StopWritesTrigger(value int) {
C.rocksdb_options_set_level0_stop_writes_trigger(opts.c, C.int(value))
}
// SetMaxMemCompactionLevel sets the maximum level
// to which a new compacted memtable is pushed if it does not create overlap.
//
// We try to push to level 2 to avoid the
// relatively expensive level 0=>1 compactions and to avoid some
// expensive manifest file operations. We do not push all the way to
// the largest level since that can generate a lot of wasted disk
// space if the same key space is being repeatedly overwritten.
// Default: 2
func (opts *Options) SetMaxMemCompactionLevel(value int) {
C.rocksdb_options_set_max_mem_compaction_level(opts.c, C.int(value))
}
// SetTargetFileSizeBase sets the target file size for compaction.
//
// Target file size is per-file size for level-1.
// Target file size for level L can be calculated by
// target_file_size_base * (target_file_size_multiplier ^ (L-1))
//
// For example, if target_file_size_base is 2MB and
// target_file_size_multiplier is 10, then each file on level-1 will
// be 2MB, and each file on level 2 will be 20MB,
// and each file on level-3 will be 200MB.
// Default: 2MB
func (opts *Options) SetTargetFileSizeBase(value uint64) {
C.rocksdb_options_set_target_file_size_base(opts.c, C.uint64_t(value))
}
// SetTargetFileSizeMultiplier sets the target file size multiplier for compaction.
// Default: 1
func (opts *Options) SetTargetFileSizeMultiplier(value int) {
C.rocksdb_options_set_target_file_size_multiplier(opts.c, C.int(value))
}
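// exampleTargetFileSizes mirrors the worked example above: 2MB files on
// level-1, growing 10x per level (2MB, 20MB, 200MB, ...). Illustrative only.
func exampleTargetFileSizes(opts *Options) {
	opts.SetTargetFileSizeBase(2 << 20) // 2MB
	opts.SetTargetFileSizeMultiplier(10)
}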
// SetMaxBytesForLevelBase sets the maximum total data size for a level.
//
// It is the max total for level-1.
// Maximum number of bytes for level L can be calculated as
// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
//
// For example, if max_bytes_for_level_base is 20MB, and if
// max_bytes_for_level_multiplier is 10, total data size for level-1
// will be 20MB, total file size for level-2 will be 200MB,
// and total file size for level-3 will be 2GB.
// Default: 10MB
func (opts *Options) SetMaxBytesForLevelBase(value uint64) {
C.rocksdb_options_set_max_bytes_for_level_base(opts.c, C.uint64_t(value))
}
// SetMaxBytesForLevelMultiplier sets the max Bytes for level multiplier.
// Default: 10
func (opts *Options) SetMaxBytesForLevelMultiplier(value float64) {
C.rocksdb_options_set_max_bytes_for_level_multiplier(opts.c, C.double(value))
}
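// exampleLevelSizes mirrors the worked example above: 20MB of data on level-1,
// growing 10x per level (20MB, 200MB, 2GB, ...). Illustrative only.
func exampleLevelSizes(opts *Options) {
	opts.SetMaxBytesForLevelBase(20 << 20) // 20MB
	opts.SetMaxBytesForLevelMultiplier(10)
}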
// SetLevelCompactionDynamicLevelBytes specifies whether to pick
// target size of each level dynamically.
//
// We will pick a base level b >= 1. L0 will be directly merged into level b,
// instead of always into level 1. Level 1 to b-1 need to be empty.
// We try to pick b and its target size so that
// 1. target size is in the range of
// (max_bytes_for_level_base / max_bytes_for_level_multiplier,
// max_bytes_for_level_base]
// 2. target size of the last level (level num_levels-1) equals to extra size
// of the level.
// At the same time max_bytes_for_level_multiplier and
// max_bytes_for_level_multiplier_additional are still satisfied.
//
// With this option on, from an empty DB, we make last level the base level,
// which means merging L0 data into the last level, until it exceeds
// max_bytes_for_level_base. And then we make the second last level to be
// base level, to start to merge L0 data to second last level, with its
// target size to be 1/max_bytes_for_level_multiplier of the last level's
// extra size. After the data accumulates more so that we need to move the
// base level to the third last one, and so on.
//
// For example, assume max_bytes_for_level_multiplier=10, num_levels=6,
// and max_bytes_for_level_base=10MB.
// Target sizes of level 1 to 5 starts with:
// [- - - - 10MB]
// with the base level being level 5. Target sizes of level 1 to 4 are not applicable
// because they will not be used.
// Until the size of Level 5 grows to more than 10MB, say 11MB, we make
// level 4 the base level and now the targets look like:
// [- - - 1.1MB 11MB]
// While data are accumulated, size targets are tuned based on actual data
// of level 5. When level 5 has 50MB of data, the target is like:
// [- - - 5MB 50MB]
// Until level 5's actual size is more than 100MB, say 101MB. Now if we keep
// level 4 to be the base level, its target size needs to be 10.1MB, which
// doesn't satisfy the target size range. So now we make level 3 the base
// level, and the target sizes of the levels look like:
// [- - 1.01MB 10.1MB 101MB]
// In the same way, while level 5 further grows, all levels' targets grow,
// like
// [- - 5MB 50MB 500MB]
// Until level 5 exceeds 1000MB and becomes 1001MB, we make level 2 the
// base level and make levels' target sizes like this:
// [- 1.001MB 10.01MB 100.1MB 1001MB]
// and go on...
//
// By doing it, we give max_bytes_for_level_multiplier a priority against
// max_bytes_for_level_base, for a more predictable LSM tree shape. It is
// useful to limit worst-case space amplification.
//
// max_bytes_for_level_multiplier_additional is ignored with this flag on.
//
// Turning this feature on or off for an existing DB can cause unexpected
// LSM tree structure so it's not recommended.
//
// Default: false
func (opts *Options) SetLevelCompactionDynamicLevelBytes(value bool) {
C.rocksdb_options_set_level_compaction_dynamic_level_bytes(opts.c, boolToChar(value))
}
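// exampleDynamicLevelBytes is an illustrative sketch of enabling the dynamic
// level sizing described above with the numbers used in that example
// (num_levels=6, max_bytes_for_level_base=10MB, multiplier=10).
func exampleDynamicLevelBytes(opts *Options) {
	opts.SetNumLevels(6)
	opts.SetMaxBytesForLevelBase(10 << 20) // 10MB
	opts.SetMaxBytesForLevelMultiplier(10)
	opts.SetLevelCompactionDynamicLevelBytes(true)
}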
// SetMaxCompactionBytes sets the maximum number of bytes in all compacted files.
// We try to limit number of bytes in one compaction to be lower than this
// threshold. But it's not guaranteed.
// Value 0 will be sanitized.
// Default: result.target_file_size_base * 25
func (opts *Options) SetMaxCompactionBytes(value uint64) {
C.rocksdb_options_set_max_compaction_bytes(opts.c, C.uint64_t(value))
}
// SetSoftPendingCompactionBytesLimit sets the threshold at which
// all writes will be slowed down to at least delayed_write_rate if estimated
// bytes needed to be compacted exceed this threshold.
//
// Default: 64GB
func (opts *Options) SetSoftPendingCompactionBytesLimit(value uint64) {
C.rocksdb_options_set_soft_pending_compaction_bytes_limit(opts.c, C.size_t(value))
}
// SetHardPendingCompactionBytesLimit sets the bytes threshold at which
// all writes are stopped if estimated bytes needed to be compacted exceed
// this threshold.
//
// Default: 256GB
func (opts *Options) SetHardPendingCompactionBytesLimit(value uint64) {
C.rocksdb_options_set_hard_pending_compaction_bytes_limit(opts.c, C.size_t(value))
}
// SetMaxBytesForLevelMultiplierAdditional sets different max-size multipliers
// for different levels.
//
// These are multiplied by max_bytes_for_level_multiplier to arrive
// at the max-size of each level.
// Default: 1 for each level
func (opts *Options) SetMaxBytesForLevelMultiplierAdditional(value []int) {
cLevels := make([]C.int, len(value))
for i, v := range value {
cLevels[i] = C.int(v)
}
C.rocksdb_options_set_max_bytes_for_level_multiplier_additional(opts.c, &cLevels[0], C.size_t(len(value)))
}
// SetUseFsync enable/disable fsync.
//
// If true, then every store to stable storage will issue a fsync.
// If false, then every store to stable storage will issue a fdatasync.
// This parameter should be set to true while storing data to
// filesystem like ext3 that can lose files after a reboot.
// Default: false
func (opts *Options) SetUseFsync(value bool) {
C.rocksdb_options_set_use_fsync(opts.c, C.int(btoi(value)))
}
// SetDbLogDir specifies the absolute info LOG dir.
//
// If it is empty, the log files will be in the same dir as data.
// If it is non empty, the log files will be in the specified dir,
// and the db data dir's absolute path will be used as the log file
// name's prefix.
// Default: empty
func (opts *Options) SetDbLogDir(value string) {
cvalue := C.CString(value)
defer C.free(unsafe.Pointer(cvalue))
C.rocksdb_options_set_db_log_dir(opts.c, cvalue)
}
// SetWalDir specifies the absolute dir path for write-ahead logs (WAL).
//
// If it is empty, the log files will be in the same dir as data.
// If it is non empty, the log files will be in the specified dir,
// When destroying the db, all log files and the dir itself are deleted.
// Default: empty
func (opts *Options) SetWalDir(value string) {
cvalue := C.CString(value)
defer C.free(unsafe.Pointer(cvalue))
C.rocksdb_options_set_wal_dir(opts.c, cvalue)
}
// SetDeleteObsoleteFilesPeriodMicros sets the periodicity
// when obsolete files get deleted.
//
// The files that get out of scope of the compaction
// process will still get automatically deleted on every compaction,
// regardless of this setting.
// Default: 6 hours
func (opts *Options) SetDeleteObsoleteFilesPeriodMicros(value uint64) {
C.rocksdb_options_set_delete_obsolete_files_period_micros(opts.c, C.uint64_t(value))
}
// SetMaxBackgroundCompactions sets the maximum number of
// concurrent background jobs, submitted to
// the default LOW priority thread pool
// Default: 1
func (opts *Options) SetMaxBackgroundCompactions(value int) {
C.rocksdb_options_set_max_background_compactions(opts.c, C.int(value))
}
// SetMaxBackgroundFlushes sets the maximum number of
// concurrent background memtable flush jobs, submitted to
// the HIGH priority thread pool.
//
// By default, all background jobs (major compaction and memtable flush) go
// to the LOW priority pool. If this option is set to a positive number,
// memtable flush jobs will be submitted to the HIGH priority pool.
// It is important when the same Env is shared by multiple db instances.
// Without a separate pool, long running major compaction jobs could
// potentially block memtable flush jobs of other db instances, leading to
// unnecessary Put stalls.
// Default: 0
func (opts *Options) SetMaxBackgroundFlushes(value int) {
C.rocksdb_options_set_max_background_flushes(opts.c, C.int(value))
}
// SetMaxLogFileSize sets the maximal size of the info log file.
//
// If the log file is larger than `max_log_file_size`, a new info log
// file will be created.
// If max_log_file_size == 0, all logs will be written to one log file.
// Default: 0
func (opts *Options) SetMaxLogFileSize(value int) {
C.rocksdb_options_set_max_log_file_size(opts.c, C.size_t(value))
}
// SetLogFileTimeToRoll sets the time for the info log file to roll (in seconds).
//
// If specified with non-zero value, log file will be rolled
// if it has been active longer than `log_file_time_to_roll`.
// Default: 0 (disabled)
func (opts *Options) SetLogFileTimeToRoll(value int) {
C.rocksdb_options_set_log_file_time_to_roll(opts.c, C.size_t(value))
}
// SetKeepLogFileNum sets the maximal info log files to be kept.
// Default: 1000
func (opts *Options) SetKeepLogFileNum(value int) {
C.rocksdb_options_set_keep_log_file_num(opts.c, C.size_t(value))
}
// SetSoftRateLimit sets the soft rate limit.
//
// Puts are delayed 0-1 ms when any level has a compaction score that exceeds
// soft_rate_limit. This is ignored when == 0.0.
// CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not
// hold, RocksDB will set soft_rate_limit = hard_rate_limit
// Default: 0.0 (disabled)
func (opts *Options) SetSoftRateLimit(value float64) {
C.rocksdb_options_set_soft_rate_limit(opts.c, C.double(value))
}
// SetHardRateLimit sets the hard rate limit.
//
// Puts are delayed 1ms at a time when any level has a compaction score that
// exceeds hard_rate_limit. This is ignored when <= 1.0.
// Default: 0.0 (disabled)
func (opts *Options) SetHardRateLimit(value float64) {
C.rocksdb_options_set_hard_rate_limit(opts.c, C.double(value))
}
// SetRateLimitDelayMaxMilliseconds sets the max time
// a put will be stalled when hard_rate_limit is enforced.
// If 0, then there is no limit.
// Default: 1000
func (opts *Options) SetRateLimitDelayMaxMilliseconds(value uint) {
C.rocksdb_options_set_rate_limit_delay_max_milliseconds(opts.c, C.uint(value))
}
// SetMaxManifestFileSize sets the maximal manifest file size until it is rolled over.
// The older manifest file will be deleted.
// Default: MAX_INT so that roll-over does not take place.
func (opts *Options) SetMaxManifestFileSize(value uint64) {
C.rocksdb_options_set_max_manifest_file_size(opts.c, C.size_t(value))
}
// SetTableCacheNumshardbits sets the number of shards used for table cache.
// Default: 4
func (opts *Options) SetTableCacheNumshardbits(value int) {
C.rocksdb_options_set_table_cache_numshardbits(opts.c, C.int(value))
}
// SetTableCacheRemoveScanCountLimit sets the count limit during a scan.
//
// During data eviction of table's LRU cache, it would be inefficient
// to strictly follow LRU because this piece of memory will not really
// be released unless its refcount falls to zero. Instead, make two
// passes: the first pass will release items with refcount = 1,
// and if not enough space is released after scanning the number of
// elements specified by this parameter, we will remove items in LRU order.
// Default: 16
func (opts *Options) SetTableCacheRemoveScanCountLimit(value int) {
C.rocksdb_options_set_table_cache_remove_scan_count_limit(opts.c, C.int(value))
}
// SetArenaBlockSize sets the size of one block in arena memory allocation.
//
// If <= 0, a proper value is automatically calculated (usually 1/10 of
// write_buffer_size).
// Default: 0
func (opts *Options) SetArenaBlockSize(value int) {
C.rocksdb_options_set_arena_block_size(opts.c, C.size_t(value))
}
// SetDisableAutoCompactions enable/disable automatic compactions.
//
// Manual compactions can still be issued on this database.
// Default: false
func (opts *Options) SetDisableAutoCompactions(value bool) {
C.rocksdb_options_set_disable_auto_compactions(opts.c, C.int(btoi(value)))
}
// SetWALTtlSeconds sets the WAL ttl in seconds.
//
// The following two options affect how archived logs will be deleted.
// 1. If both set to 0, logs will be deleted asap and will not get into
// the archive.
// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
// WAL files will be checked every 10 min and if total size is greater
// than wal_size_limit_mb, they will be deleted starting with the
// earliest until size_limit is met. All empty files will be deleted.
// 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then
// WAL files will be checked every wal_ttl_seconds / 2 and those that
// are older than wal_ttl_seconds will be deleted.
// 4. If both are not 0, WAL files will be checked every 10 min and both
// checks will be performed with ttl being first.
// Default: 0
func (opts *Options) SetWALTtlSeconds(value uint64) {
C.rocksdb_options_set_WAL_ttl_seconds(opts.c, C.uint64_t(value))
}
// SetWalSizeLimitMb sets the WAL size limit in MB.
//
// If total size of WAL files is greater than wal_size_limit_mb,
// they will be deleted starting with the earliest until size_limit is met
// Default: 0
func (opts *Options) SetWalSizeLimitMb(value uint64) {
C.rocksdb_options_set_WAL_size_limit_MB(opts.c, C.uint64_t(value))
}
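// Illustrative sketch (not part of the original file): combining the two WAL
// retention options above so archived logs are pruned by age first and then by
// total size, per case 4 in the comment on SetWALTtlSeconds. Assumes an *Options
// value (opts) constructed elsewhere in this package.
//
// opts.SetWALTtlSeconds(3600) // keep archived WAL files for up to an hour
// opts.SetWalSizeLimitMb(64)  // and delete the oldest files once they exceed 64 MB in total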
// SetManifestPreallocationSize sets the number of bytes
// to preallocate (via fallocate) the manifest files.
//
// Default is 4mb, which is reasonable to reduce random IO
// as well as prevent overallocation for mounts that preallocate
// large amounts of data (such as xfs's allocsize option).
// Default: 4mb
func (opts *Options) SetManifestPreallocationSize(value int) {
C.rocksdb_options_set_manifest_preallocation_size(opts.c, C.size_t(value))
}
// SetPurgeRedundantKvsWhileFlush enable/disable purging of
// duplicate/deleted keys when a memtable is flushed to storage.
// Default: true
func (opts *Options) SetPurgeRedundantKvsWhileFlush(value bool) {
C.rocksdb_options_set_purge_redundant_kvs_while_flush(opts.c, boolToChar(value))
}
// SetAllowMmapReads enable/disable mmap reads for reading sst tables.
// Default: false
func (opts *Options) SetAllowMmapReads(value bool) {
C.rocksdb_options_set_allow_mmap_reads(opts.c, boolToChar(value))
}
// SetAllowMmapWrites enable/disable mmap writes for writing sst tables.
// Default: false
func (opts *Options) SetAllowMmapWrites(value bool) {
C.rocksdb_options_set_allow_mmap_writes(opts.c, boolToChar(value))
}
// SetUseDirectReads enable/disable direct I/O mode (O_DIRECT) for reads
// Default: false
func (opts *Options) SetUseDirectReads(value bool) {
C.rocksdb_options_set_use_direct_reads(opts.c, boolToChar(value))
}
// SetUseDirectIOForFlushAndCompaction enable/disable direct I/O mode (O_DIRECT) for both reads and writes in background flush and compactions
// When true, new_table_reader_for_compaction_inputs is forced to true.
// Default: false
func (opts *Options) SetUseDirectIOForFlushAndCompaction(value bool) {
C.rocksdb_options_set_use_direct_io_for_flush_and_compaction(opts.c, boolToChar(value))
}
// SetIsFdCloseOnExec enables/disables child processes inheriting open files.
// Default: true
func (opts *Options) SetIsFdCloseOnExec(value bool) {
C.rocksdb_options_set_is_fd_close_on_exec(opts.c, boolToChar(value))
}
// SetSkipLogErrorOnRecovery enable/disable skipping of
// log corruption error on recovery (If client is ok with
// losing most recent changes)
// Default: false
func (opts *Options) SetSkipLogErrorOnRecovery(value bool) {
C.rocksdb_options_set_skip_log_error_on_recovery(opts.c, boolToChar(value))
}
// SetStatsDumpPeriodSec sets the stats dump period in seconds.
//
// If not zero, dump stats to LOG every stats_dump_period_sec
// Default: 3600 (1 hour)
func (opts *Options) SetStatsDumpPeriodSec(value uint) {
C.rocksdb_options_set_stats_dump_period_sec(opts.c, C.uint(value))
}
// SetAdviseRandomOnOpen specifies whether we will hint the underlying
// file system that the file access pattern is random, when an sst file is opened.
// Default: true
func (opts *Options) SetAdviseRandomOnOpen(value bool) {
C.rocksdb_options_set_advise_random_on_open(opts.c, boolToChar(value))
}
// SetDbWriteBufferSize sets the amount of data to build up
// in memtables across all column families before writing to disk.
//
// This is distinct from write_buffer_size, which enforces a limit
// for a single memtable.
//
// This feature is disabled by default. Specify a non-zero value
// to enable it.
//
// Default: 0 (disabled)
func (opts *Options) SetDbWriteBufferSize(value int) {
C.rocksdb_options_set_db_write_buffer_size(opts.c, C.size_t(value))
}
// SetAccessHintOnCompactionStart specifies the file access pattern
// once a compaction is started.
//
// It will be applied to all input files of a compaction.
// Default: NormalCompactionAccessPattern
func (opts *Options) SetAccessHintOnCompactionStart(value CompactionAccessPattern) {
C.rocksdb_options_set_access_hint_on_compaction_start(opts.c, C.int(value))
}
// SetUseAdaptiveMutex enable/disable adaptive mutex, which spins
// in the user space before resorting to kernel.
//
// This could reduce context switch when the mutex is not
// heavily contended. However, if the mutex is hot, we could end up
// wasting spin time.
// Default: false
func (opts *Options) SetUseAdaptiveMutex(value bool) {
C.rocksdb_options_set_use_adaptive_mutex(opts.c, boolToChar(value))
}
// SetBytesPerSync sets the bytes per sync.
//
// Allows OS to incrementally sync files to disk while they are being
// written, asynchronously, in the background.
// Issue one request for every bytes_per_sync written.
// Default: 0 (disabled)
func (opts *Options) SetBytesPerSync(value uint64) {
C.rocksdb_options_set_bytes_per_sync(opts.c, C.uint64_t(value))
}
// SetCompactionStyle sets the compaction style.
// Default: LevelCompactionStyle
func (opts *Options) SetCompactionStyle(value CompactionStyle) {
C.rocksdb_options_set_compaction_style(opts.c, C.int(value))
}
// SetUniversalCompactionOptions sets the options needed
// to support Universal Style compactions.
// Default: nil
func (opts *Options) SetUniversalCompactionOptions(value *UniversalCompactionOptions) {
C.rocksdb_options_set_universal_compaction_options(opts.c, value.c)
}
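// Illustrative sketch: switching to universal compaction. UniversalCompactionStyle
// and NewDefaultUniversalCompactionOptions are assumed to be defined elsewhere in
// this package; treat this as a usage outline rather than verified API.
//
// opts.SetCompactionStyle(UniversalCompactionStyle)
// opts.SetUniversalCompactionOptions(NewDefaultUniversalCompactionOptions())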
// SetFIFOCompactionOptions sets the options for FIFO compaction style.
// Default: nil
func (opts *Options) SetFIFOCompactionOptions(value *FIFOCompactionOptions) {
C.rocksdb_options_set_fifo_compaction_options(opts.c, value.c)
}
// SetRateLimiter sets the rate limiter of the options.
// Use to control write rate of flush and compaction. Flush has higher
// priority than compaction. Rate limiting is disabled if nullptr.
// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
// Default: nullptr
func (opts *Options) SetRateLimiter(rateLimiter *RateLimiter) {
C.rocksdb_options_set_ratelimiter(opts.c, rateLimiter.c)
}
// SetMaxSequentialSkipInIterations specifies whether an iteration->Next()
// sequentially skips over keys with the same user-key or not.
//
// This number specifies the number of keys (with the same userkey)
// that will be sequentially skipped before a reseek is issued.
// Default: 8
func (opts *Options) SetMaxSequentialSkipInIterations(value uint64) {
C.rocksdb_options_set_max_sequential_skip_in_iterations(opts.c, C.uint64_t(value))
}
// SetInplaceUpdateSupport enable/disable thread-safe inplace updates.
//
// An update is performed in place if
// * the key exists in the current memtable
// * sizeof(new_value) <= sizeof(old_value)
// * the old value for that key is a put, i.e. kTypeValue
// Default: false.
func (opts *Options) SetInplaceUpdateSupport(value bool) {
C.rocksdb_options_set_inplace_update_support(opts.c, boolToChar(value))
}
// SetInplaceUpdateNumLocks sets the number of locks used for inplace update.
// Default: 10000, if inplace_update_support = true, else 0.
func (opts *Options) SetInplaceUpdateNumLocks(value int) {
C.rocksdb_options_set_inplace_update_num_locks(opts.c, C.size_t(value))
}
// SetMemtableHugePageSize sets the page size for huge page for
// arena used by the memtable.
// If <=0, it won't allocate from huge page but from malloc.
// Users are responsible for reserving huge pages for it to be allocated. For
// example:
// sysctl -w vm.nr_hugepages=20
// See linux doc Documentation/vm/hugetlbpage.txt
// If there isn't enough free huge page available, it will fall back to
// malloc.
//
// Dynamically changeable through SetOptions() API
func (opts *Options) SetMemtableHugePageSize(value int) {
C.rocksdb_options_set_memtable_huge_page_size(opts.c, C.size_t(value))
}
// SetBloomLocality sets the bloom locality.
//
// Control locality of bloom filter probes to improve cache miss rate.
// This option only applies to memtable prefix bloom and plaintable
// prefix bloom. It essentially limits the max number of cache lines each
// bloom filter check can touch.
// This optimization is turned off when set to 0. The number should never
// be greater than number of probes. This option can boost performance
// for in-memory workload but should use with care since it can cause
// higher false positive rate.
// Default: 0
func (opts *Options) SetBloomLocality(value uint32) {
C.rocksdb_options_set_bloom_locality(opts.c, C.uint32_t(value))
}
// SetMaxSuccessiveMerges sets the maximum number of
// successive merge operations on a key in the memtable.
//
// When a merge operation is added to the memtable and the maximum number of
// successive merges is reached, the value of the key will be calculated and
// inserted into the memtable instead of the merge operation. This will
// ensure that there are never more than max_successive_merges merge
// operations in the memtable.
// Default: 0 (disabled)
func (opts *Options) SetMaxSuccessiveMerges(value int) {
C.rocksdb_options_set_max_successive_merges(opts.c, C.size_t(value))
}
// EnableStatistics enable statistics.
func (opts *Options) EnableStatistics() {
C.rocksdb_options_enable_statistics(opts.c)
}
// GetStatisticsString get the dump log of rocksdb.
func (opts *Options) GetStatisticsString() string {
cValue := C.rocksdb_options_statistics_get_string(opts.c)
defer C.free(unsafe.Pointer(cValue))
return C.GoString(cValue)
}
// PrepareForBulkLoad prepare the DB for bulk loading.
//
// All data will be in level 0 without any automatic compaction.
// It's recommended to manually call CompactRange(NULL, NULL) before reading
// from the database, because otherwise the read can be very slow.
func (opts *Options) PrepareForBulkLoad() {
C.rocksdb_options_prepare_for_bulk_load(opts.c)
}
// SetMemtableVectorRep sets a MemTableRep which is backed by a vector.
//
// On iteration, the vector is sorted. This is useful for workloads where
// iteration is very rare and writes are generally not issued after reads begin.
func (opts *Options) SetMemtableVectorRep() {
C.rocksdb_options_set_memtable_vector_rep(opts.c)
}
// SetHashSkipListRep sets a hash skip list as MemTableRep.
//
// It contains a fixed array of buckets, each
// pointing to a skiplist (null if the bucket is empty).
//
// bucketCount: number of fixed array buckets
// skiplistHeight: the max height of the skiplist
// skiplistBranchingFactor: probabilistic size ratio between adjacent
// link lists in the skiplist
func (opts *Options) SetHashSkipListRep(bucketCount int, skiplistHeight, skiplistBranchingFactor int32) {
C.rocksdb_options_set_hash_skip_list_rep(opts.c, C.size_t(bucketCount), C.int32_t(skiplistHeight), C.int32_t(skiplistBranchingFactor))
}
// SetHashLinkListRep sets a hashed linked list as MemTableRep.
//
// It contains a fixed array of buckets, each pointing to a sorted single
// linked list (null if the bucket is empty).
//
// bucketCount: number of fixed array buckets
func (opts *Options) SetHashLinkListRep(bucketCount int) {
C.rocksdb_options_set_hash_link_list_rep(opts.c, C.size_t(bucketCount))
}
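// Illustrative sketch: selecting a hash-based memtable representation. Hash-based
// reps (like the plain table factory below) rely on a prefix extractor being
// configured elsewhere, which is assumed here; the values are arbitrary examples.
//
// opts.SetHashSkipListRep(1000000, 12, 4) // 1M buckets, skiplist height 12, branching factor 4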
// SetPlainTableFactory sets a plain table factory with prefix-only seek.
//
// For this factory, you need to set prefix_extractor properly to make it
// work. Look-up starts with a prefix hash lookup for the key prefix. Inside the
// hash bucket found, a binary search is executed for hash conflicts. Finally,
// a linear search is used.
//
// keyLen: plain table has an optimization for fixed-size keys,
// which can be specified via keyLen.
// bloomBitsPerKey: the number of bits used for the bloom filter per prefix. You
// may disable it by passing a zero.
// hashTableRatio: the desired utilization of the hash table used for prefix
// hashing. hashTableRatio = number of prefixes / #buckets
// in the hash table
// indexSparseness: inside each prefix, build one index record for every
// indexSparseness keys, to support binary search inside each hash bucket.
func (opts *Options) SetPlainTableFactory(keyLen uint32, bloomBitsPerKey int, hashTableRatio float64, indexSparseness int) {
C.rocksdb_options_set_plain_table_factory(opts.c, C.uint32_t(keyLen), C.int(bloomBitsPerKey), C.double(hashTableRatio), C.size_t(indexSparseness))
}
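// Illustrative sketch: a plain-table configuration for fixed 16-byte keys.
// A prefix extractor must be set separately (see the comment above); the
// concrete values are arbitrary examples, not recommendations.
//
// opts.SetAllowMmapReads(true) // plain table is normally used with mmap reads
// opts.SetPlainTableFactory(16, 10, 0.75, 16)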
// SetCreateIfMissingColumnFamilies specifies whether the column families
// should be created if they are missing.
func (opts *Options) SetCreateIfMissingColumnFamilies(value bool) {
C.rocksdb_options_set_create_missing_column_families(opts.c, boolToChar(value))
}
// SetBlockBasedTableFactory sets the block based table factory.
func (opts *Options) SetBlockBasedTableFactory(value *BlockBasedTableOptions) {
opts.bbto = value
C.rocksdb_options_set_block_based_table_factory(opts.c, value.c)
}
// SetAllowIngestBehind sets allow_ingest_behind
// Set this option to true during creation of database if you want
// to be able to ingest behind (call IngestExternalFile() skipping keys
// that already exist, rather than overwriting matching keys).
// Setting this option to true affects three things:
// 1) Some internal optimizations around SST file compression are disabled.
// 2) The bottom-most level is reserved for ingested files only.
// 3) num_levels should be >= 3 if this option is turned on.
//
// DEFAULT: false
// Immutable.
func (opts *Options) SetAllowIngestBehind(value bool) {
C.rocksdb_options_set_allow_ingest_behind(opts.c, boolToChar(value))
}
// Destroy deallocates the Options object.
func (opts *Options) Destroy() {
C.rocksdb_options_destroy(opts.c)
if opts.ccmp != nil {
C.rocksdb_comparator_destroy(opts.ccmp)
}
if opts.cst != nil {
C.rocksdb_slicetransform_destroy(opts.cst)
}
if opts.ccf != nil {
C.rocksdb_compactionfilter_destroy(opts.ccf)
}
opts.c = nil
opts.env = nil
opts.bbto = nil
}
| {
return NewNativeOptions(C.rocksdb_options_create())
} |
draw_view3.js | var draw_view3 = {
data: 0,
height: 0,
width: 0,
div: 0,
view: 0,
graph_line_class: 0,
yScale: 0,
xScale: 0,
stroke_color: 0,
name_list: 0,
record_list: 0,
graph_text_class: 0,
initialize: function() {
var self = this;
self.div = "#view3";
self.width = $(self.div).width();
self.height = $(self.div).height();
self.view = d3.select(self.div).append("svg")
.attr("width", self.width) | self.stroke_color = "steelblue";
self.name_list = [];
self.record_list = [];
self.graph_line_class = [];
self.draw();
},
draw: function(data) {
var self = this;
self.yScale = d3.scale.linear()
.domain([0, 10])
.range([0, self.height * 0.7]);
var yAxis = d3.svg.axis()
.scale(self.yScale)
.orient("left")
.ticks(0);
self.yScale.range([self.height * 0.7, 0]);
self.xScale = d3.scale.linear()
.domain([1965, 2018])
.range([0, self.width * 0.8]);
var xAxis = d3.svg.axis()
.scale(self.xScale)
.ticks(10)
var gxAxis = self.view.append("g")
.attr("transform", 'translate(' + (self.width * 0.1) + ',' + (self.height * 0.8) + ')')
.attr("class", "axis");
var gyAxis = self.view.append("g")
.attr("transform", 'translate(' + (self.width * 0.1) + ',' + (self.height * 0.1) + ')')
.attr("class", "axis");
gxAxis.call(xAxis);
gyAxis.call(yAxis);
self.yScale.range([0, self.height * 0.7]);
d3.csv("data/all_data.csv", function(error, data) {
console.log(data[0].履历);
console.log(data);
})
},
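// Illustrative usage sketch (not part of the original file): after initialize(),
// get_record() can be called with a value from the 姓名 column of
// data/all_data.csv; it appends the name and redraws the lines and labels.
//
// draw_view3.initialize();
// draw_view3.get_record(someName); // someName is a hypothetical variable holding a 姓名 value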
get_record: function(name) {
var self = this;
self.name_list.push(name);
self.record_list = new Array(self.name_list.length);
console.log(self.name_list);
d3.csv("data/all_data.csv", function(error, data) {
for (var i = 0; i < data.length; i++) {
if (_.contains(self.name_list, data[i].姓名))
self.record_list[_.indexOf(self.name_list, data[i].姓名)] = data[i].履历;
}
for (var i = 0; i < self.record_list.length; i++) {
self.record_list[i] = self.record_list[i].toString().split('\n');
}
self.draw_line();
self.draw_text();
})
},
draw_text: function() {
var self = this;
self.remove_text();
self.graph_text_class = self.view.append("g")
.attr("id", "view3_text")
.selectAll("text")
.data(self.name_list)
.enter().append("text")
.attr("x", function(d, i) {
return self.width * 0.07;
})
.attr("y", function(d, i) {
return self.height * 0.8 - self.height * 0.7 / (self.name_list.length + 1) * (i + 1);
})
.text(function(d) {
return d
})
},
draw_line: function() {
var self = this;
self.remove_line();
self.graph_line_class = new Array(self.name_list.length);
for (var index = 0; index < self.name_list.length; index++) {
self.graph_line_class[index] = self.view.append("g")
.attr("id", "view3_line" + index)
.attr("stroke-width", 3)
.attr("stroke", self.stroke_color)
.selectAll("line")
.data(self.generate_line_data(index))
.enter().append("line")
.attr("x1", function(d, i) {
return d.x1;
})
.attr("x2", function(d, i) {
return d.x2;
})
.attr("y1", function(d, i) {
return d.y1;
})
.attr("y2", function(d, i) {
return d.y2;
})
}
for (var index = 0; index < self.name_list.length; index++) {
self.graph_line_class[index].attr("opacity", function(d, i) {
return (i % 2 == 1) ? 1 : 0.5;
})
.on("mouseover", function(d, i) {
for (var index = 0; index < location_list.length; index++) {
if (d.content.search(location_list[index]) != -1)
draw_view2.highlight_location(location_list[index], 1);
}
d3.select(this).attr("stroke-width", 4)
.attr("stroke", "red");
tooltip.html(d.content)
.style("left", (d3.event.pageX) + "px")
.style("top", (d3.event.pageY + 20) + "px")
.style("opacity", 1);
})
.on("mouseout", function(d, i) {
d3.select(this).attr("stroke-width", 3)
.attr("stroke", self.stroke_color)
tooltip.style("opacity", 0.0);
})
}
},
generate_line_data: function(index) {
var self = this;
console.log(self.record_list);
function LINE() {}
LINE.prototype.id = 0;
LINE.prototype.x1 = 0;
LINE.prototype.x2 = 0;
LINE.prototype.y1 = 0;
LINE.prototype.y2 = 0;
LINE.prototype.content = 0;
var line_class = new Array(),
tmp;
for (var i = 0; i < self.record_list[index].length; i++) {
tmp = self.record_list[index][i];
tmp = tmp.toString().split('—');
console.log("------------");
console.log(parseInt(tmp[0]));
console.log(parseInt(tmp[1]));
console.log("------------");
if (isNaN(parseInt(tmp[1]))) tmp[1] = 2018;
line_class[i] = new LINE();
line_class[i].id = index;
line_class[i].content = self.record_list[index][i];
line_class[i].x1 = self.width * 0.1 + self.xScale(Math.abs(parseInt(tmp[0])));
line_class[i].x2 = self.width * 0.1 + self.xScale(Math.abs(parseInt(tmp[1])));
line_class[i].y1 = self.height * 0.8 - self.height * 0.7 / (self.record_list.length + 1) * (index + 1);
line_class[i].y2 = self.height * 0.8 - self.height * 0.7 / (self.record_list.length + 1) * (index + 1);
}
return line_class;
},
remove_line: function() {
var self = this;
for (var i = 0; i < self.name_list.length; i++)
self.view.select("#view3_line" + i).remove();
},
remove_text: function() {
var self = this;
self.view.select("#view3_text").remove();
}
} | .attr("height", self.height); |
movements.py | '''
Created on Nov 29, 2020
@author: manik
'''
'''
File with classes and code which control how a particular person
will move and to where
'''
from src.population import Population
import numpy as np
import src.person_properties_util as idx
class Movement():
"""
Class providing abstraction into each movement of the population
"""
def | (self, persons: np.ndarray, size: int, speed: float = 0.1, heading_update_chance: float = 0.02) -> np.ndarray:
"""
Randomly updates/initializes the destination each person is headed to and the corresponding speed.
Parameters
----------
persons : np.ndarray
The NumPy array containing the details of the persons to be updated
size : int
The number of persons in the array to be updated
speed : float, optional
Mean of the speed to be generated randomly, by default 0.1
heading_update_chance : float, optional
The odds of updating the destination of each person, by default 0.02
Returns
-------
np.ndarray
The updated NumPy array with updated values
"""
#For updating the x position
#Generate a random array with update chance for each person in the population
update = np.random.random(size=(size,))
#Get the persons in the population who have a lower or equal to chance of getting updated in this epoch
shp = update[update <= heading_update_chance].shape
#Update the position for the direction in which they are heading
persons[:,idx.x_dir][update <= heading_update_chance] = np.random.normal(loc = 0, scale = 1/3, size = shp)
#For updating the y position, do the same
update = np.random.random(size=(size,))
shp = update[update <= heading_update_chance].shape
persons[:,idx.y_dir][update <= heading_update_chance] = np.random.normal(loc = 0, scale = 1/3, size = shp)
#Update the speed by generating a random normal distribution using the argument speed as the parameter
update = np.random.random(size=(size,))
shp = update[update <= heading_update_chance].shape
persons[:,idx.speed][update <= heading_update_chance] = np.random.normal(loc = speed, scale = speed / 3, size = shp)
persons[:,idx.speed] = np.clip(persons[:,idx.speed], a_min=0.0005, a_max=0.01)
#Return the updated array
return persons
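# Illustrative usage sketch (not part of the original module). The column layout
# is assumed to follow src.person_properties_util; the way the persons array is
# obtained from Population is hypothetical.
#
# movement = Movement()
# persons = population.persons              # hypothetical (n, m) NumPy array
# persons = movement.update_persons(persons, len(persons), speed=0.01)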
def out_of_bounds(self, persons: np.ndarray, xbounds, ybounds):
"""
Check if individuals are heading out of the specified bounds.
Parameters
----------
persons : np.ndarray
The NumPy array containing the details of the individuals
xbounds : list
List containing bounds for X axis.
ybounds : list
List containing bounds for Y axis.
Returns
-------
np.ndarray
The updated NumPy array with updated values
"""
# Store shape of list of people who are heading out of bounds based on X bound [0]
shp = persons[:,4][(persons[:,2] <= xbounds[:,0]) &
(persons[:,4] < 0)].shape
# Update them randomly using a normal distribution
persons[:,4][(persons[:,2] <= xbounds[:,0]) &
(persons[:,4] < 0)] = np.clip(np.random.normal(loc = 0.5,
scale = 0.5/3,
size = shp),
a_min = 0.05, a_max = 1)
# Store shape of list of people who are heading out of bounds based on X bound [1]
shp = persons[:,4][(persons[:,2] >= xbounds[:,1]) &
(persons[:,4] > 0)].shape
# Update them randomly using a normal distribution
persons[:,4][(persons[:,2] >= xbounds[:,1]) &
(persons[:,4] > 0)] = np.clip(-np.random.normal(loc = 0.5,
scale = 0.5/3,
size = shp),
a_min = -1, a_max = -0.05)
# Store shape of list of people who are heading out of bounds based on Y bound [0]
shp = persons[:,5][(persons[:,3] <= ybounds[:,0]) &
(persons[:,5] < 0)].shape
# Update them randomly using a normal distribution
persons[:,5][(persons[:,3] <= ybounds[:,0]) &
(persons[:,5] < 0)] = np.clip(np.random.normal(loc = 0.5,
scale = 0.5/3,
size = shp),
a_min = 0.05, a_max = 1)
# Store shape of list of people who are heading out of bounds based on Y bound [1]
shp = persons[:,5][(persons[:,3] >= ybounds[:,1]) &
(persons[:,5] > 0)].shape
# Update them randomly using a normal distribution
persons[:,5][(persons[:,3] >= ybounds[:,1]) &
(persons[:,5] > 0)] = np.clip(-np.random.normal(loc = 0.5,
scale = 0.5/3,
size = shp),
a_min = -1, a_max = -0.05)
return persons
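# Note (added for clarity): the indexing above (xbounds[:, 0], xbounds[:, 1])
# implies xbounds and ybounds are per-person (n, 2) arrays of [lower, upper]
# bounds rather than plain two-element lists; a single global bound can be
# broadcast with e.g. np.tile(np.array([[0.0, 1.0]]), (n, 1)).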
def update_pop(self, persons):
"""
Update function to move people physically in the graph.
This function adds the X and Y direction values to the current position of
the individual to move them.
Parameters
----------
persons : np.ndarray
The NumPy array containing the details of the persons to be updated
Returns
-------
np.ndarray
The updated NumPy array with updated values
"""
filter = (persons[:, idx.current_state] != 3) & (persons[:, idx.social_distance] == 0)
#x
persons[:,2][filter] = persons[:,2][filter] + (persons[:,4][filter] * persons[:,6][filter])
#y
persons[:,3][filter] = persons[:,3][filter] + (persons [:,5][filter] * persons[:,6][filter])
return persons
| update_persons |
dcel.rs | // Copyright 2017 The Spade Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Handle to a vertex.
///
/// This handle is "fixed", meaning it is intended to be used for
/// mutation (e.g., removing a vertex) or storage (e.g., storing
/// references to vertices for later usage).
pub type FixedVertexHandle = usize;
/// Handle to an edge.
///
/// This handle is "fixed", meaning it is intended to be used
/// for storage. Note that removal operations will invalidate
/// edge handles.
pub type FixedEdgeHandle = usize;
/// Handle to a face.
///
/// This handle is "fixed", meaning it is intended to be used
/// for storage. Note that removal operations will invalidate
/// face handles.
pub type FixedFaceHandle = usize;
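// Illustrative sketch (not part of the original crate): fixed handles are plain
// indices, so they can be stored and later resolved back into dynamic handles.
//
// let mut dcel: DCEL<()> = DCEL::new();
// let v: FixedVertexHandle = dcel.insert_vertex(());
// let handle = dcel.vertex(v); // dynamic handle borrowing the DCEL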
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct VertexRemovalResult<V> {
pub updated_vertex: Option<FixedVertexHandle>,
pub data: V,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde_serialize", derive(Serialize, Deserialize))]
struct FaceEntry {
adjacent_edge: Option<FixedEdgeHandle>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde_serialize", derive(Serialize, Deserialize))]
struct VertexEntry<V> {
data: V,
out_edge: Option<FixedEdgeHandle>,
}
impl<V> VertexEntry<V> {
fn new(data: V) -> VertexEntry<V> {
VertexEntry {
data,
out_edge: None,
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde_serialize", derive(Serialize, Deserialize))]
struct HalfEdgeEntry<T> {
next: FixedEdgeHandle,
prev: FixedEdgeHandle,
twin: FixedEdgeHandle,
origin: FixedVertexHandle,
face: FixedFaceHandle,
data: T,
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde_serialize", derive(Serialize, Deserialize))]
pub struct DCEL<V, E = ()> {
vertices: Vec<VertexEntry<V>>,
faces: Vec<FaceEntry>,
edges: Vec<HalfEdgeEntry<E>>,
}
impl<V> DCEL<V> {
pub fn new() -> Self {
Self::new_with_edge()
}
}
impl<V, E> DCEL<V, E>
where
E: Default,
{
pub fn new_with_edge() -> Self {
DCEL {
vertices: Vec::new(),
edges: Vec::new(),
faces: vec![FaceEntry {
adjacent_edge: None,
}],
}
}
pub fn num_vertices(&self) -> usize {
self.vertices.len()
}
pub fn num_edges(&self) -> usize {
self.edges.len() / 2
}
pub fn num_faces(&self) -> usize {
self.faces.len()
}
pub fn vertex(&self, handle: FixedVertexHandle) -> VertexHandle<V, E> {
VertexHandle::new(self, handle)
}
pub fn edge(&self, handle: FixedEdgeHandle) -> EdgeHandle<V, E> {
EdgeHandle::new(self, handle)
}
pub fn edge_data(&self, handle: FixedEdgeHandle) -> &E {
&self.edges[handle].data
}
pub fn edge_data_mut(&mut self, handle: FixedEdgeHandle) -> &mut E {
&mut self.edges[handle].data
}
pub fn face(&self, handle: FixedFaceHandle) -> FaceHandle<V, E> {
FaceHandle::new(self, handle)
}
pub fn vertex_mut(&mut self, handle: FixedVertexHandle) -> &mut V {
&mut self.vertices[handle].data
}
pub fn insert_vertex(&mut self, vertex: V) -> FixedVertexHandle {
self.vertices.push(VertexEntry::new(vertex));
self.vertices.len() - 1
}
pub fn get_edge_from_neighbors(
&self,
from: FixedVertexHandle,
to: FixedVertexHandle,
) -> Option<EdgeHandle<V, E>> {
let vertex = self.vertex(from);
for edge in vertex.ccw_out_edges() {
if edge.to().fix() == to {
return Some(edge);
}
}
None
}
pub fn connect_two_isolated_vertices(
&mut self,
v0: FixedVertexHandle,
v1: FixedVertexHandle,
face: FixedFaceHandle,
) -> FixedEdgeHandle {
assert!(self.vertices[v0].out_edge.is_none(), "v0 is not isolated");
assert!(self.vertices[v1].out_edge.is_none(), "v1 is not isolated");
assert!(
self.faces[face].adjacent_edge.is_none(),
"face must not contain any adjacent edges"
);
let edge_index = self.edges.len();
let twin_index = edge_index + 1;
let edge = HalfEdgeEntry {
next: twin_index,
prev: twin_index,
twin: twin_index,
origin: v0,
face,
data: Default::default(),
};
self.edges.push(edge);
let twin = HalfEdgeEntry {
next: edge_index,
prev: edge_index,
twin: edge_index,
origin: v1,
face,
data: Default::default(),
};
self.edges.push(twin);
self.vertices[v0].out_edge = Some(edge_index);
self.vertices[v1].out_edge = Some(twin_index);
self.faces[face].adjacent_edge = Some(edge_index);
edge_index
}
pub fn update_vertex(&mut self, handle: FixedVertexHandle, data: V) {
self.vertices[handle].data = data;
}
pub fn edges(&self) -> EdgesIterator<V, E> {
EdgesIterator::new(&self)
}
pub fn vertices(&self) -> VerticesIterator<V, E> {
VerticesIterator::new(&self)
}
pub fn fixed_vertices(&self) -> FixedVerticesIterator {
0..self.num_vertices()
}
pub fn faces(&self) -> FacesIterator<V, E> {
FacesIterator::new(&self)
}
}
impl<V, E> DCEL<V, E>
where
E: Default + Copy,
{
pub fn connect_edge_to_isolated_vertex(
&mut self,
prev_handle: FixedEdgeHandle,
vertex: FixedVertexHandle,
) -> FixedEdgeHandle {
assert!(
self.vertices[vertex].out_edge.is_none(),
"Given vertex is not isolated"
);
let prev = self.edges[prev_handle];
let edge_index = self.edges.len();
let twin_index = edge_index + 1;
let edge = HalfEdgeEntry {
next: twin_index,
prev: prev_handle,
twin: twin_index,
origin: self.edges[prev.twin].origin,
face: prev.face,
data: Default::default(),
};
self.edges.push(edge);
let twin = HalfEdgeEntry {
next: prev.next,
prev: edge_index,
twin: edge_index,
origin: vertex,
face: prev.face,
data: Default::default(),
};
self.edges.push(twin);
self.edges[prev_handle].next = edge_index;
self.edges[prev.next].prev = twin_index;
self.vertices[vertex].out_edge = Some(twin_index);
edge_index
}
pub fn remove_vertex(
&mut self,
vertex_handle: FixedVertexHandle,
remaining_face: Option<FixedFaceHandle>,
) -> VertexRemovalResult<V> {
while let Some(out_edge) = self.vertices[vertex_handle].out_edge {
self.remove_edge(out_edge, remaining_face);
}
let data = self.vertices.swap_remove(vertex_handle).data;
let updated_vertex = if self.vertices.len() == vertex_handle {
None
} else {
// Update origin of all out edges
let to_update: Vec<_> = self
.vertex(vertex_handle)
.ccw_out_edges()
.map(|e| e.fix())
.collect();
for e in to_update {
self.edges[e].origin = vertex_handle;
}
Some(self.vertices.len())
};
VertexRemovalResult {
updated_vertex,
data,
}
}
pub fn connect_edge_to_edge(
&mut self,
prev_edge_handle: FixedEdgeHandle,
next_edge_handle: FixedEdgeHandle,
) -> FixedEdgeHandle {
let edge_index = self.edges.len();
let twin_index = edge_index + 1;
let next_edge = self.edges[next_edge_handle];
let prev_edge = self.edges[prev_edge_handle];
let edge = HalfEdgeEntry {
next: next_edge_handle,
prev: prev_edge_handle,
twin: twin_index,
origin: self.edges[prev_edge.twin].origin,
face: next_edge.face,
data: Default::default(),
};
self.edges.push(edge);
let twin = HalfEdgeEntry {
next: prev_edge.next,
prev: next_edge.prev,
twin: edge_index,
origin: next_edge.origin,
face: next_edge.face,
data: Default::default(),
};
self.edges.push(twin);
self.edges[next_edge_handle].prev = edge_index;
self.edges[prev_edge_handle].next = edge_index;
self.edges[next_edge.prev].next = twin_index;
self.edges[prev_edge.next].prev = twin_index;
edge_index
}
pub fn split_edge(
&mut self,
edge_handle: FixedEdgeHandle,
split_vertex: FixedVertexHandle,
) -> FixedEdgeHandle {
assert!(
self.vertices[split_vertex].out_edge.is_none(),
"Given vertex must be isolated"
);
let edge = self.edges[edge_handle];
let twin = self.edges[edge.twin];
let is_isolated = edge.next == edge.twin;
let new_edge_index = self.edges.len();
let new_twin_index = new_edge_index + 1;
let (new_edge_next, new_twin_prev) = if is_isolated {
(new_twin_index, new_edge_index)
} else {
(edge.next, twin.prev)
};
let new_edge = HalfEdgeEntry {
next: new_edge_next,
prev: edge_handle,
twin: new_twin_index,
origin: split_vertex,
face: edge.face,
data: Default::default(),
};
let new_twin = HalfEdgeEntry {
next: edge.twin,
prev: new_twin_prev,
twin: new_edge_index,
origin: twin.origin,
face: twin.face,
data: Default::default(),
};
if !is_isolated {
self.edges[edge.next].prev = new_edge_index;
self.edges[twin.prev].next = new_twin_index;
}
self.edges[edge.twin].prev = new_twin_index;
self.edges[edge_handle].next = new_edge_index;
self.edges[edge.twin].origin = split_vertex;
self.vertices[twin.origin].out_edge = Some(new_twin_index);
self.vertices[split_vertex].out_edge = Some(new_edge_index);
self.edges.push(new_edge);
self.edges.push(new_twin);
new_edge_index
}
pub fn remove_edge(
&mut self,
edge_handle: FixedEdgeHandle,
remaining_face: Option<FixedFaceHandle>,
) {
let edge = self.edges[edge_handle];
let twin = self.edges[edge.twin];
self.edges[edge.prev].next = twin.next;
self.edges[twin.next].prev = edge.prev;
self.edges[edge.next].prev = twin.prev;
self.edges[twin.prev].next = edge.next;
let (to_remove, to_keep) = if remaining_face == Some(twin.face) {
(edge, twin)
} else {
(twin, edge)
};
if edge.prev == edge.twin && edge.next == edge.twin {
// We remove an isolated edge
self.faces[to_keep.face].adjacent_edge = None;
} else {
let new_adjacent_edge = if edge.prev != edge.twin {
edge.prev
} else {
edge.next
};
self.faces[to_keep.face].adjacent_edge = Some(new_adjacent_edge);
self.edges[new_adjacent_edge].face = to_keep.face;
}
if edge.prev == edge.twin {
self.vertices[edge.origin].out_edge = None;
} else {
self.vertices[edge.origin].out_edge = Some(twin.next);
}
if edge.next == edge.twin {
self.vertices[twin.origin].out_edge = None;
} else {
self.vertices[twin.origin].out_edge = Some(edge.next);
}
// We must remove the larger index first to prevent the other edge
// from being updated
if edge_handle > edge.twin {
self.swap_out_edge(edge_handle);
self.swap_out_edge(edge.twin);
} else {
self.swap_out_edge(edge.twin);
self.swap_out_edge(edge_handle);
}
if edge.face != twin.face {
let neighs: Vec<_> = self
.face(to_keep.face)
.adjacent_edges()
.map(|e| e.fix())
.collect();
for n in neighs {
self.edges[n].face = to_keep.face
}
self.remove_face(to_remove.face);
}
}
fn remove_face(&mut self, face: FixedFaceHandle) {
self.faces.swap_remove(face);
if self.faces.len() > face {
let neighs: Vec<_> = self.face(face).adjacent_edges().map(|e| e.fix()).collect();
for n in neighs {
self.edges[n].face = face;
}
}
}
fn swap_out_edge(&mut self, edge_handle: FixedEdgeHandle) {
self.edges.swap_remove(edge_handle);
if self.edges.len() > edge_handle {
// Update edge index
let old_handle = self.edges.len();
let edge = self.edges[edge_handle];
self.edges[edge.next].prev = edge_handle;
self.edges[edge.prev].next = edge_handle;
self.edges[edge.twin].twin = edge_handle;
if self.vertices[edge.origin].out_edge == Some(old_handle) {
self.vertices[edge.origin].out_edge = Some(edge_handle);
}
self.faces[edge.face].adjacent_edge = Some(edge_handle);
}
}
pub fn create_face(
&mut self,
prev_edge_handle: FixedEdgeHandle,
next_edge_handle: FixedEdgeHandle,
) -> FixedEdgeHandle {
let edge_index = self.connect_edge_to_edge(prev_edge_handle, next_edge_handle);
let new_face = self.num_faces();
self.faces.push(FaceEntry {
adjacent_edge: Some(edge_index),
});
// Set the face to the left of the new edge
let mut cur_edge = edge_index;
loop {
self.edges[cur_edge].face = new_face;
cur_edge = self.edges[cur_edge].next;
if cur_edge == edge_index {
break;
}
}
let twin = self.edges[edge_index].twin;
self.faces[self.edges[twin].face].adjacent_edge = Some(twin);
edge_index
}
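// Sketch (mirrors the unit tests at the bottom of this file): a triangle is
// built by chaining connect_two_isolated_vertices, connect_edge_to_isolated_vertex
// and create_face, which closes the loop and allocates the inner face.
//
// let e01 = dcel.connect_two_isolated_vertices(v0, v1, 0);
// let e12 = dcel.connect_edge_to_isolated_vertex(e01, v2);
// let e20 = dcel.create_face(e12, e01);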
pub fn flip_cw(&mut self, e: FixedEdgeHandle) {
let en = self.edges[e].next;
let ep = self.edges[e].prev;
let t = self.edges[e].twin;
let tn = self.edges[t].next;
let tp = self.edges[t].prev;
self.edges[en].next = e;
self.edges[en].prev = tp;
self.edges[e].next = tp;
self.edges[e].prev = en;
self.edges[tp].next = en;
self.edges[tp].prev = e;
self.edges[tn].next = t;
self.edges[tn].prev = ep;
self.edges[t].next = ep;
self.edges[t].prev = tn;
self.edges[ep].next = tn;
self.edges[ep].prev = t;
self.vertices[self.edges[e].origin].out_edge = Some(tn);
self.vertices[self.edges[t].origin].out_edge = Some(en);
self.edges[e].origin = self.edges[ep].origin;
self.edges[t].origin = self.edges[tp].origin;
self.faces[self.edges[e].face].adjacent_edge = Some(e);
self.faces[self.edges[t].face].adjacent_edge = Some(t);
self.edges[tp].face = self.edges[e].face;
self.edges[ep].face = self.edges[t].face;
}
#[cfg(test)]
pub fn sanity_check(&self) {
for (index, face) in self.faces.iter().enumerate() {
if let Some(adj) = face.adjacent_edge {
assert_eq!(self.edges[adj].face, index);
}
}
for (index, vertex) in self.vertices.iter().enumerate() {
if let Some(out_edge) = vertex.out_edge {
assert_eq!(self.edges[out_edge].origin, index);
}
}
for handle in 0..self.num_edges() {
let edge = self.edge(handle);
assert_eq!(edge, edge.o_next().o_prev());
assert_eq!(edge, edge.o_prev().o_next());
assert_eq!(edge, edge.sym().sym());
}
}
}
impl<V, E> DCEL<V, E>
where
E: ::std::fmt::Debug,
{
#[cfg(test)]
fn print(&self) {
for (index, edge) in self.edges.iter().enumerate() {
println!("edge {}: {:#?}", index, edge);
}
for (index, vertex) in self.vertices.iter().enumerate() {
println!("vertex {}: {:?}", index, vertex.out_edge);
}
for (index, face) in self.faces.iter().enumerate() {
println!("face {}: {:?}", index, face);
}
}
}
/// An iterator that iterates over the edges adjacent to a face.
///
/// The iterator will traverse the edges in oriented order.
/// This order is counterclockwise for right handed coordinate systems
/// or clockwise for left handed systems.
pub struct ONextIterator<'a, V, E = ()>
where
V: 'a,
E: 'a,
{
dcel: &'a DCEL<V, E>,
cur_until: Option<(FixedEdgeHandle, FixedEdgeHandle)>,
}
impl<'a, V, E> ONextIterator<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
fn new_empty(dcel: &'a DCEL<V, E>) -> Self {
ONextIterator {
dcel,
cur_until: None,
}
}
fn new(dcel: &'a DCEL<V, E>, edge: FixedEdgeHandle) -> Self {
let edge = dcel.edge(edge);
ONextIterator {
dcel,
cur_until: Some((edge.fix(), edge.o_prev().fix())),
}
}
}
impl<'a, V, E> Iterator for ONextIterator<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
type Item = EdgeHandle<'a, V, E>;
fn next(&mut self) -> Option<EdgeHandle<'a, V, E>> {
if let Some((cur, until)) = self.cur_until {
let cur_handle = self.dcel.edge(cur);
if cur == until {
self.cur_until = None;
} else {
let new_cur = cur_handle.o_next().fix();
self.cur_until = Some((new_cur, until));
}
Some(cur_handle)
} else {
None
}
}
}
impl<'a, V, E> DoubleEndedIterator for ONextIterator<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
fn next_back(&mut self) -> Option<EdgeHandle<'a, V, E>> {
if let Some((cur, until)) = self.cur_until {
let until_handle = self.dcel.edge(until);
if cur == until {
self.cur_until = None;
} else {
let new_until = until_handle.o_prev().fix();
self.cur_until = Some((cur, new_until));
}
Some(until_handle)
} else {
None
}
}
}
/// An iterator that iterates over the outgoing edges from a vertex.
///
/// The edges will be iterated in counterclockwise order. Note that
/// this assumes that you use a right handed coordinate system,
/// otherwise the sense of orientation is inverted.
pub struct CCWIterator<'a, V, E = ()>
where
V: 'a,
E: 'a,
{
dcel: &'a DCEL<V, E>,
cur_until: Option<(FixedEdgeHandle, FixedEdgeHandle)>,
}
impl<'a, V, E> CCWIterator<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
fn new(dcel: &'a DCEL<V, E>, vertex: FixedVertexHandle) -> Self {
let cur_until = if let Some(edge) = dcel.vertex(vertex).out_edge() {
Some((edge.ccw().fix(), edge.fix()))
} else {
None
};
CCWIterator { dcel, cur_until }
}
fn from_edge(dcel: &'a DCEL<V, E>, edge: FixedEdgeHandle) -> Self {
let edge = dcel.edge(edge);
CCWIterator {
dcel,
cur_until: Some((edge.fix(), edge.cw().fix())),
}
}
}
impl<'a, V, E> Iterator for CCWIterator<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
type Item = EdgeHandle<'a, V, E>;
fn next(&mut self) -> Option<EdgeHandle<'a, V, E>> {
if let Some((cur, until)) = self.cur_until {
let cur_handle = self.dcel.edge(cur);
if cur == until {
self.cur_until = None;
} else {
let new_cur = cur_handle.ccw().fix();
self.cur_until = Some((new_cur, until));
}
Some(cur_handle)
} else {
None
}
}
}
impl<'a, V, E> DoubleEndedIterator for CCWIterator<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
fn next_back(&mut self) -> Option<EdgeHandle<'a, V, E>> {
if let Some((cur, until)) = self.cur_until {
let until_handle = self.dcel.edge(until);
if cur == until {
self.cur_until = None;
} else {
let new_until = until_handle.cw().fix();
self.cur_until = Some((cur, new_until));
}
Some(until_handle)
} else {
None
}
}
}
pub struct FacesIterator<'a, V, E = ()>
where
V: 'a,
E: 'a,
{
dcel: &'a DCEL<V, E>,
current: FixedFaceHandle,
}
impl<'a, V, E> FacesIterator<'a, V, E>
where
V: 'a,
E: 'a,
{
fn new(dcel: &'a DCEL<V, E>) -> Self {
FacesIterator { dcel, current: 0 }
}
}
impl<'a, V, E> Iterator for FacesIterator<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
type Item = FaceHandle<'a, V, E>;
fn next(&mut self) -> Option<FaceHandle<'a, V, E>> {
if self.current < self.dcel.num_faces() {
let result = FaceHandle::new(self.dcel, self.current);
self.current += 1;
Some(result)
} else {
None
}
}
}
type FixedVerticesIterator = ::std::ops::Range<usize>;
pub struct VerticesIterator<'a, V, E = ()>
where
V: 'a,
E: 'a,
{
dcel: &'a DCEL<V, E>,
current: FixedVertexHandle,
}
impl<'a, V, E> VerticesIterator<'a, V, E>
where
V: 'a,
E: 'a,
{
fn new(dcel: &'a DCEL<V, E>) -> Self {
VerticesIterator { dcel, current: 0 }
}
}
impl<'a, V, E> Iterator for VerticesIterator<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
type Item = VertexHandle<'a, V, E>;
fn next(&mut self) -> Option<VertexHandle<'a, V, E>> {
if self.current < self.dcel.num_vertices() {
let result = VertexHandle::new(self.dcel, self.current);
self.current += 1;
Some(result)
} else {
None
}
}
}
pub struct EdgesIterator<'a, V, E = ()>
where
V: 'a,
E: 'a,
{
dcel: &'a DCEL<V, E>,
current: FixedEdgeHandle,
}
impl<'a, V, E> EdgesIterator<'a, V, E>
where
V: 'a,
E: 'a,
{
fn new(dcel: &'a DCEL<V, E>) -> Self {
EdgesIterator { dcel, current: 0 }
}
}
impl<'a, V, E> Iterator for EdgesIterator<'a, V, E>
where
E: Default,
{
type Item = EdgeHandle<'a, V, E>;
fn next(&mut self) -> Option<EdgeHandle<'a, V, E>> {
if let Some(edge) = self.dcel.edges.get(self.current) {
let twin = edge.twin;
self.current += 1;
if self.current - 1 < twin {
Some(EdgeHandle::new(self.dcel, self.current - 1))
} else {
self.next()
}
} else {
None
}
}
}
/// A handle to a directed edge.
///
/// Used to retrieve adjacent vertices and faces.
pub struct EdgeHandle<'a, V, E = ()>
where
V: 'a,
E: 'a,
{
dcel: &'a DCEL<V, E>,
handle: FixedEdgeHandle,
}
/// A handle to a vertex.
///
/// Used to retrieve its outgoing edges.
pub struct VertexHandle<'a, V, E = ()>
where
V: 'a,
E: 'a,
{
dcel: &'a DCEL<V, E>,
handle: FixedVertexHandle,
}
/// A handle to a face.
///
/// Used to retrieve its adjacent edges.
pub struct FaceHandle<'a, V, E = ()>
where
V: 'a,
E: 'a,
{
dcel: &'a DCEL<V, E>,
handle: FixedFaceHandle,
}
impl<'a, V, E> ::std::fmt::Debug for VertexHandle<'a, V, E>
where
V: 'a,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "VertexHandle({:?})", self.handle)
}
}
impl<'a, V, E> PartialEq for VertexHandle<'a, V, E>
where
V: 'a,
{
fn eq(&self, other: &Self) -> bool {
self.handle == other.handle
}
}
impl<'a, V, E> Copy for VertexHandle<'a, V, E> where V: 'a {}
impl<'a, V, E> VertexHandle<'a, V, E>
where
V: 'a,
E: 'a,
{
fn new(dcel: &'a DCEL<V, E>, handle: FixedVertexHandle) -> Self {
VertexHandle { dcel, handle }
}
}
impl<'a, V, E> VertexHandle<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
/// Returns an outgoing edge.
///
/// If the vertex has multiple outgoing edges, any of them is returned.
pub fn out_edge(&self) -> Option<EdgeHandle<'a, V, E>> {
self.dcel.vertices[self.handle]
.out_edge
.map(|e| self.dcel.edge(e))
}
/// Returns all outgoing edges in counter clockwise order.
///
/// Note that this assumes that you use a right handed coordinate system,
/// otherwise the sense of orientation is inverted.
pub fn ccw_out_edges(&self) -> CCWIterator<'a, V, E> {
CCWIterator::new(self.dcel, self.handle)
}
/// Creates a fixed vertex handle from this dynamic handle.
///
/// # Notes
///
/// Calling `DelaunayTriangulation::insert()` will create vertices in increasing order.
pub fn fix(&self) -> FixedVertexHandle {
self.handle
}
}
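// Sketch (consistent with ccw_out_edges above and its use in remove_vertex):
// collecting the fixed handles of all neighbours of a vertex.
//
// let neighbours: Vec<FixedVertexHandle> =
//     dcel.vertex(v).ccw_out_edges().map(|e| e.to().fix()).collect();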
impl<'a, V, E> Clone for VertexHandle<'a, V, E>
where
V: 'a,
E: 'a,
{
fn clone(&self) -> Self {
VertexHandle::new(self.dcel, self.handle)
}
}
impl<'a, V, E> ::std::ops::Deref for VertexHandle<'a, V, E> {
type Target = V;
fn deref(&self) -> &V {
&self.dcel.vertices[self.handle].data
}
}
impl<'a, V, E> Copy for EdgeHandle<'a, V, E> where V: 'a {}
impl<'a, V, E> Clone for EdgeHandle<'a, V, E>
where
V: 'a,
{
fn clone(&self) -> Self {
EdgeHandle::new(self.dcel, self.handle)
}
}
impl<'a, V, E> PartialEq for EdgeHandle<'a, V, E>
where
V: 'a,
{
fn eq(&self, other: &Self) -> bool {
self.handle == other.handle
}
}
impl<'a, V, E> ::std::fmt::Debug for EdgeHandle<'a, V, E>
where
V: 'a,
E: Default,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(
f,
"EdgeHandle - id: {:?} ({:?} -> {:?})",
self.handle,
self.from().fix(),
self.to().fix()
)
}
}
impl<'a, V, E> EdgeHandle<'a, V, E>
where
V: 'a,
E: 'a,
{
fn new(dcel: &'a DCEL<V, E>, handle: FixedEdgeHandle) -> Self {
EdgeHandle { dcel, handle }
}
}
impl<'a, V, E> EdgeHandle<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
/// Creates a fixed edge handle from this dynamic handle.
pub fn fix(&self) -> FixedEdgeHandle {
self.handle
}
/// Returns the edge's source vertex.
pub fn from(&self) -> VertexHandle<'a, V, E> {
let edge = &self.dcel.edges[self.handle];
VertexHandle::new(self.dcel, edge.origin)
}
/// Returns the oriented next edge.
///
/// The oriented next edge shares the same face as this edge.
/// When traversing the face's edges in oriented order,
/// this edge is the predecessor of the oriented next edge.
/// "Oriented" means counterclockwise for right handed
/// coordinate systems.
pub fn o_next(&self) -> EdgeHandle<'a, V, E> {
EdgeHandle::new(self.dcel, self.dcel.edges[self.handle].next)
}
/// Returns the oriented previous edge.
///
/// The oriented previous edge shares the same face as this edge.
/// When traversing the face's edges in oriented order,
/// this edge is the successor of the oriented previous edge.
/// "Oriented" means counterclockwise for right handed
/// coordinate systems.
pub fn o_prev(&self) -> EdgeHandle<'a, V, E> {
EdgeHandle::new(self.dcel, self.dcel.edges[self.handle].prev)
}
/// Returns an iterator over all edges sharing the same face
/// as this edge.
///
/// The face's edges will be traversed in oriented order.
/// This order is counterclockwise for right handed coordinate
/// systems or clockwise for left handed systems.
pub fn o_next_iterator(&self) -> ONextIterator<'a, V, E> {
ONextIterator::new(self.dcel, self.handle)
}
/// Returns the edges destination vertex.
pub fn to(&self) -> VertexHandle<'a, V, E> {
self.sym().from()
}
/// Returns the face located to the left of this edge.
pub fn face(&self) -> FaceHandle<'a, V, E> {
self.dcel.face(self.dcel.edges[self.handle].face)
}
/// Returns this edge's mirror edge.
pub fn sym(&self) -> EdgeHandle<'a, V, E> {
EdgeHandle {
dcel: self.dcel,
handle: self.dcel.edges[self.handle].twin,
}
}
/// Returns the next edge in clockwise direction.
///
/// Note that this assumes that you use a right handed coordinate system,
/// otherwise the sense of orientation is inverted.
pub fn cw(&self) -> EdgeHandle<'a, V, E> {
let twin = self.sym().handle;
EdgeHandle {
dcel: self.dcel,
handle: self.dcel.edges[twin].next,
}
}
/// Returns the next edge in counter clockwise direction.
///
/// Note that this assumes that you use a right handed coordinate system,
/// otherwise the sense of orientation is inverted.
pub fn ccw(&self) -> EdgeHandle<'a, V, E> {
EdgeHandle {
dcel: self.dcel,
handle: self.dcel.edges[self.handle].prev,
}
.sym()
}
/// Returns an iterator over all edges in counter clockwise
/// order.
///
/// Note that this assumes that you use a right handed coordinate system,
/// otherwise the sense of orientation is inverted.
pub fn ccw_iter(&self) -> CCWIterator<'a, V, E> |
}
impl<'a, V, E> Copy for FaceHandle<'a, V, E> where V: 'a {}
impl<'a, V, E> Clone for FaceHandle<'a, V, E>
where
V: 'a,
{
fn clone(&self) -> Self {
FaceHandle::new(self.dcel, self.handle)
}
}
impl<'a, V, E> PartialEq for FaceHandle<'a, V, E>
where
V: 'a,
{
fn eq(&self, other: &Self) -> bool {
self.handle == other.handle
}
}
impl<'a, V, E> ::std::fmt::Debug for FaceHandle<'a, V, E>
where
V: 'a,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "FaceHandle({:?})", self.handle)
}
}
impl<'a, V, E> FaceHandle<'a, V, E>
where
V: 'a,
E: 'a,
{
fn new(dcel: &'a DCEL<V, E>, handle: FixedFaceHandle) -> Self {
FaceHandle { dcel, handle }
}
}
impl<'a, V, E> FaceHandle<'a, V, E>
where
V: 'a,
E: Default + 'a,
{
/// Tries to interpret this face as a triangle, returning its 3 vertices.
///
/// # Notes
///
/// The returned vertices are in clockwise order. The position of the first one is undefined.
///
/// # Panic
/// This method will panic if the face does not form a triangle, for example if it is called on the [infinite face].
///
/// [infinite face]: struct.DelaunayTriangulation.html#method.infinite_face
pub fn as_triangle(&self) -> [VertexHandle<'a, V, E>; 3] {
let adjacent = self.dcel.faces[self.handle]
.adjacent_edge
.expect("Face has no adjacent edge");
let edge = self.dcel.edge(adjacent);
let prev = edge.o_prev();
debug_assert!(
prev.o_prev() == edge.o_next(),
"Face does not form a triangle"
);
[prev.from(), edge.from(), edge.to()]
}
/// Returns an edge that is adjacent to this face.
///
/// If this face has multiple adjacent edges, any of them is returned.
pub fn adjacent_edge(&self) -> Option<EdgeHandle<'a, V, E>> {
self.dcel.faces[self.handle]
.adjacent_edge
.map(|e| EdgeHandle::new(self.dcel, e))
}
/// Returns an iterator that iterates over all adjacent edges.
///
/// The edges are traversed in oriented order.
/// This order will be counterclockwise for right handed coordinate
/// system or clockwise for left handed systems.
pub fn adjacent_edges(&self) -> ONextIterator<'a, V, E> {
if let Some(adj) = self.dcel.faces[self.handle].adjacent_edge {
ONextIterator::new(self.dcel, adj)
} else {
ONextIterator::new_empty(self.dcel)
}
}
/// Creates a fixed face handle from this dynamic face handle.
pub fn fix(&self) -> FixedFaceHandle {
self.handle
}
}
#[cfg(test)]
mod test {
use super::{HalfEdgeEntry, DCEL};
#[test]
fn test_create_triangle() {
let mut dcel = DCEL::new();
let v0 = dcel.insert_vertex(());
let v1 = dcel.insert_vertex(());
let v2 = dcel.insert_vertex(());
let e01 = dcel.connect_two_isolated_vertices(v0, v1, 0);
let e12 = dcel.connect_edge_to_isolated_vertex(e01, v2);
let e20 = dcel.create_face(e12, e01);
let t01 = dcel.edges[e01].twin;
let t12 = dcel.edges[e12].twin;
let t20 = dcel.edges[e20].twin;
assert_eq!(
dcel.edges[e01],
HalfEdgeEntry {
next: e12,
prev: e20,
twin: t01,
origin: 0,
face: 1,
data: (),
}
);
assert_eq!(
dcel.edges[e12],
HalfEdgeEntry {
next: e20,
prev: e01,
twin: t12,
origin: 1,
face: 1,
data: (),
}
);
assert_eq!(
dcel.edges[e20],
HalfEdgeEntry {
next: e01,
prev: e12,
twin: t20,
origin: 2,
face: 1,
data: (),
}
);
assert_eq!(dcel.edges[t01].face, 0);
assert_eq!(dcel.edges[t12].face, 0);
assert_eq!(dcel.edges[t20].face, 0);
}
#[test]
fn test_flip() {
let mut dcel = DCEL::new();
let v0 = dcel.insert_vertex(());
let v1 = dcel.insert_vertex(());
let v2 = dcel.insert_vertex(());
let v3 = dcel.insert_vertex(());
let e01 = dcel.connect_two_isolated_vertices(v0, v1, 0);
let e12 = dcel.connect_edge_to_isolated_vertex(e01, v2);
let e23 = dcel.connect_edge_to_isolated_vertex(e12, v3);
let e30 = dcel.create_face(e23, e01);
let e_flip = dcel.create_face(e30, e23);
assert_eq!(
dcel.edges[e_flip],
HalfEdgeEntry {
next: e23,
prev: e30,
twin: dcel.edges[e_flip].twin,
origin: 0,
face: 2,
data: (),
}
);
dcel.flip_cw(e_flip);
let twin = dcel.edges[e_flip].twin;
assert_eq!(
dcel.edges[e_flip],
HalfEdgeEntry {
next: e12,
prev: e23,
twin,
origin: 3,
face: 2,
data: (),
}
);
assert_eq!(
dcel.edges[twin],
HalfEdgeEntry {
next: e30,
prev: e01,
twin: e_flip,
origin: 1,
face: 1,
data: (),
}
);
}
#[test]
fn test_split_isolated_edge() {
let mut dcel = DCEL::new();
let v0 = dcel.insert_vertex(());
let v1 = dcel.insert_vertex(());
let edge = dcel.connect_two_isolated_vertices(v0, v1, 0);
let split_vertex = dcel.insert_vertex(());
dcel.split_edge(edge, split_vertex);
dcel.print();
dcel.sanity_check();
}
#[test]
fn test_split_unisolated() {
let mut dcel = DCEL::new();
let v0 = dcel.insert_vertex(());
let v1 = dcel.insert_vertex(());
let v2 = dcel.insert_vertex(());
let v3 = dcel.insert_vertex(());
let e01 = dcel.connect_two_isolated_vertices(v0, v1, 0);
let t01 = dcel.edge(e01).sym().fix();
let e12 = dcel.connect_edge_to_isolated_vertex(e01, v2);
let t12 = dcel.edge(e12).sym().fix();
let e20 = dcel.create_face(e12, e01);
let t20 = dcel.edge(e20).sym().fix();
let e_split = dcel.split_edge(e20, v3);
let t_split = dcel.edge(e_split).sym().fix();
assert_eq!(
dcel.edges[e20],
HalfEdgeEntry {
next: e_split,
prev: e12,
twin: t20,
origin: v2,
face: 1,
data: (),
}
);
assert_eq!(
dcel.edges[e_split],
HalfEdgeEntry {
next: e01,
prev: e20,
twin: t_split,
origin: v3,
face: 1,
data: (),
}
);
assert_eq!(
dcel.edges[t_split],
HalfEdgeEntry {
next: t20,
prev: t01,
origin: v0,
twin: e_split,
face: 0,
data: (),
}
);
assert_eq!(
dcel.edges[t20],
HalfEdgeEntry {
next: t12,
prev: t_split,
origin: v3,
twin: e20,
face: 0,
data: (),
}
);
assert_eq!(dcel.edges[t01].next, t_split);
assert_eq!(dcel.edges[e01].prev, e_split);
assert_eq!(dcel.edges[t12].prev, t20);
assert_eq!(dcel.edges[e12].next, e20);
assert!(
dcel.vertices[v3].out_edge == Some(e_split) || dcel.vertices[v3].out_edge == Some(t20)
);
dcel.sanity_check();
}
#[test]
fn test_split_half_isolated() {
let mut dcel = DCEL::new();
let v0 = dcel.insert_vertex(());
let v1 = dcel.insert_vertex(());
let v2 = dcel.insert_vertex(());
let v_split = dcel.insert_vertex(());
let e1 = dcel.connect_two_isolated_vertices(v0, v1, 0);
let e2 = dcel.connect_edge_to_isolated_vertex(e1, v2);
dcel.split_edge(e2, v_split);
dcel.sanity_check();
}
#[test]
fn test_cw_ccw() {
let mut dcel = DCEL::new();
let v0 = dcel.insert_vertex(());
let v1 = dcel.insert_vertex(());
let v2 = dcel.insert_vertex(());
let v3 = dcel.insert_vertex(());
let e01 = dcel.connect_two_isolated_vertices(v0, v1, 0);
let e12 = dcel.connect_edge_to_isolated_vertex(e01, v2);
let e23 = dcel.connect_edge_to_isolated_vertex(e12, v3);
let e30 = dcel.create_face(e23, e01);
let e02 = dcel.create_face(e30, e23);
let e02 = dcel.edge(e02);
assert_eq!(e02.cw().fix(), e01);
assert_eq!(e02.ccw().fix(), dcel.edges[e30].twin);
}
#[test]
fn pentagon_test() {
let mut dcel = DCEL::new();
let mut v = Vec::new();
for _ in 0..5 {
v.push(dcel.insert_vertex(()));
}
let e01 = dcel.connect_two_isolated_vertices(v[0], v[1], 0);
let e12 = dcel.connect_edge_to_isolated_vertex(e01, v[2]);
let e23 = dcel.connect_edge_to_isolated_vertex(e12, v[3]);
let e34 = dcel.connect_edge_to_isolated_vertex(e23, v[4]);
let e40 = dcel.create_face(e34, e01);
let e02 = dcel.create_face(e40, e23);
let e03 = dcel.create_face(e40, e34);
let entry = dcel.edges[e02];
assert_eq!(entry.next, e23);
assert_eq!(entry.prev, dcel.edges[e03].twin);
assert_eq!(entry.origin, v[0]);
}
#[test]
fn test_ccw_iterator() {
let mut dcel = DCEL::new();
let mut vs = Vec::new();
let central = dcel.insert_vertex(());
assert_eq!(dcel.vertex(central).ccw_out_edges().next(), None);
for _ in 0..5 {
vs.push(dcel.insert_vertex(()));
}
let mut last_edge = dcel.connect_two_isolated_vertices(central, vs[0], 0);
last_edge = dcel.edge(last_edge).sym().fix();
for vertex in &vs[1..] {
last_edge = dcel.connect_edge_to_isolated_vertex(last_edge, *vertex);
last_edge = dcel.edge(last_edge).sym().fix();
}
let out_edge = dcel.vertex(central).out_edge().unwrap();
let mut neighs: Vec<_> = out_edge.ccw_iter().map(|e| e.to().fix()).collect();
assert_eq!(neighs.len(), 5);
for i in 0..5 {
let first = neighs[i];
let second = neighs[(i + 1) % 5];
assert_eq!(first - 1, second % 5);
}
let revs: Vec<_> = out_edge.ccw_iter().rev().map(|e| e.to().fix()).collect();
neighs.reverse();
assert_eq!(neighs, revs);
}
#[test]
fn test_o_next_iterator() {
let mut dcel = DCEL::new();
let mut vs = Vec::new();
for _ in 0..5 {
vs.push(dcel.insert_vertex(()));
}
let mut last_edge = dcel.connect_two_isolated_vertices(vs[0], vs[1], 0);
let mut edges = vec![last_edge];
for vertex in &vs[2..] {
last_edge = dcel.connect_edge_to_isolated_vertex(last_edge, *vertex);
edges.push(last_edge);
}
edges.push(dcel.connect_edge_to_edge(last_edge, vs[0]));
let mut iterated: Vec<_> = dcel
.edge(edges[0])
.o_next_iterator()
.map(|e| e.fix())
.collect();
assert_eq!(iterated, edges);
let rev: Vec<_> = dcel
.edge(edges[0])
.o_next_iterator()
.rev()
.map(|e| e.fix())
.collect();
iterated.reverse();
assert_eq!(iterated, rev);
}
}
| {
CCWIterator::from_edge(self.dcel, self.handle)
} |
lib.rs | /*
Copyright (c) 2020 Todd Stellanova
LICENSE: BSD3 (see LICENSE file)
*/
#![no_std]
#[cfg(feature = "rttdebug")]
use panic_rtt_core::rprintln;
use crate::interface::SensorInterface;
use embedded_hal as hal;
use hal::blocking::delay::DelayMs;
pub mod interface;
/// Errors in this crate
#[derive(Debug)]
pub enum Error<CommE, PinE> {
/// Sensor communication error
Comm(CommE),
/// Pin setting error
Pin(PinE),
/// Sensor reading out of range
OutOfRange,
/// Configuration reads invalid
Configuration,
/// Unrecognized chip ID
UnknownChipId,
}
/// Gain settings ( in LSb/Gauss )
/// One tesla (T) is equal to 10^4 (10,000) gauss
#[repr(u8)]
pub enum GainSetting {
///± 0.88 Ga / 0.73 (mGa/LSb)
Gain1370 = 0b00000000,
///± 1.30 Ga / 0.92 (mGa/LSb)
Gain1090 = 0b00100000,
///± 1.90 Ga / 1.22 (mGa/LSb)
Gain0820 = 0b01000000,
///± 2.50 Ga / 1.52 (mGa/LSb)
Gain0660 = 0b01100000,
///± 4.00 Ga / 2.27 (mGa/LSb)
Gain0440 = 0b10000000,
///± 4.70 Ga / 2.56 (mGa/LSb)
Gain0390 = 0b10100000,
///± 5.60 Ga / 3.03 (mGa/LSb)
Gain0330 = 0b11000000,
///± 8.10 Ga / 4.35 (mGa/LSb)
Gain0230 = 0b11100000,
}
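// Illustrative use of the gain scale above (example values, not part of the driver API):
// with `GainSetting::Gain1090` the resolution is 0.92 mGa/LSb, so a raw axis reading of
// 1000 LSb corresponds to roughly 1000 * 0.92 = 920 mGa, i.e. 0.92 Ga.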
/// Output Data Rate settings in Hz
#[repr(u8)]
pub enum OdrSetting {
Odr0_75Hz = 0b000,
Odr1_5Hz = 0b001,
Odr3_0Hz = 0b010,
Odr7_5Hz = 0b011,
Odr15_0Hz = 0b100,
Odr30_0Hz = 0b110,
Odr220_0Hz = 0b111,
}
/// Configuring sample averaging
#[repr(u8)]
pub enum SampleAvgSetting {
AvgSamples1 = 0b00,
AvgSamples2 = 0b01,
AvgSamples4 = 0b10,
/// Average 8 samples
AvgSamples8 = 0b11,
}
/// Measurement mode settings
#[repr(u8)]
pub enum MeasurementModeSetting {
NormalMode = 0b00,
/// Positive bias current
PositiveBias = 0b01,
/// Negative bias current-- unsupported on HMC5883
NegativeBias = 0b10,
/// Temperature sensor only -- unsupported on HMC5883
TemperatureOnly = 0b11,
}
pub struct HMC5983<SI> {
pub(crate) sensor_interface: SI,
/// Buffer for reads and writes to the sensor
block_buf: [u8; BLOCK_BUF_LEN],
}
impl<SI, CommE, PinE> HMC5983<SI>
where
SI: SensorInterface<InterfaceError = crate::Error<CommE, PinE>>,
{
pub fn new_with_interface(sensor_interface: SI) -> Self {
Self {
sensor_interface,
block_buf: [0; BLOCK_BUF_LEN],
}
}
pub fn init(
&mut self,
delay_source: &mut impl DelayMs<u8>,
) -> Result<(), crate::Error<CommE, PinE>> {
self.reset(delay_source)
}
fn reset(
&mut self,
delay_source: &mut impl DelayMs<u8>,
) -> Result<(), crate::Error<CommE, PinE>> {
//wakeup the chip
for reg in 0x00..0x0D {
let _val = self.read_reg(reg)?;
#[cfg(feature = "rttdebug")]
rprintln!("0x{:0x} : {} ", reg, _val);
}
const EXPECTED_PROD_ID_A: u8 = 72; //'H';
const EXPECTED_PROD_ID_B: u8 = 52; //'4';
const EXPECTED_PROD_ID_C: u8 = 51; //'3';
//compare product ID against known product ID
//read the product identifiers
self.sensor_interface
.read_block(REG_ID_A, &mut self.block_buf[..3])?;
if self.block_buf[0] != EXPECTED_PROD_ID_A
|| self.block_buf[1] != EXPECTED_PROD_ID_B
|| self.block_buf[2] != EXPECTED_PROD_ID_C
{
#[cfg(feature = "rttdebug")]
rprintln!(
"bad ID block: {},{},{}",
self.block_buf[0],
self.block_buf[1],
self.block_buf[2]
);
return Err(Error::UnknownChipId);
}
self.set_all_config_a(
MeasurementModeSetting::NormalMode,
OdrSetting::Odr30_0Hz,
SampleAvgSetting::AvgSamples8,
true,
)?;
self.set_gain(GainSetting::Gain0820)?;
// (Continuous-measurement mode)
self.sensor_interface.write_reg(
REG_CONFIG_C,
MeasurementModeSetting::NormalMode as u8,
)?;
delay_source.delay_ms(100);
Ok(())
}
/// Set the mag gain, which determines the range
pub fn set_gain(
&mut self,
gain: GainSetting,
) -> Result<(), crate::Error<CommE, PinE>> {
let gain_val: u8 = gain as u8;
self.sensor_interface.write_reg(REG_CONFIG_B, gain_val)?;
let confirm_val = self.read_reg(REG_CONFIG_B)?;
if confirm_val != gain_val {
#[cfg(feature = "rttdebug")]
rprintln!("gain bad: expected {} got {}", gain_val, confirm_val);
return Err(Error::Configuration);
}
Ok(())
}
/// Set all of the Config A register settings
pub fn set_all_config_a(
&mut self,
mode: MeasurementModeSetting,
odr: OdrSetting,
averaging: SampleAvgSetting,
temp_enabled: bool,
) -> Result<(), crate::Error<CommE, PinE>> {
| / Read a single register
fn read_reg(&mut self, reg: u8) -> Result<u8, crate::Error<CommE, PinE>> {
self.sensor_interface
.read_block(reg, &mut self.block_buf[..1])?;
Ok(self.block_buf[0])
}
/// Verify that a magnetometer reading is within the expected range.
// fn reading_in_range(sample: &[i16; 3]) -> bool {
// /// Maximum Dynamic Range for X and Y axes (micro Teslas)
// const MDR_XY_AXES: i16 = 1600;
// /// Maximum Dynamic Range for Z axis (micro Teslas)
// const MDR_Z_AXIS: i16 = 2500;
// /// Resolution (micro Teslas per LSB)
// const RESO_PER_BIT: f32 = 0.3;
// const MAX_VAL_XY: i16 =
// (((MDR_XY_AXES as f32) / RESO_PER_BIT) as i16) + 1;
// const MAX_VAL_Z: i16 =
// (((MDR_Z_AXIS as f32) / RESO_PER_BIT) as i16) + 1;
//
// sample[0].abs() < MAX_VAL_XY
// && sample[1].abs() < MAX_VAL_XY
// && sample[2].abs() < MAX_VAL_Z
// }
/// Combine high and low bytes of i16 mag value
fn raw_reading_to_i16(buf: &[u8], idx: usize) -> i16 {
let val: i16 = (buf[idx] as i16) | ((buf[idx + 1] as i16) << 8);
val
}
pub fn get_mag_vector(
&mut self,
) -> Result<[i16; 3], crate::Error<CommE, PinE>> {
const XYZ_DATA_LEN: usize = 6;
//get the actual mag data from the sensor
self.sensor_interface.read_block(
REG_MAG_DATA_START,
&mut self.block_buf[..XYZ_DATA_LEN],
)?;
let sample_i16 = [
Self::raw_reading_to_i16(&self.block_buf, 0),
Self::raw_reading_to_i16(&self.block_buf, 2),
Self::raw_reading_to_i16(&self.block_buf, 4),
];
// if !Self::reading_in_range(&sample_i16) {
// #[cfg(feature = "rttdebug")]
// rprintln!("bad reading?");
//
// return Err(Error::OutOfRange);
// }
//TODO do cross-axis flow calibration?
Ok(sample_i16)
}
/// Read temperature from device
/// Result is degrees Celsius
pub fn get_temperature(
&mut self,
) -> Result<i16, crate::Error<CommE, PinE>> {
const TEMP_DATA_LEN: usize = 2;
self.sensor_interface.read_block(
REG_TEMP_OUTPUT_MSB,
&mut self.block_buf[..TEMP_DATA_LEN],
)?;
//TODO datasheet is not clear whether the temp can go negative
        // Temperature = (MSB * 2^8 + LSB) / (2^4 * 8) + 25, in °C
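        // Worked example (values chosen only for illustration): MSB = 0x01 and LSB = 0x80
        // give (1 * 256 + 128) / 128 + 25 = 384 / 128 + 25 = 28 degrees Celsius.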
let celsius = (((self.block_buf[0] as i16) * 256)
+ (self.block_buf[1] as i16))
/ 128
+ 25;
Ok(celsius)
}
}
const REG_CONFIG_A: u8 = 0x00;
const REG_CONFIG_B: u8 = 0x01;
const REG_CONFIG_C: u8 = 0x02;
/// X-axis output value register
const REG_DATA_X: u8 = 0x03;
// Y-axis output value register
// const REG_DATA_Y:u8 = 0x05;
// Z-axis output value register
// const REG_DATA_Z:u8 = 0x07;
// const REG_STATUS:u8 = 0x09;
/// Register to read out all three dimensions of mag data
const REG_MAG_DATA_START: u8 = REG_DATA_X;
/// Identification Register A
const REG_ID_A: u8 = 0x0A;
// Identification Register B
// const REG_ID_B: u8 = 0x0B;
// Identification Register C
// const REG_ID_C: u8 = 0x0C;
/// Temperature outputs, HMC5983
const REG_TEMP_OUTPUT_MSB: u8 = 0x31;
// const REG_TEMP_OUTPUT_LSB: u8 = 0x32;
// Status Register 2
// const REG_STATUS2: u8 = 0x09;
const BLOCK_BUF_LEN: usize = 32;
        | // Config Register A layout (HMC5883L/HMC5983 datasheet): bit 7 = temperature sensor
        // enable, bits 6:5 = sample averaging, bits 4:2 = output data rate, bits 1:0 = mode;
        // the fields are OR-ed together (AND-ing disjoint fields would always yield zero).
        let new_val = (if temp_enabled { 1 << 7 } else { 0 })
            | ((averaging as u8) << 5)
            | ((odr as u8) << 2)
            | (mode as u8);
self.sensor_interface.write_reg(REG_CONFIG_A, new_val)
}
// |
ShallowWrapper.js | import React from 'react';
import flatten from 'lodash/flatten';
import unique from 'lodash/uniq';
import compact from 'lodash/compact';
import cheerio from 'cheerio';
import {
nodeEqual,
containsChildrenSubArray,
propFromEvent,
withSetStateAllowed,
propsOfNode,
typeOfNode,
} from './Utils';
import {
debugNodes,
} from './Debug';
import {
getTextFromNode,
hasClassName,
childrenOfNode,
parentsOfNode,
treeFilter,
buildPredicate,
} from './ShallowTraversal';
import {
createShallowRenderer,
renderToStaticMarkup,
} from './react-compat';
/**
* Finds all nodes in the current wrapper nodes' render trees that match the provided predicate
* function.
*
* @param {ShallowWrapper} wrapper
* @param {Function} predicate
* @returns {ShallowWrapper}
*/
function findWhereUnwrapped(wrapper, predicate) {
return wrapper.flatMap(n => treeFilter(n.node, predicate));
}
/**
* Returns a new wrapper instance with only the nodes of the current wrapper instance that match
* the provided predicate function.
*
* @param {ShallowWrapper} wrapper
* @param {Function} predicate
* @returns {ShallowWrapper}
*/
function filterWhereUnwrapped(wrapper, predicate) {
return wrapper.wrap(compact(wrapper.nodes.filter(predicate)));
}
/**
* @class ShallowWrapper
*/
export default class ShallowWrapper {
constructor(nodes, root, options = {}) {
if (!root) {
this.root = this;
this.unrendered = nodes;
this.renderer = createShallowRenderer();
this.renderer.render(nodes, options.context);
this.node = this.renderer.getRenderOutput();
this.nodes = [this.node];
this.length = 1;
} else {
this.root = root;
this.unrendered = null;
this.renderer = null;
if (!Array.isArray(nodes)) {
this.node = nodes;
this.nodes = [nodes];
} else {
this.node = nodes[0];
this.nodes = nodes;
}
this.length = this.nodes.length;
}
this.options = options;
}
/**
* Gets the instance of the component being rendered as the root node passed into `shallow()`.
*
* NOTE: can only be called on a wrapper instance that is also the root instance.
*
* Example:
* ```
* const wrapper = shallow(<MyComponent />);
* const inst = wrapper.instance();
* expect(inst).to.be.instanceOf(MyComponent);
* ```
* @returns {ReactComponent}
*/
instance() {
return this.renderer._instance._instance;
}
/**
* Forces a re-render. Useful to run before checking the render output if something external
* may be updating the state of the component somewhere.
*
* NOTE: can only be called on a wrapper instance that is also the root instance.
*
* @returns {ShallowWrapper}
*/
update() {
if (this.root !== this) {
throw new Error('ShallowWrapper::update() can only be called on the root');
}
this.single(() => {
this.node = this.renderer.getRenderOutput();
this.nodes = [this.node];
});
return this;
}
/**
   * A method that sets the props of the root component, and re-renders. Useful when you want to
   * test how the component behaves over time with changing props. Calling this, for
* instance, will call the `componentWillReceiveProps` lifecycle method.
*
* Similar to `setState`, this method accepts a props object and will merge it in with the already
* existing props.
*
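   * Example (illustrative; `MyComponent` is a placeholder component):
   * ```
   * const wrapper = shallow(<MyComponent foo="bar" />);
   * wrapper.setProps({ foo: 'baz' });
   * expect(wrapper.instance().props.foo).to.equal('baz');
   * ```
   *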
* NOTE: can only be called on a wrapper instance that is also the root instance.
*
* @param {Object} props object
* @returns {ShallowWrapper}
*/
setProps(props) {
if (this.root !== this) {
throw new Error('ShallowWrapper::setProps() can only be called on the root');
}
this.single(() => {
withSetStateAllowed(() => {
this.unrendered = React.cloneElement(this.unrendered, props);
this.renderer.render(this.unrendered, this.options.context);
this.update();
});
});
return this;
}
/**
* A method to invoke `setState` on the root component instance similar to how you might in the
* definition of the component, and re-renders. This method is useful for testing your component
   * in hard-to-achieve states, but it should be used sparingly. If possible, you should use
   * your component's external API to get it into whatever state you want to test, so that the
   * test is as accurate as possible. This is not always practical, however.
*
* NOTE: can only be called on a wrapper instance that is also the root instance.
*
* @param {Object} state to merge
* @returns {ShallowWrapper}
*/
setState(state) {
if (this.root !== this) {
throw new Error('ShallowWrapper::setState() can only be called on the root');
}
this.single(() => {
withSetStateAllowed(() => {
this.instance().setState(state);
this.update();
});
});
return this;
}
/**
   * A method that sets the context of the root component, and re-renders. Useful when you want to
   * test how the component behaves over time with changing contexts.
*
* NOTE: can only be called on a wrapper instance that is also the root instance.
*
* @param {Object} context object
* @returns {ShallowWrapper}
*/
setContext(context) {
if (this.root !== this) {
throw new Error('ShallowWrapper::setContext() can only be called on the root');
}
if (!this.options.context) {
throw new Error(
'ShallowWrapper::setContext() can only be called on a wrapper that was originally passed ' +
'a context option'
);
}
this.renderer.render(this.unrendered, context);
this.update();
return this;
}
/**
* Whether or not a given react element exists in the shallow render tree.
*
* Example:
* ```
* const wrapper = shallow(<MyComponent />);
* expect(wrapper.contains(<div className="foo bar" />)).to.equal(true);
* ```
*
* @param {ReactElement|Array<ReactElement>} nodeOrNodes
* @returns {Boolean}
*/
contains(nodeOrNodes) {
const predicate = Array.isArray(nodeOrNodes)
? other => containsChildrenSubArray(nodeEqual, other, nodeOrNodes)
: other => nodeEqual(nodeOrNodes, other);
return findWhereUnwrapped(this, predicate).length > 0;
}
/**
* Whether or not a given react element exists in the shallow render tree.
*
* Example:
* ```
* const wrapper = shallow(<MyComponent />);
* expect(wrapper.contains(<div className="foo bar" />)).to.equal(true);
* ```
*
* @param {ReactElement} node
* @returns {Boolean}
*/
equals(node) {
return this.single(() => nodeEqual(this.node, node));
}
/**
* Finds every node in the render tree of the current wrapper that matches the provided selector.
*
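   * Example (illustrative):
   * ```
   * const wrapper = shallow(<MyComponent />);
   * expect(wrapper.find('.foo')).to.have.length(1);
   * ```
   *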
* @param {String|Function} selector
* @returns {ShallowWrapper}
*/
find(selector) {
const predicate = buildPredicate(selector);
return findWhereUnwrapped(this, predicate);
}
/**
* Returns whether or not current node matches a provided selector.
*
* NOTE: can only be called on a wrapper of a single node.
*
* @param {String|Function} selector
* @returns {boolean}
*/
is(selector) {
const predicate = buildPredicate(selector);
return this.single(predicate);
}
/**
* Returns a new wrapper instance with only the nodes of the current wrapper instance that match
* the provided predicate function. The predicate should receive a wrapped node as its first
* argument.
*
* @param {Function} predicate
* @returns {ShallowWrapper}
*/
filterWhere(predicate) {
return filterWhereUnwrapped(this, n => predicate(this.wrap(n)));
}
/**
* Returns a new wrapper instance with only the nodes of the current wrapper instance that match
* the provided selector.
*
* @param {String|Function} selector
* @returns {ShallowWrapper}
*/
filter(selector) {
const predicate = buildPredicate(selector);
return filterWhereUnwrapped(this, predicate);
}
/**
* Returns a new wrapper instance with only the nodes of the current wrapper that did not match
* the provided selector. Essentially the inverse of `filter`.
*
* @param {String|Function} selector
* @returns {ShallowWrapper}
*/
not(selector) {
const predicate = buildPredicate(selector);
return filterWhereUnwrapped(this, n => !predicate(n));
}
/**
* Returns a string of the rendered text of the current render tree. This function should be
* looked at with skepticism if being used to test what the actual HTML output of the component
* will be. If that is what you would like to test, use enzyme's `render` function instead.
*
* NOTE: can only be called on a wrapper of a single node.
*
* @returns {String}
*/ | text() {
return this.single(getTextFromNode);
}
/**
* Returns the HTML of the node.
*
* NOTE: can only be called on a wrapper of a single node.
*
* @returns {String}
*/
html() {
return this.single(n => this.type() === null ? null : renderToStaticMarkup(n));
}
/**
* Returns the current node rendered to HTML and wrapped in a CheerioWrapper.
*
* NOTE: can only be called on a wrapper of a single node.
*
* @returns {CheerioWrapper}
*/
render() {
return this.type() === null ? cheerio() : cheerio.load(this.html()).root();
}
/**
* A method that unmounts the component. This can be used to simulate a component going through
   * an unmount/mount lifecycle.
* @returns {ShallowWrapper}
*/
unmount() {
this.renderer.unmount();
return this;
}
/**
* Used to simulate events. Pass an eventname and (optionally) event arguments. This method of
* testing events should be met with some skepticism.
*
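   * Example (illustrative; assumes the rendered output contains a button with an `onClick` prop):
   * ```
   * const wrapper = shallow(<MyComponent />);
   * wrapper.find('button').simulate('click');
   * ```
   *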
* @param {String} event
* @param {Array} args
* @returns {ShallowWrapper}
*/
simulate(event, ...args) {
const handler = this.prop(propFromEvent(event));
if (handler) {
withSetStateAllowed(() => {
// TODO(lmr): create/use synthetic events
// TODO(lmr): emulate React's event propagation
handler(...args);
this.root.update();
});
}
return this;
}
/**
* Returns the props hash for the root node of the wrapper.
*
* NOTE: can only be called on a wrapper of a single node.
*
* @returns {Object}
*/
props() {
return this.single(propsOfNode);
}
/**
* Returns the state hash for the root node of the wrapper. Optionally pass in a prop name and it
* will return just that value.
*
* NOTE: can only be called on a wrapper of a single node.
*
* @param {String} name (optional)
* @returns {*}
*/
state(name) {
if (this.root !== this) {
throw new Error('ShallowWrapper::state() can only be called on the root');
}
const _state = this.single(() => this.instance().state);
if (name !== undefined) {
return _state[name];
}
return _state;
}
/**
* Returns the context hash for the root node of the wrapper.
* Optionally pass in a prop name and it will return just that value.
*
* NOTE: can only be called on a wrapper of a single node.
*
* @param {String} name (optional)
* @returns {*}
*/
context(name) {
if (this.root !== this) {
throw new Error('ShallowWrapper::context() can only be called on the root');
}
const _context = this.single(() => this.instance().context);
if (name) {
return _context[name];
}
return _context;
}
/**
* Returns a new wrapper with all of the children of the current wrapper.
*
* @param {String|Function} [selector]
* @returns {ShallowWrapper}
*/
children(selector) {
const allChildren = this.flatMap(n => childrenOfNode(n.node));
return selector ? allChildren.filter(selector) : allChildren;
}
/**
* Returns a new wrapper with a specific child
*
* @param {Number} [index]
* @returns {ShallowWrapper}
*/
childAt(index) {
return this.single(() => this.children().at(index));
}
/**
* Returns a wrapper around all of the parents/ancestors of the wrapper. Does not include the node
* in the current wrapper.
*
* NOTE: can only be called on a wrapper of a single node.
*
* @param {String|Function} [selector]
* @returns {ShallowWrapper}
*/
parents(selector) {
const allParents = this.wrap(this.single(n => parentsOfNode(n, this.root.node)));
return selector ? allParents.filter(selector) : allParents;
}
/**
* Returns a wrapper around the immediate parent of the current node.
*
* @returns {ShallowWrapper}
*/
parent() {
return this.flatMap(n => [n.parents().get(0)]);
}
/**
   * Returns a wrapper around the closest node (the current node itself or one of its ancestors)
   * that matches the provided selector.
   *
* @param {String|Function} selector
* @returns {ShallowWrapper}
*/
closest(selector) {
return this.is(selector) ? this : this.parents().filter(selector).first();
}
/**
* Shallow renders the current node and returns a shallow wrapper around it.
*
* NOTE: can only be called on wrapper of a single node.
*
* @param options object
* @returns {ShallowWrapper}
*/
shallow(options) {
return this.single((n) => new ShallowWrapper(n, null, options));
}
/**
* Returns the value of prop with the given name of the root node.
*
* @param propName
* @returns {*}
*/
prop(propName) {
return this.props()[propName];
}
/**
   * Returns the type of the root node of this wrapper. If it's a composite component, this will be
   * the component constructor. If it's a native DOM node, it will be a string.
*
* @returns {String|Function}
*/
type() {
return this.single(typeOfNode);
}
/**
* Returns whether or not the current root node has the given class name or not.
*
* NOTE: can only be called on a wrapper of a single node.
*
* @param className
* @returns {Boolean}
*/
hasClass(className) {
if (className && className.indexOf('.') !== -1) {
console.warn(
'It looks like you\'re calling `ShallowWrapper::hasClass()` with a CSS selector. ' +
'hasClass() expects a class name, not a CSS selector.'
);
}
return this.single(n => hasClassName(n, className));
}
/**
* Iterates through each node of the current wrapper and executes the provided function with a
* wrapper around the corresponding node passed in as the first argument.
*
* @param {Function} fn
* @returns {ShallowWrapper}
*/
forEach(fn) {
this.nodes.forEach((n, i) => fn.call(this, this.wrap(n), i));
return this;
}
/**
* Maps the current array of nodes to another array. Each node is passed in as a `ShallowWrapper`
* to the map function.
*
* @param {Function} fn
* @returns {Array}
*/
map(fn) {
return this.nodes.map((n, i) => fn.call(this, this.wrap(n), i));
}
/**
* Reduces the current array of nodes to a value. Each node is passed in as a `ShallowWrapper`
* to the reducer function.
*
* @param {Function} fn - the reducer function
* @param {*} initialValue - the initial value
* @returns {*}
*/
reduce(fn, initialValue) {
return this.nodes.reduce(
(accum, n, i) => fn.call(this, accum, this.wrap(n), i),
initialValue
);
}
/**
* Reduces the current array of nodes to another array, from right to left. Each node is passed
* in as a `ShallowWrapper` to the reducer function.
*
* @param {Function} fn - the reducer function
* @param {*} initialValue - the initial value
* @returns {*}
*/
reduceRight(fn, initialValue) {
return this.nodes.reduceRight(
(accum, n, i) => fn.call(this, accum, this.wrap(n), i),
initialValue
);
}
/**
* Returns whether or not any of the nodes in the wrapper match the provided selector.
*
* @param {Function|String} selector
* @returns {Boolean}
*/
some(selector) {
const predicate = buildPredicate(selector);
return this.nodes.some(predicate);
}
/**
* Returns whether or not any of the nodes in the wrapper pass the provided predicate function.
*
* @param {Function} predicate
* @returns {Boolean}
*/
someWhere(predicate) {
return this.nodes.some((n, i) => predicate.call(this, this.wrap(n), i));
}
/**
* Returns whether or not all of the nodes in the wrapper match the provided selector.
*
* @param {Function|String} selector
* @returns {Boolean}
*/
every(selector) {
const predicate = buildPredicate(selector);
return this.nodes.every(predicate);
}
/**
* Returns whether or not any of the nodes in the wrapper pass the provided predicate function.
*
* @param {Function} predicate
* @returns {Boolean}
*/
everyWhere(predicate) {
return this.nodes.every((n, i) => predicate.call(this, this.wrap(n), i));
}
/**
* Utility method used to create new wrappers with a mapping function that returns an array of
* nodes in response to a single node wrapper. The returned wrapper is a single wrapper around
* all of the mapped nodes flattened (and de-duplicated).
*
* @param {Function} fn
* @returns {ShallowWrapper}
*/
flatMap(fn) {
const nodes = this.nodes.map((n, i) => fn.call(this, this.wrap(n), i));
const flattened = flatten(nodes, true);
const uniques = unique(flattened);
return this.wrap(uniques);
}
/**
* Finds all nodes in the current wrapper nodes' render trees that match the provided predicate
* function. The predicate function will receive the nodes inside a ShallowWrapper as its
* first argument.
*
* @param {Function} predicate
* @returns {ShallowWrapper}
*/
findWhere(predicate) {
return findWhereUnwrapped(this, n => predicate(this.wrap(n)));
}
/**
* Returns the node at a given index of the current wrapper.
*
* @param index
* @returns {ReactElement}
*/
get(index) {
return this.nodes[index];
}
/**
* Returns a wrapper around the node at a given index of the current wrapper.
*
* @param index
* @returns {ShallowWrapper}
*/
at(index) {
return this.wrap(this.nodes[index]);
}
/**
* Returns a wrapper around the first node of the current wrapper.
*
* @returns {ShallowWrapper}
*/
first() {
return this.at(0);
}
/**
* Returns a wrapper around the last node of the current wrapper.
*
* @returns {ShallowWrapper}
*/
last() {
return this.at(this.length - 1);
}
/**
* Returns true if the current wrapper has no nodes. False otherwise.
*
* @returns {boolean}
*/
isEmpty() {
return this.length === 0;
}
/**
* Utility method that throws an error if the current instance has a length other than one.
* This is primarily used to enforce that certain methods are only run on a wrapper when it is
* wrapping a single node.
*
* @param fn
* @returns {*}
*/
single(fn) {
if (this.length !== 1) {
throw new Error(
        `This method is only meant to be run on a single node. ${this.length} found instead.`
);
}
return fn.call(this, this.node);
}
/**
* Helpful utility method to create a new wrapper with the same root as the current wrapper, with
* any nodes passed in as the first parameter automatically wrapped.
*
* @param node
* @returns {ShallowWrapper}
*/
wrap(node) {
if (node instanceof ShallowWrapper) {
return node;
}
return new ShallowWrapper(node, this.root);
}
/**
* Returns an HTML-like string of the shallow render for debugging purposes.
*
* @returns {String}
*/
debug() {
return debugNodes(this.nodes);
}
} | |
ball_against_ball.rs | use na;
use math::{Scalar, Point, Vect};
use entities::shape::Ball;
/// Distance between two balls (zero if they intersect or touch).
#[inline]
pub fn ball_against_ball<N, P, V>(center1: &P, b1: &Ball<N>, center2: &P, b2: &Ball<N>) -> N
where N: Scalar,
P: Point<N, V>,
V: Vect<N> {
let r1 = b1.radius();
let r2 = b2.radius();
let delta_pos = *center2 - *center1;
let sqdist = na::sqnorm(&delta_pos);
let sum_radius = r1 + r2;
if sqdist <= sum_radius * sum_radius {
na::zero()
}
else {
sqdist.sqrt() - sum_radius | }
} |
|
dashboard.go | /*
Copyright 2017 The Nuclio Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"strings"
"time"
"github.com/nuclio/nuclio/pkg/dashboard"
"github.com/nuclio/nuclio/pkg/dashboard/functiontemplates"
"github.com/nuclio/nuclio/pkg/errors"
"github.com/nuclio/nuclio/pkg/loggersink"
"github.com/nuclio/nuclio/pkg/platform/factory"
"github.com/nuclio/nuclio/pkg/platformconfig"
// load all sinks
_ "github.com/nuclio/nuclio/pkg/sinks"
"github.com/nuclio/nuclio/pkg/version"
"github.com/nuclio/logger"
)
func Run(listenAddress string,
dockerKeyDir string,
defaultRegistryURL string,
defaultRunRegistryURL string,
platformType string,
noPullBaseImages bool,
defaultCredRefreshIntervalString string,
externalIPAddresses string,
defaultNamespace string,
offline bool,
platformConfigurationPath string,
templatesGitRepository string,
templatesGitRef string,
templatesArchiveAddress string,
templatesGitUsername string,
templatesGitPassword string,
templatesGithubAccessToken string,
defaultHTTPIngressHostTemplate string,
platformAuthorizationMode string,
dependantImageRegistryURL string) error {
var functionGitTemplateFetcher *functiontemplates.GitFunctionTemplateFetcher
var functionZipTemplateFetcher *functiontemplates.ZipFunctionTemplateFetcher
// read platform configuration
platformConfiguration, err := readPlatformConfiguration(platformConfigurationPath)
if err != nil {
return errors.Wrap(err, "Failed to read platform configuration")
}
// create a root logger
rootLogger, err := loggersink.CreateSystemLogger("dashboard", platformConfiguration)
if err != nil {
return errors.Wrap(err, "Failed to create logger")
}
// create a platform
platformInstance, err := factory.CreatePlatform(rootLogger, platformType, nil)
if err != nil {
return errors.Wrap(err, "Failed to create platform")
}
// create git fetcher
if templatesGitRepository != "" && templatesGitRef != "" {
rootLogger.DebugWith("Fetching function templates from git repository",
"templatesGitRepository", templatesGitRepository,
"templatesGitRef", templatesGitRef)
// attach credentials if given
templatesGitRepository = attachCredentialsToGitRepository(rootLogger,
templatesGitRepository,
templatesGitUsername,
templatesGitPassword,
templatesGithubAccessToken)
functionGitTemplateFetcher, err = functiontemplates.NewGitFunctionTemplateFetcher(rootLogger,
templatesGitRepository,
templatesGitRef)
if err != nil {
return errors.Wrap(err, "Failed to create git fetcher")
}
} else {
rootLogger.DebugWith("Missing git fetcher configuration, templates from git won't be fetched",
"gitTemplateRepository", templatesGitRepository,
"templatesGitRef", templatesGitRef)
}
// create zip fetcher
if templatesArchiveAddress != "" {
functionZipTemplateFetcher, err = functiontemplates.NewZipFunctionTemplateFetcher(rootLogger,
templatesArchiveAddress)
if err != nil {
return errors.Wrap(err, "Failed to create zip template fetcher")
}
}
// create pre-generated templates fetcher
functionTemplatesGeneratedFetcher, err := functiontemplates.NewGeneratedFunctionTemplateFetcher(rootLogger)
if err != nil {
return errors.Wrap(err, "Failed to create pre-generated fetcher")
}
// make repository for fetchers
functionTemplateFetchers := []functiontemplates.FunctionTemplateFetcher{functionTemplatesGeneratedFetcher}
if functionGitTemplateFetcher != nil {
functionTemplateFetchers = append(functionTemplateFetchers, functionGitTemplateFetcher)
}
if functionZipTemplateFetcher != nil {
functionTemplateFetchers = append(functionTemplateFetchers, functionZipTemplateFetcher)
}
functionTemplatesRepository, err := functiontemplates.NewRepository(rootLogger, functionTemplateFetchers)
if err != nil {
return errors.Wrap(err, "Failed to create repository out of given fetchers")
}
// set external ip addresses based if user passed overriding values or not
var splitExternalIPAddresses []string
if externalIPAddresses == "" {
splitExternalIPAddresses, err = platformInstance.GetDefaultInvokeIPAddresses()
if err != nil {
return errors.Wrap(err, "Failed to get default invoke ip addresses")
}
} else {
// "10.0.0.1,10.0.0.2" -> ["10.0.0.1", "10.0.0.2"]
splitExternalIPAddresses = strings.Split(externalIPAddresses, ",")
}
err = platformInstance.SetExternalIPAddresses(splitExternalIPAddresses)
if err != nil {
return errors.Wrap(err, "Failed to set external ip addresses")
}
if defaultHTTPIngressHostTemplate != "" {
platformInstance.SetDefaultHTTPIngressHostTemplate(defaultHTTPIngressHostTemplate)
}
rootLogger.InfoWith("Starting",
"name", platformInstance.GetName(),
"noPull", noPullBaseImages,
"offline", offline,
"defaultCredRefreshInterval", defaultCredRefreshIntervalString,
"defaultNamespace", defaultNamespace,
"platformConfiguration", platformConfiguration)
// see if the platform has anything to say about the namespace
defaultNamespace = platformInstance.ResolveDefaultNamespace(defaultNamespace)
version.Log(rootLogger)
trueValue := true
// create a web server configuration
webServerConfiguration := &platformconfig.WebServer{
Enabled: &trueValue,
ListenAddress: listenAddress,
}
server, err := dashboard.NewServer(rootLogger,
dockerKeyDir,
defaultRegistryURL,
defaultRunRegistryURL,
platformInstance,
noPullBaseImages,
webServerConfiguration,
getDefaultCredRefreshInterval(rootLogger, defaultCredRefreshIntervalString),
splitExternalIPAddresses,
defaultNamespace,
offline,
functionTemplatesRepository,
platformConfiguration,
defaultHTTPIngressHostTemplate,
platformAuthorizationMode,
dependantImageRegistryURL)
if err != nil {
return errors.Wrap(err, "Failed to create server")
}
err = server.Start()
if err != nil {
return errors.Wrap(err, "Failed to start server")
}
select {}
}
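// getDefaultCredRefreshInterval summarizes the parsing rules implemented below:
// the interval string is parsed with time.ParseDuration (e.g. "30m", "12h"),
// "none" disables refreshing entirely, and an empty or unparsable value falls
// back to the 12 hour default.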
func getDefaultCredRefreshInterval(logger logger.Logger, defaultCredRefreshIntervalString string) *time.Duration {
var defaultCredRefreshInterval time.Duration
defaultInterval := 12 * time.Hour
// if set to "none" - no refresh interval
if defaultCredRefreshIntervalString == "none" {
return nil
}
// if unspecified, default to 12 hours
if defaultCredRefreshIntervalString == "" {
return &defaultInterval
}
// try to parse the refresh interval - if failed
defaultCredRefreshInterval, err := time.ParseDuration(defaultCredRefreshIntervalString)
if err != nil {
logger.WarnWith("Failed to parse default credential refresh interval, defaulting",
"given", defaultCredRefreshIntervalString,
"default", defaultInterval)
return &defaultInterval
}
return &defaultCredRefreshInterval
}
func readPlatformConfiguration(configurationPath string) (*platformconfig.Config, error) {
platformConfigurationReader, err := platformconfig.NewReader()
if err != nil {
return nil, errors.Wrap(err, "Failed to create platform configuration reader")
}
return platformConfigurationReader.ReadFileOrDefault(configurationPath)
}
// create new repo URL with the credentials inside of it (when credentials are passed)
// example: https://github.com/owner/repo.git -> https://<USERNAME>:<PASSWORD>@github.com/owner/repo.git
func attachCredentialsToGitRepository(logger logger.Logger, repo, username, password, accessToken string) string {
if accessToken != "" {
username = accessToken
password = "x-oauth-basic"
} else if username == "" || password == "" {
return repo
}
splitRepo := strings.Split(repo, "//")
if len(splitRepo) != 2 |
return strings.Join([]string{splitRepo[0], "//", username, ":", password, "@", splitRepo[1]}, "")
}
| {
logger.WarnWith("Unknown git repository structure. Skipping credentials attachment", "repo", repo)
return repo
} |
khr_dedicated_allocation.rs | // Copyright (c) 2017, Dennis Hamester <[email protected]>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
// FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
//! [`VK_KHR_dedicated_allocation`](https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VK_KHR_dedicated_allocation)
use core::ptr;
use libc::c_void;
use vk;
pub const VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION: u32 = 3;
pub const VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME: &'static [u8; 28] = b"VK_KHR_dedicated_allocation\x00";
pub const VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME_STR: &'static str = "VK_KHR_dedicated_allocation";
/// See [`VkMemoryDedicatedRequirementsKHR`](https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VkMemoryDedicatedRequirementsKHR)
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkMemoryDedicatedRequirementsKHR {
pub sType: vk::VkStructureType,
pub pNext: *mut c_void,
pub prefersDedicatedAllocation: vk::VkBool32,
pub requiresDedicatedAllocation: vk::VkBool32,
}
impl Default for VkMemoryDedicatedRequirementsKHR {
fn | () -> Self {
VkMemoryDedicatedRequirementsKHR {
sType: vk::VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,
pNext: ptr::null_mut(),
prefersDedicatedAllocation: Default::default(),
requiresDedicatedAllocation: Default::default(),
}
}
}
/// See [`VkMemoryDedicatedAllocateInfoKHR`](https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VkMemoryDedicatedAllocateInfoKHR)
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkMemoryDedicatedAllocateInfoKHR {
pub sType: vk::VkStructureType,
pub pNext: *const c_void,
pub image: vk::VkImage,
pub buffer: vk::VkBuffer,
}
impl Default for VkMemoryDedicatedAllocateInfoKHR {
fn default() -> Self {
VkMemoryDedicatedAllocateInfoKHR {
sType: vk::VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
pNext: ptr::null(),
image: Default::default(),
buffer: Default::default(),
}
}
}
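// Illustrative note (not part of this module's API): per the extension spec, a dedicated
// allocation is requested by chaining a `VkMemoryDedicatedAllocateInfoKHR` into the `pNext`
// chain of `VkMemoryAllocateInfo`, with at most one of `image` or `buffer` set to a valid handle.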
| default |
main.rs | use mongodb::bson::{doc, Document, };
#[macro_use]
extern crate rocket;
use rocket::fairing::AdHoc;
use rocket_cors::{AllowedHeaders, AllowedOrigins};
use rocket::http::Method;
use rocket::{get, routes};
mod config;
mod mdb;
mod shared;
mod crypto;
mod user;
#[get("/")]
pub async fn index() -> &'static str {
    "CO2 Emissions"
}
#[launch]
async fn rocket() -> _ {
println!("Initializing config...");
let config_figment = config::get_figment().expect("Initializing config failed");
let config: config::Config = config_figment.extract().expect("Initializing config failed");
println!("Connecting to database...");
let mongo = mdb::MongoDb::new(&config.db_connection, Some(String::from("MongoDB")), &config.db_name).await.expect("Creating DB pool failed");
mongo.get_con().run_command(doc! { "ping": 1 }, None).await.unwrap();
println!("Connected to mongodb");
let allowed_origins = AllowedOrigins::all();
let cors = rocket_cors::CorsOptions {
allowed_origins,
allowed_methods: vec![
Method::Get,
Method::Post,
Method::Patch,
Method::Put,
Method::Delete,
Method::Head,
Method::Options].into_iter().map(From::from).collect(),
allowed_headers: AllowedHeaders::all(),
allow_credentials: true,
..Default::default()
}
.to_cors().unwrap();
| user::register::register_route,
user::login::login_route
])
.register("/", vec![rocketjson::error::get_catcher()])
.attach(AdHoc::config::<config::Config>())
.attach(cors)
.manage(mongo)
} |
rocket::custom(config_figment)
.mount("/", routes![
index, |
abstract_dataset.py | import abc
from abc import abstractmethod
class Dataset(abc.ABC):
def __init__(
self,
input_params,
with_labels=False,
):
self.batch_size = input_params.batch_size
self.buffer_size = input_params.buffer_size
if with_labels:
self.train_dataset = self.load_data_with_labels()
else:
self.train_dataset = self.load_data()
@abstractmethod
def load_data(self):
raise NotImplementedError
@abstractmethod
def load_data_with_labels(self):
raise NotImplementedError |
def __iter__(self):
return iter(self.train_dataset) |
|
BitcoinTransactionDetail.ts | import BigNumber from 'bignumber.js';
import { Transform, Type } from 'class-transformer';
class | {
@Transform(value => new BigNumber(value), { toClassOnly: true })
value: BigNumber;
addresses: string[];
hex: string;
get address(): string | null {
return this.addresses.length > 0 ? this.addresses[0] : null;
}
}
export class BitcoinTransactionDetail {
txid: string;
confirmations: number;
fees: string;
version: number;
@Transform(value => new BigNumber(value), { toClassOnly: true })
blockHeight: BigNumber;
@Transform(value => new BigNumber(value), { toClassOnly: true })
blockTime: BigNumber;
@Type(() => Output)
vin: Output[];
@Type(() => Output)
vout: Output[];
get from(): string[] {
return BitcoinTransactionDetail.txAddresses(this.vin);
}
get to(): string[] {
return BitcoinTransactionDetail.txAddresses(this.vout);
}
private static txAddresses(array: Output[]): string[] {
return array
.reduce((acc, output) => [...acc, ...output.addresses], [] as string[]) // Flatmap
      .reduce((acc, addr) => (acc.indexOf(addr) >= 0 ? acc : [...acc, addr]), [] as string[]); // Unique
}
}
| Output |
mark_fixed.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for marking a testcase as fixed."""
from flask import request
from handlers import base_handler_flask
from handlers.testcase_detail import show
from libs import handler_flask
from libs import helpers
def mark(testcase):
"""Mark the testcase as fixed."""
testcase.fixed = 'Yes'
testcase.open = False
testcase.put()
helpers.log('Marked testcase %s as fixed' % testcase.key.id(),
helpers.MODIFY_OPERATION)
return testcase
class Handler(base_handler_flask.Handler):
"""Handler that marks a testcase as fixed."""
| def post(self):
"""Mark the testcase as fixed."""
testcase_id = request.get('testcaseId')
testcase = helpers.get_testcase(testcase_id)
mark(testcase)
return self.render_json(show.get_testcase_detail(testcase)) | @handler_flask.post(handler_flask.JSON, handler_flask.JSON)
@handler_flask.require_csrf_token
@handler_flask.check_admin_access |
mod.rs | // Copyright 2020 The Vega Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Common building blocks that compose runtimes for the Vega blockchain.
//!
//! Each runtime contains specific services that execute transactions, process events,
//! provide user APIs, etc. A unified *dispatcher* redirects all the calls
//! and requests to an appropriate runtime environment. Thus, a blockchain interacts with the
//! dispatcher, and not with specific runtime instances.
//!
//! # Artifacts
//!
//! An artifact creates service instances similar to classes in object-oriented programming.
//! Artifacts reflect the assumption that deploying business logic onto the blockchain
//! may take a long time, may fail, end up with differing results on different nodes, etc.
//! Thus, artifacts decouple the complex *deployment* of the business logic from its instantiation
//! (which we assume is simple / non-fallible).
//!
//! Depending on the runtime, an artifact may have an additional specification required
//! for its deployment; e.g., files to be compiled.
//!
//! Each runtime has its own [artifacts] registry. Users can create services from the stored
//! artifacts. An artifact identifier is required by the runtime to construct service instances.
//! In other words, an artifact identifier is similar to a class name, and a specific
//! service instance - to a class instance. A single artifact may be used to instantiate
//! zero or more services.
//!
//! The format of the artifact ID is uniform across runtimes - it is essentially a string.
//! But the runtime may customize artifact deployment via runtime-specific deployment arguments.
//!
//! # Artifact Lifecycle
//!
//! 1. An artifact is assembled in a way specific to the runtime. For example, an artifact may
//! be compiled from sources and packaged using an automated build system.
//!
//! 2. The artifact with the service is deployed on the blockchain. The decision to deploy the
//! artifact and the deployment spec are usually performed by the blockchain administrators.
//! The corresponding logic is customizable via the [supervisor service](#supervisor-service).
//! What deployment entails depends on the runtime; e.g., the artifact may be downloaded
//! by each Vega node, verified for integrity and then added into the execution environment.
//!
//! 3. For each node, an artifact may be deployed either asynchronously or synchronously, that is
//! in a blocking manner. The supervisor usually first commands a node to deploy the artifact
//! asynchronously via [`Mailbox`], once the decision to start deployment is reached
//! by the blockchain administrators. Asynchronous deployment speed and outcome may differ among
//! nodes.
//!
//! 4. The supervisor translates the local deployment outcomes into a consensus-agreed result.
//! For example, the supervisor may collect confirmations from the validator nodes that have
//! successfully deployed the artifact. Once all the validator nodes have sent
//! their confirmations, the artifact is *committed*. As a part of the service logic,
//! artifact commitment is completely deterministic, agreed via consensus,
//! and occurs at the same blockchain height for all nodes in the network.
//!
//! 5. Once the artifact is committed, every node in the network must have it deployed
//! in order to continue functioning.
//! If a node has not deployed the artifact previously, deployment becomes blocking. The node
//! does not participate in consensus or block processing until the deployment is completed
//! successfully. If the deployment is unsuccessful, the node stops indefinitely.
//! The deployment confirmation mechanics is built into the supervisor. Thus, it is reasonable
//! to assume that a deployment failure at this stage is local to the node and
//! could be fixed by the node admin.
//!
//! # Service Lifecycle
//!
//! 1. Once the artifact is committed, it is possible to instantiate the corresponding service.
//! Each instantiation request contains an ID of the previously deployed artifact,
//! a string instance ID, and instantiation arguments in a binary encoding
//! (by convention, Protobuf). As with the artifacts, the logic that controls instantiation
//! is encapsulated in the supervisor service.
//!
//! 2. During instantiation the service gets a numeric ID, which is used to reference
//! the service in transactions. The runtime can execute initialization logic defined
//! in the service artifact; e.g., the service may store some initial data in the storage,
//! check service dependencies, etc. If the service (or the enclosing runtime) signals that
//! the initialization failed, the service is considered not instantiated.
//!
//! 3. Once the service is instantiated, it can process transactions and interact with the
//! external users in other ways. Different services instantiated from the same artifact
//! are independent and have separate blockchain storages. Users can distinguish services
//! by their IDs; both numeric and string IDs are unique within a blockchain. (Note that
//! the transition to the "active" state is not immediate;
//! see [*Service State Transitions*](#service-state-transitions) section below.)
//!
//! 4. Active service instances can be stopped by a corresponding request to the dispatcher.
//! A stopped service no longer participates in business logic, i.e. it does not process
//! transactions, events, does not interact with the users in any way.
//! Service data becomes unavailable for the other services, but still exists. The service name
//! and identifier remain reserved for the stopped service and can't be used again for
//! adding new services.
//!
//! The dispatcher is responsible for persisting artifacts and services across node restarts.
//!
//! ## Service Hooks
//!
//! Each active service is called before any transactions in the block are processed;
//! we call this `before_transactions` hook. The service may modify the blockchain state in this hook.
//! Likewise, each active service is called after all transactions in the block have been processed
//! (we call this `after_transactions` hook). These calls are quite similar to transactions:
//!
//! - Each call is isolated
//! - Service logic may return an error, meaning that all state changes made within the hook
//! are rolled back
//! - The service may call other services within the hook
//!
//! ## Service State Transitions
//!
//! Transitions between service states (including service creation) occur once the block
//! with the transition is committed; the effect of a transition is not immediate. This means
//! that, for example, an instantiated service cannot process transactions or internal calls
//! in the block with instantiation, but can in the following block. Likewise, the service hooks
//! (`before_transactions` / `after_transactions`) are *not* called in the block with service
//! instantiation.
//!
//! When the service is stopped, the reverse is true:
//!
//! - The service continues processing transactions until the end of the block containing
//! the stop command
//! - The service hooks *are* called for the service in this block
//!
//! # Transaction Lifecycle
//!
//! 1. A Vega client creates a transaction message which includes two parts. The first part is
//! the [`CallInfo`] - information about a method to call. The second part is the
//! serialized method parameters as a payload.
//! The client then signs the message using the Ed25519 signature system.
//!
//! 2. The client transmits the message to one of the Vega nodes in the network.
//!
//! 3. The node verifies correctness of the transaction signature and retransmits it to
//! the other network nodes if it is correct.
//!
//! 4. When the consensus algorithm finds a feasible candidate for the next block
//! of transactions, transactions in this block are passed to the dispatcher for execution.
//!
//! 5. The dispatcher uses a lookup table to find the corresponding [`Runtime`] for each transaction
//! by the [`instance_id`] recorded in the transaction message. If the corresponding runtime exists,
//! the dispatcher passes the transaction into this runtime for immediate [execution].
//!
//! 6. After execution the transaction [execution status] is written into the blockchain.
//!
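//! For illustration only (the field names match the `CallInfo` type exported from this module;
//! the values are made up), a transaction addresses its call target roughly as:
//!
//! ```text
//! CallInfo { instance_id: 1001, method_id: 0 } // which service instance, which method
//! ```
//!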
//! # Data Migration Lifecycle
//!
//! Service data can be migrated to a newer version of the service artifact.
//! See [`migrations` module docs] for details.
//!
//! # Supervisor Service
//!
//! A supervisor service is a service that has additional privileges. This service
//! allows deploying artifacts and instantiating new services after the blockchain is launched
//! and running. Moreover, the Supervisor service allows updating the configuration or stopping
//! active service instances.
//! Other than that, it looks like an ordinary service.
//!
//! To enable adding new artifacts / services to the blockchain after its start, the supervisor
//! must be one of the builtin service instances.
//!
//! The supervisor service is distinguished by its numerical ID, which must be set
//! to [`SUPERVISOR_INSTANCE_ID`]. Services may assume that transactions originating from
//! the supervisor service are authorized by the blockchain administrators. This can be used
//! in services: if a certain transaction originates from a service with `SUPERVISOR_INSTANCE_ID`,
//! it is authorized by the administrators.
//!
//! [`AnyTx`]: struct.AnyTx.html
//! [`CallInfo`]: struct.CallInfo.html
//! [`instance_id`]: struct.CallInfo.html#structfield.instance_id
//! [`Runtime`]: trait.Runtime.html
//! [execution]: trait.Runtime.html#execute
//! [execution status]: struct.ExecutionStatus.html
//! [artifacts]: struct.ArtifactId.html
//! [`migrations` module docs]: migrations/index.html
//! [`SUPERVISOR_INSTANCE_ID`]: constant.SUPERVISOR_INSTANCE_ID.html
//! [`Mailbox`]: struct.Mailbox.html
//! [`ExecutionError`]: struct.ExecutionError.html
//! [`instance_id`]: struct.CallInfo.html#structfield.method_id
pub(crate) use self::dispatcher::Dispatcher;
pub use self::{
blockchain_data::{BlockchainData, SnapshotExt},
dispatcher::{Action as DispatcherAction, Mailbox, Schema as DispatcherSchema},
error::{
catch_panic, CallSite, CallType, CommonError, CoreError, ErrorKind, ErrorMatch,
ExecutionError, ExecutionFail, ExecutionStatus,
},
execution_context::{ExecutionContext, ExecutionContextUnstable, SupervisorExtensions},
types::{
AnyTx, ArtifactId, ArtifactSpec, ArtifactState, ArtifactStatus, CallInfo, Caller,
CallerAddress, InstanceId, InstanceQuery, InstanceSpec, InstanceState, InstanceStatus,
MethodId,
},
};
// Re-export for serializing `ExecutionError` via `serde`.
#[doc(hidden)]
pub use error::execution_error::ExecutionErrorSerde;
pub mod migrations;
pub mod versioning;
use vega_merkledb::Snapshot;
use futures::Future;
use semver::Version;
use std::fmt;
use self::migrations::{InitMigrationError, MigrationScript};
use crate::blockchain::Blockchain;
mod blockchain_data;
mod dispatcher;
pub(crate) mod error;
mod execution_context;
mod types;
/// Persistent identifier of a supervisor service instance.
///
/// Only a service with this ID can perform actions with the dispatcher.
pub const SUPERVISOR_INSTANCE_ID: InstanceId = 0;
/// List of predefined runtimes.
///
/// This type is not intended to be exhaustively matched. It can be extended in the future
/// without breaking the semver compatibility.
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
#[repr(u32)]
pub enum RuntimeIdentifier {
/// Built-in Rust runtime.
Rust = 0,
/// Vega Java Binding runtime.
Java = 1,
/// Never actually generated.
#[doc(hidden)]
__NonExhaustive,
}
impl From<RuntimeIdentifier> for u32 {
fn from(id: RuntimeIdentifier) -> Self |
}
impl RuntimeIdentifier {
fn transform(id: u32) -> Result<Self, ()> {
match id {
0 => Ok(RuntimeIdentifier::Rust),
1 => Ok(RuntimeIdentifier::Java),
_ => Err(()),
}
}
}
impl fmt::Display for RuntimeIdentifier {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
RuntimeIdentifier::Rust => f.write_str("Rust runtime"),
RuntimeIdentifier::Java => f.write_str("Java runtime"),
RuntimeIdentifier::__NonExhaustive => unreachable!("Never actually generated"),
}
}
}
/// Runtime environment for Vega services.
///
/// You can read more about the life cycle of services and transactions
/// [in the module docs](index.html#service-life-cycle).
///
/// Using this trait, you can extend the Vega blockchain with the services written in
/// different languages.
///
/// # Stability
///
/// This trait is considered unstable; breaking changes may be introduced to it within
/// semantically non-breaking releases. However, it is guaranteed that such changes
/// will require reasonable amount of updates from the `Runtime` implementations.
///
/// # Call Ordering
///
/// Within the lifetime of a `Runtime`, calls to its methods have the following order:
///
/// ```text
/// LIFE ::= initialize (GENESIS | RESUME) BLOCK* shutdown
/// GENESIS ::= (deploy_artifact | initiate_adding_service update_service_status)* after_commit
/// RESUME ::= (deploy_artifact | update_service_status)* on_resume
/// BLOCK* ::= PROPOSAL+ COMMIT
/// PROPOSAL ::= before_transactions* (execute | initiate_adding_service)* after_transactions*
/// COMMIT ::= deploy_artifact* update_service_status* after_commit
/// ```
///
/// The ordering for the "read-only" method `is_artifact_deployed` in relation
/// to the lifecycle above is not specified.
///
/// # Consensus and Local Methods
///
/// The following methods should return the same result if provided arguments are the same for all
/// the nodes in the blockchain network:
///
/// - `before_transactions`
/// - `execute`
/// - `after_transactions`
/// - `initiate_adding_service`
/// - `initiate_resuming_service`
///
/// All these methods should also produce the same changes to the storage via
/// the provided `ExecutionContext`. Discrepancy in node behavior within these methods may lead
/// to a consensus failure.
///
/// Other `Runtime` methods may execute logic specific to the node.
///
/// # Handling Panics
///
/// Panics in the `Runtime` methods are **not** caught. A panic in the runtime method will cause
/// the node termination. To catch panics in the Rust code and convert them to unchecked execution
/// errors, use the [`catch_panic`](fn.catch_panic.html) method.
#[allow(unused_variables)]
pub trait Runtime: Send + fmt::Debug + 'static {
/// Initializes the runtime, providing a `Blockchain` instance for further use.
///
/// Calling this method always takes place before calling any other `Runtime` methods.
/// The `initialize` method is called *exactly once* during the `Runtime` lifetime.
///
/// The default implementation does nothing.
fn initialize(&mut self, blockchain: &Blockchain) {}
/// Notifies the runtime that the dispatcher has completed re-initialization after the
/// node restart. Re-initialization includes restoring the deployed artifacts / started service
/// instances for all the runtimes.
///
/// This method is called *maximum once* during the `Runtime` lifetime. It is called iff
/// the genesis block was already created before the node start (e.g. after node relaunch).
/// The blockchain state will remain the same between the `initialize` and `on_resume` calls.
///
/// The default implementation does nothing.
fn on_resume(&mut self) {}
/// A request to deploy an artifact with the given identifier and an additional deploy
/// specification.
///
/// This method is called *once* for a specific artifact during the `Runtime` lifetime:
///
/// - For newly added artifacts, the method is called as the supervisor service decides to deploy
/// the artifact.
/// - After the node restart, the method is called for all the previously deployed artifacts.
///
    /// The core guarantees that there will be no request to deploy an artifact which is already
    /// deployed, so the runtime should not report such an attempt as an `ExecutionError`, but should
    /// consider it a bug in the core.
// TODO: Elaborate constraints on `Runtime::deploy_artifact` futures (ECR-3840)
fn deploy_artifact(
&mut self,
artifact: ArtifactId,
deploy_spec: Vec<u8>,
) -> Box<dyn Future<Item = (), Error = ExecutionError>>;
/// Returns `true` if the specified artifact is deployed in this runtime.
fn is_artifact_deployed(&self, id: &ArtifactId) -> bool;
/// Runs the constructor of a new service instance with the given specification
/// and initial arguments. The constructor can initialize the storage of the service,
/// check for dependencies, etc.
///
/// The constructor runs *exactly once* during the blockchain lifetime for each successfully
/// initialized service instance. That is to say, the constructor is *not* called on a node
/// restart.
///
/// At the same time, when `initiate_adding_service` is called,
/// there is no guarantee that the service will eventually get to the blockchain via
/// `update_service_status`. The consensus may accept an alternative block proposal, in which
/// the service is not instantiated or instantiated with different parameters.
///
/// The `update_service_status` call always takes place
/// in the closest committed block, i.e., before the nearest `Runtime::after_commit()`.
/// The dispatcher routes transactions and `before_transactions` / `after_transactions`
/// events to the service only after `update_service_status()` is called with the same instance
/// specification.
///
/// The runtime should discard the instantiated service instance after completing this method.
/// Otherwise, if the service is successfully committed in the block, it will duplicate the one
/// instantiated in the runtime. There may be compelling reasons for the runtime to retain
/// the instantiated service, for example, if creating an instance takes a very long time.
/// In this case, the "garbage" services may be removed from the runtime in `after_commit`
/// because of the time dependence between `update_service_status` and `after_commit` described above.
///
/// The runtime should commit long-term resources for the service only after the
/// `update_service_status()` call. In other words, the runtime must be sure that the service
/// has been committed to the blockchain.
///
/// # Return Value
///
/// Returning an error is a signal from the `Runtime` that the
/// service instantiation has failed. As a rule of thumb, changes made by the
/// `initiate_adding_service` method will be rolled back after such a signal. The exact logic of
/// the rollback is determined by the supervisor.
///
/// An error is one of the expected / handled outcomes of the service instantiation procedure.
/// Thus, verifying prerequisites
/// for instantiation and reporting corresponding failures should be performed at this stage
/// rather than in `update_service_status`.
///
/// The core guarantees that there will be no request to start a service instance which is
/// already running; thus, the runtime should not report such an attempt as an `ExecutionError`,
/// but should consider it a bug in the core.
fn initiate_adding_service(
&self,
context: ExecutionContext<'_>,
artifact: &ArtifactId,
parameters: Vec<u8>,
) -> Result<(), ExecutionError>;
/// Resumes a previously stopped service instance with the given specification and arguments.
/// As an example, arguments can be used to update the service configuration.
///
/// The dispatcher ensures that a service instance with the given specification has been
/// previously stopped and has the proper artifact version and name.
///
/// This method has the same workflow as the [`initiate_adding_service`] method.
/// The main difference is that `initiate_adding_service` should call the service
/// `initialize` method, while `initiate_resuming_service` should call the service `resume` method.
///
/// [`initiate_adding_service`]: #tymethod.initiate_adding_service
fn initiate_resuming_service(
&self,
context: ExecutionContext<'_>,
artifact: &ArtifactId,
parameters: Vec<u8>,
) -> Result<(), ExecutionError>;
/// Notifies the runtime about changes in the state of a service instance.
///
/// This method notifies the runtime about state changes of a specific service instance in the
/// dispatcher. The runtime should perform the corresponding actions according to the changes
/// in the service instance state.
///
/// Method is called for a specific service instance during the `Runtime` lifetime in the
/// following cases:
///
/// - For newly added or modified existing instances, this method is called when the fork
/// with the corresponding changes is committed.
/// - After a node restart, the method is called for all existing service instances regardless
/// of their statuses.
///
/// For newly added instances, the invocation of this method guarantees that
/// `initiate_adding_service()` has already been called with the same `spec` and returned
/// `Ok(())`. The results of that call (i.e., changes to the blockchain state) will be
/// persisted.
///
/// # Arguments
///
/// `snapshot` is a storage snapshot at the latest height when the method is called:
///
/// - Suppose the service is committed during the node operation. Then `snapshot` is taken at the
/// moment the fork applies for which the corresponding `initiate_adding_service`
/// has been performed.
/// - Suppose the service is stopped during the node operation. Then `snapshot` is taken at
/// the moment the fork applies for which the corresponding request has been performed.
/// - Suppose the service resumes after the node restart. Then `snapshot` is the storage state
/// at the node start.
///
/// For the built-in services, on the first node start `snapshot` will not contain information
/// on the genesis block. Thus, using some core APIs, like requesting the current
/// blockchain height, will result in a panic.
///
/// `status` is the resulting status of the service instance.
///
/// # Return value
///
/// This method does not return a value, meaning that any error that occurs during the execution
/// of this method is considered critical and should lead to stopping the node.
///
/// It is assumed that if `initiate_adding_service` did not return an error previously,
/// the runtime is able to update the service status, and under normal conditions no error is
/// expected to happen.
fn update_service_status(
&mut self,
snapshot: &dyn Snapshot,
spec: &InstanceSpec,
status: &InstanceStatus,
);
/// Gets the migration script to migrate the data of the service to the state usable
/// by a newer version of the artifact.
///
/// An implementation of this method should be idempotent, i.e., return the same script or error
/// for the same input.
///
/// # Invariants Ensured by the Caller
///
/// - `new_artifact` is deployed in the runtime
/// - `data_version < new_artifact.version`
///
/// # Return Value
///
/// - An error signals that the runtime does not know how to migrate the service
/// to a newer version.
/// - `Ok(Some(_))` provides a script to execute against service data. After the script
/// is executed, [`data_version`] of the service will be updated to `end_version`
/// from the script. `end_version` does not need to correspond to the version of `new_artifact`,
/// or to a version of an artifact deployed on the blockchain in general.
/// - `Ok(None)` means that the service does not require data migration. `data_version`
/// of the service will be updated to the version of `new_artifact` once the block
/// with the migration command is committed; see [*Service State Transitions*] for details.
///
/// [`data_version`]: struct.InstanceState.html#field.data_version
/// [*Service State Transitions*]: index.html#service-state-transitions
fn migrate(
&self,
new_artifact: &ArtifactId,
data_version: &Version,
) -> Result<Option<MigrationScript>, InitMigrationError>;
/// Dispatches payload to the method of a specific service instance.
///
/// The call is dispatched iff the service is considered active at the moment.
/// See [*Service State Transitions*] for more details.
///
/// # Arguments
///
/// The service instance name and method ID are provided in the `call_info` argument and
/// the interface name is provided as the corresponding field of the `context` argument.
///
/// A blank interface name denotes the "default" interface; it should be supported by all
/// services. The methods of the default interface are defined by the service artifact
/// and thus may have different signatures for different services.
///
/// A non-empty interface name denotes an interface defined externally to the service instance.
/// In this case, the name is a Protobuf flavor of a fully qualified name
/// (e.g., `vega.Configure`), and the method signatures can be inferred from the name
/// using an interface definition.
///
/// **Note**. Support of non-default interfaces is experimental; as such, an IDL for them
/// is not stabilized yet.
///
/// # Return Value
///
/// - If the service does not implement an interface, returns a `NoSuchInterface` error.
/// - If the interface does not have a method, returns a `NoSuchMethod` error.
///
/// An error returned from this method will lead to the rollback of all changes
/// in the fork enclosed in the `context`.
///
/// [*Service State Transitions*]: index.html#service-state-transitions
fn execute(
&self,
context: ExecutionContext<'_>,
method_id: MethodId,
arguments: &[u8],
) -> Result<(), ExecutionError>;
/// Notifies a service stored in the present runtime about the beginning of the block. Allows
/// the service to modify the blockchain state before any transaction in the block is processed.
///
/// `before_transactions` is called for every service active at the beginning of the block
/// exactly once for each block. Services that will be instantiated within the block do **not**
/// receive a call. The method is not called for the genesis block.
///
/// # Return Value
///
/// An error returned from this method will lead to the rollback of all changes
/// in the fork enclosed in the `context`.
fn before_transactions(&self, context: ExecutionContext<'_>) -> Result<(), ExecutionError>;
/// Notifies a service stored in this runtime about the end of the block. Allows the method
/// to modify the blockchain state after all transactions in the block are processed.
///
/// `after_transactions` is called for every service active at the beginning of the block
/// exactly once for each block. Services instantiated within the block do **not** receive a call.
/// Services instantiated within the genesis block are activated **immediately** and
/// thus `after_transactions` is invoked for them in the genesis block.
///
/// # Return value
///
/// An error returned from this method will lead to the rollback of all changes
/// in the fork enclosed in the `context`.
fn after_transactions(&self, context: ExecutionContext<'_>) -> Result<(), ExecutionError>;
/// Notifies the runtime about commit of a new block.
///
/// This method is called *after* all `update_service_status` calls related
/// to the same block. The method is called exactly once for each block in the blockchain,
/// including the genesis block.
///
/// A block is not yet persisted when this method is called. The `snapshot` provides up-to-date
/// block information. It corresponds exactly to the information that is eventually persisted.
///
/// `mailbox` is used to send async commands to the dispatcher. This mechanism is used, e.g.,
/// by the supervisor service to enqueue artifact deployment. A runtime may ignore `mailbox`
/// if its services (or the runtime itself) do not require privileged access to the dispatcher.
fn after_commit(&mut self, snapshot: &dyn Snapshot, mailbox: &mut Mailbox);
/// Notifies the runtime that it has to shut down.
///
/// This callback is invoked sequentially for each runtime just before the node shutdown.
/// Thus, the runtimes can stop themselves gracefully.
///
/// Invoking this callback is the last operation for the runtime.
/// This method is a part of the shutdown process. Thus, the runtimes can block and perform
/// heavy operations here if needed.
fn shutdown(&mut self) {}
}
impl<T: Runtime> From<T> for Box<dyn Runtime> {
fn from(value: T) -> Self {
Box::new(value)
}
}
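// Illustrative sketch of the blanket conversion above; `MyRuntime` is a
// hypothetical unit struct implementing `Runtime` and is not part of this crate:
//
// let boxed: Box<dyn Runtime> = MyRuntime.into();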
/// A subset of [`Runtime`]s with a well-known runtime identifier.
///
/// [`Runtime`]: trait.Runtime.html
pub trait WellKnownRuntime: Runtime {
/// Identifier of the present runtime.
const ID: u32;
}
// TODO: Rethink visibility [ECR-3913]
/// Instance of [`Runtime`] with the corresponding ID.
///
/// [`Runtime`]: trait.Runtime.html
#[derive(Debug)]
pub struct RuntimeInstance {
/// Identifier of the enclosed runtime.
pub id: u32,
/// Enclosed `Runtime` object.
pub instance: Box<dyn Runtime>,
/// No-op field for forward compatibility.
non_exhaustive: (),
}
impl RuntimeInstance {
/// Constructs a new `RuntimeInstance` object.
pub fn new(id: u32, instance: Box<dyn Runtime>) -> Self {
Self {
id,
instance,
non_exhaustive: (),
}
}
}
impl<T: WellKnownRuntime> From<T> for RuntimeInstance {
fn from(runtime: T) -> Self {
RuntimeInstance::new(T::ID, runtime.into())
}
}
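// Illustrative sketch, continuing the hypothetical `MyRuntime` example above:
// a runtime that declares a well-known identifier can be converted directly into
// a `RuntimeInstance` carrying that identifier.
//
// impl WellKnownRuntime for MyRuntime {
//     const ID: u32 = 42;
// }
//
// let instance: RuntimeInstance = MyRuntime.into();
// assert_eq!(instance.id, 42);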
/// Instance descriptor contains information to access the running service instance.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct InstanceDescriptor<'a> {
/// A unique numeric ID of the service instance.
/// [Read more.](struct.InstanceSpec.html#structfield.id)
pub id: InstanceId,
/// A unique name of the service instance.
/// [Read more.](struct.InstanceSpec.html#structfield.name)
pub name: &'a str,
/// No-op field for forward compatibility.
non_exhaustive: (),
}
impl<'a> InstanceDescriptor<'a> {
/// Creates a new `InstanceDescriptor` object.
pub fn new(id: InstanceId, name: &'a str) -> Self {
Self {
id,
name,
non_exhaustive: (),
}
}
}
impl fmt::Display for InstanceDescriptor<'_> {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "{}:{}", self.id, self.name)
}
}
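// Illustrative sketch: the `Display` implementation above renders the descriptor
// as "<id>:<name>" (the values here are hypothetical).
//
// let descriptor = InstanceDescriptor::new(100, "token");
// assert_eq!(descriptor.to_string(), "100:token");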
| {
id as Self
} |
_compat.py | # -*- coding: utf-8 -*-
"""
Salt compatibility code
"""
# pylint: disable=import-error,unused-import,invalid-name,W0231,W0233
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import binascii
import logging
import sys
# Import 3rd-party libs
from salt.exceptions import SaltException
from salt.ext.six import binary_type, integer_types, string_types, text_type
from salt.ext.six.moves import StringIO, cStringIO
log = logging.getLogger(__name__)
try:
# Python >2.5
import xml.etree.cElementTree as ElementTree
except Exception: # pylint: disable=broad-except
try:
# Python >2.5
import xml.etree.ElementTree as ElementTree
except Exception: # pylint: disable=broad-except
try:
# normal cElementTree install
import elementtree.cElementTree as ElementTree
except Exception: # pylint: disable=broad-except
try:
# normal ElementTree install
import elementtree.ElementTree as ElementTree
except Exception: # pylint: disable=broad-except
ElementTree = None
# True if we are running on Python 3.
PY3 = sys.version_info.major == 3
if PY3:
import builtins
exceptions = builtins
else:
import exceptions
if ElementTree is not None:
if not hasattr(ElementTree, "ParseError"):
class ParseError(Exception):
"""
older versions of ElementTree do not have ParseError
"""
ElementTree.ParseError = ParseError
def text_(s, encoding="latin-1", errors="strict"):
"""
If ``s`` is an instance of ``binary_type``, return
``s.decode(encoding, errors)``, otherwise return ``s``
"""
return s.decode(encoding, errors) if isinstance(s, binary_type) else s
def bytes_(s, encoding="latin-1", errors="strict"):
"""
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``s``
"""
return s.encode(encoding, errors) if isinstance(s, text_type) else s
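# Illustrative sketch of the two helpers above (assuming the default latin-1
# encoding); values that already have the target type are passed through unchanged:
#
#     text_(b"caf\xe9") == u"café"
#     text_(u"café") == u"café"
#     bytes_(u"café") == b"caf\xe9"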
def ascii_native_(s):
"""
Python 3: If ``s`` is an instance of ``text_type``, return
``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode('ascii')``, otherwise return ``str(s)``
"""
if isinstance(s, text_type):
s = s.encode("ascii")
return str(s, "ascii", "strict") if PY3 else s
def native_(s, encoding="latin-1", errors="strict"):
"""
Python 3: If ``s`` is an instance of ``text_type``, return ``s``, otherwise
return ``str(s, encoding, errors)``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
"""
if PY3:
out = s if isinstance(s, text_type) else str(s, encoding, errors)
else:
out = s.encode(encoding, errors) if isinstance(s, text_type) else str(s)
return out
def string_io(data=None): # cStringIO can't handle unicode
"""
Pass data through to stringIO module and return result
"""
try:
return cStringIO(bytes(data))
except (UnicodeEncodeError, TypeError):
return StringIO(data)
try:
if PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
except ImportError:
ipaddress = None
class IPv6AddressScoped(ipaddress.IPv6Address):
"""
Represent and manipulate single IPv6 Addresses.
Scope-aware version
"""
def __init__(self, address):
"""
Instantiate a new IPv6 address object. Scope is moved to an attribute 'scope'.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') == IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) == IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
:param address:
"""
# pylint: disable-all
if not hasattr(self, "_is_packed_binary"):
# This method (below) won't be around for some Python 3 versions
# and we need to check this differently anyway
self._is_packed_binary = lambda p: isinstance(p, bytes)
# pylint: enable-all
if isinstance(address, string_types) and "%" in address:
buff = address.split("%")
if len(buff) != 2:
raise SaltException('Invalid IPv6 address: "{}"'.format(address))
address, self.__scope = buff
else:
self.__scope = None
if sys.version_info.major == 2:
ipaddress._BaseAddress.__init__(self, address)
ipaddress._BaseV6.__init__(self, address)
else:
# Python 3.4 fix. Versions higher are simply not affected
# https://github.com/python/cpython/blob/3.4/Lib/ipaddress.py#L543-L544
self._version = 6
self._max_prefixlen = ipaddress.IPV6LENGTH
# Efficient constructor from integer.
if isinstance(address, integer_types):
self._check_int_address(address)
self._ip = address
elif self._is_packed_binary(address):
self._check_packed_address(address, 16)
self._ip = int(binascii.hexlify(address), 16)
else:
address = str(address)
if "/" in address:
raise ipaddress.AddressValueError(
"Unexpected '/' in {}".format(address)
)
self._ip = self._ip_int_from_string(address)
def | (self, data):
"""
Check if data is hexadecimal packed
:param data:
:return:
"""
packed = False
if isinstance(data, bytes) and len(data) == 16 and b":" not in data:
try:
packed = bool(int(binascii.hexlify(data), 16))
except (ValueError, TypeError):
pass
return packed
@property
def scope(self):
"""
Return scope of IPv6 address.
:return:
"""
return self.__scope
def __str__(self):
return text_type(
self._string_from_ip_int(self._ip)
+ ("%" + self.scope if self.scope is not None else "")
)
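# Illustrative sketch of the scope handling above (hypothetical values): a zone
# identifier after "%" is stripped into the `scope` attribute and re-appended by
# __str__; without one, `scope` is None and the output matches a plain IPv6Address.
#
#     addr = IPv6AddressScoped("fe80::1%eth0")
#     addr.scope == "eth0"
#     str(addr) == "fe80::1%eth0"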
class IPv6InterfaceScoped(ipaddress.IPv6Interface, IPv6AddressScoped):
"""
Scope-aware version of IPv6Interface.
"""
def __init__(self, address):
if (
PY3
and isinstance(address, (bytes, int))
or not PY3
and isinstance(address, int)
):
IPv6AddressScoped.__init__(self, address)
self.network = ipaddress.IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
addr = ipaddress._split_optional_netmask(address)
IPv6AddressScoped.__init__(self, addr[0])
self.network = ipaddress.IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
if ipaddress:
ipaddress.IPv6Address = IPv6AddressScoped
if sys.version_info.major == 2:
ipaddress.IPv6Interface = IPv6InterfaceScoped
| _is_packed_binary |
index.js | import React from "react";
import ReactDOM from "react-dom";
import App from "./App";
import "./index.css";
ReactDOM.render(
<App />,
document.getElementById("root") // eslint-disable-line no-undef | ); |
|
aws_driver.py | # type: ignore
import json
import uuid
from json import JSONDecodeError
from typing import Tuple, Dict, List
import boto3
from melange.drivers.interfaces import Queue, Topic, MessagingDriver, Message
class AWSDriver(MessagingDriver):
def __init__(self, **kwargs):
super().__init__()
self.max_number_of_messages = kwargs.get("max_number_of_messages", 10)
self.visibility_timeout = kwargs.get("visibility_timeout", 100)
self.wait_time_seconds = kwargs.get("wait_time_seconds", 10)
def declare_topic(self, topic_name) -> Topic:
sns = boto3.resource("sns")
topic = sns.create_topic(Name=topic_name)
return topic
def get_queue(self, queue_name) -> Queue:
sqs_res = boto3.resource("sqs")
return sqs_res.get_queue_by_name(QueueName=queue_name)
def declare_queue(
self,
queue_name: str,
*topics_to_bind: Topic,
dead_letter_queue_name: str = None,
**kwargs
) -> Tuple[Queue, Queue]:
try:
queue = self.get_queue(queue_name)
except Exception:
queue = self._create_queue(queue_name, content_based_deduplication="true")
if topics_to_bind:
statements = []
for topic in topics_to_bind:
statement = {
"Sid": "Sid{}".format(uuid.uuid4()),
"Effect": "Allow",
"Principal": "*",
"Resource": queue.attributes["QueueArn"], |
statements.append(statement)
subscription = topic.subscribe(
Protocol="sqs",
Endpoint=queue.attributes[
"QueueArn"
], # , Attributes={"RawMessageDelivery": "true"}
)
if kwargs.get("filter_events"):
filter_policy = {"event_type": kwargs["filter_events"]}
else:
filter_policy = {}
subscription.set_attributes(
AttributeName="FilterPolicy",
AttributeValue=json.dumps(filter_policy),
)
policy = {
"Version": "2012-10-17",
"Id": "sqspolicy",
"Statement": statements,
}
queue.set_attributes(Attributes={"Policy": json.dumps(policy)})
dead_letter_queue = None
if dead_letter_queue_name:
try:
dead_letter_queue = self.get_queue(dead_letter_queue_name)
except Exception:
dead_letter_queue = self._create_queue(
dead_letter_queue_name, content_based_deduplication="true"
)
redrive_policy = {
"deadLetterTargetArn": dead_letter_queue.attributes["QueueArn"],
"maxReceiveCount": "4",
}
queue.set_attributes(
Attributes={"RedrivePolicy": json.dumps(redrive_policy)}
)
return queue, dead_letter_queue
def _create_queue(self, queue_name: str, **kwargs) -> Queue:
sqs_res = boto3.resource("sqs")
fifo = queue_name.endswith(".fifo")
attributes = {}
if fifo:
attributes["FifoQueue"] = "true"
attributes["ContentBasedDeduplication"] = (
"true" if kwargs.get("content_based_deduplication") else "false"
)
queue = sqs_res.create_queue(QueueName=queue_name, Attributes=attributes)
return queue
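# Illustrative sketch (hypothetical queue names): a ".fifo" suffix makes
# _create_queue request a FIFO queue, and content-based deduplication is only
# applied in that case; standard queues are created with empty attributes.
#
#     self._create_queue("orders.fifo", content_based_deduplication=True)
#     self._create_queue("orders")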
def retrieve_messages(self, queue: Queue, attempt_id=None) -> List[Message]:
kwargs = dict(
MaxNumberOfMessages=self.max_number_of_messages,
VisibilityTimeout=self.visibility_timeout,
WaitTimeSeconds=self.wait_time_seconds,
MessageAttributeNames=["All"],
AttributeNames=["All"],
)
if attempt_id:
kwargs["ReceiveRequestAttemptId"] = attempt_id
messages = queue.receive_messages(**kwargs)
# We need to differentiate here whether the message came from SNS or SQS
return [self._construct_message(message) for message in messages]
def queue_publish(
self,
content: str,
queue,
event_type_name: str = None,
message_group_id: str = None,
message_deduplication_id: str = None,
):
kwargs = dict(MessageBody=json.dumps({"Message": content}))
if event_type_name:
kwargs["MessageAttributes"] = {
"event_type": {"DataType": "String", "StringValue": event_type_name}
}
if message_group_id:
kwargs["MessageGroupId"] = message_group_id
if message_deduplication_id:
kwargs["MessageDeduplicationId"] = message_deduplication_id
queue.send_message(**kwargs)
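# Illustrative usage sketch (hypothetical driver and queue objects):
#
#     driver = AWSDriver(wait_time_seconds=5)
#     queue, _ = driver.declare_queue("orders.fifo")
#     driver.queue_publish(
#         content='{"order_id": 1}',
#         queue=queue,
#         event_type_name="order_created",
#         message_group_id="orders",
#     )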
def publish(
self,
content: str,
topic: Topic,
event_type_name: str,
extra_attributes: Dict = None,
):
args = dict(
Message=content,
MessageAttributes={
"event_type": {"DataType": "String", "StringValue": event_type_name}
},
)
if extra_attributes:
if "subject" in extra_attributes:
args["Subject"] = extra_attributes["subject"]
if "message_attributes" in extra_attributes:
args["MessageAttributes"].update(extra_attributes["message_attributes"])
if "message_structure" in extra_attributes:
args["MessageStructure"] = extra_attributes["message_structure"]
response = topic.publish(**args)
if "MessageId" not in response:
raise ConnectionError("Could not send the event to the SNS TOPIC")
def acknowledge(self, message: Message) -> None:
message.metadata.delete()
def close_connection(self) -> None:
pass
def delete_queue(self, queue: Queue) -> None:
queue.delete()
def delete_topic(self, topic: Topic) -> None:
topic.delete()
def _construct_message(self, message) -> Message:
body = message.body
manifest = ""
try:
message_content = json.loads(body)
if "Message" in message_content:
content = message_content["Message"]
# Does the content have more attributes? If so, it is very likely that the message came from a non-raw
# SNS redirection
if "MessageAttributes" in message_content:
manifest = (
message_content["MessageAttributes"]
.get("event_type", {})
.get("Value")
or ""
)
else:
content = message_content
except JSONDecodeError:
content = body
manifest = (
manifest
or message.message_attributes.get("event_type", {}).get("StringValue")
or ""
)
return Message(message.message_id, content, message, manifest) | "Action": "sqs:SendMessage",
"Condition": {"ArnEquals": {"aws:SourceArn": topic.arn}},
} |
express_route_circuit_py3.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class ExpressRouteCircuit(Resource):
| """ExpressRouteCircuit resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param sku: The SKU.
:type sku: ~azure.mgmt.network.v2018_04_01.models.ExpressRouteCircuitSku
:param allow_classic_operations: Allow classic operations
:type allow_classic_operations: bool
:param circuit_provisioning_state: The CircuitProvisioningState state of
the resource.
:type circuit_provisioning_state: str
:param service_provider_provisioning_state: The
ServiceProviderProvisioningState state of the resource. Possible values
are 'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
Possible values include: 'NotProvisioned', 'Provisioning', 'Provisioned',
'Deprovisioning'
:type service_provider_provisioning_state: str or
~azure.mgmt.network.v2018_04_01.models.ServiceProviderProvisioningState
:param authorizations: The list of authorizations.
:type authorizations:
list[~azure.mgmt.network.v2018_04_01.models.ExpressRouteCircuitAuthorization]
:param peerings: The list of peerings.
:type peerings:
list[~azure.mgmt.network.v2018_04_01.models.ExpressRouteCircuitPeering]
:param service_key: The ServiceKey.
:type service_key: str
:param service_provider_notes: The ServiceProviderNotes.
:type service_provider_notes: str
:param service_provider_properties: The ServiceProviderProperties.
:type service_provider_properties:
~azure.mgmt.network.v2018_04_01.models.ExpressRouteCircuitServiceProviderProperties
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param gateway_manager_etag: The GatewayManager Etag.
:type gateway_manager_etag: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'ExpressRouteCircuitSku'},
'allow_classic_operations': {'key': 'properties.allowClassicOperations', 'type': 'bool'},
'circuit_provisioning_state': {'key': 'properties.circuitProvisioningState', 'type': 'str'},
'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
'authorizations': {'key': 'properties.authorizations', 'type': '[ExpressRouteCircuitAuthorization]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'service_key': {'key': 'properties.serviceKey', 'type': 'str'},
'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
'service_provider_properties': {'key': 'properties.serviceProviderProperties', 'type': 'ExpressRouteCircuitServiceProviderProperties'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, sku=None, allow_classic_operations: bool=None, circuit_provisioning_state: str=None, service_provider_provisioning_state=None, authorizations=None, peerings=None, service_key: str=None, service_provider_notes: str=None, service_provider_properties=None, provisioning_state: str=None, gateway_manager_etag: str=None, **kwargs) -> None:
super(ExpressRouteCircuit, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.sku = sku
self.allow_classic_operations = allow_classic_operations
self.circuit_provisioning_state = circuit_provisioning_state
self.service_provider_provisioning_state = service_provider_provisioning_state
self.authorizations = authorizations
self.peerings = peerings
self.service_key = service_key
self.service_provider_notes = service_provider_notes
self.service_provider_properties = service_provider_properties
self.provisioning_state = provisioning_state
self.gateway_manager_etag = gateway_manager_etag
self.etag = None |
|
backfill_job.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
from collections import OrderedDict
from datetime import datetime
from sqlalchemy.orm.session import Session, make_transient
from airflow import executors, models
from airflow.exceptions import (
AirflowException, DagConcurrencyLimitReached, NoAvailablePoolSlot, PoolNotFound,
TaskConcurrencyLimitReached,
)
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagPickle, DagRun
from airflow.ti_deps.dep_context import BACKFILL_QUEUED_DEPS, DepContext
from airflow.utils import timezone
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.db import provide_session
from airflow.utils.state import State
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order, and lasts for
as long as it takes for the set of task instances to be completed.
"""
ID_PREFIX = 'backfill_'
ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
STATES_COUNT_AS_RUNNING = (State.RUNNING, State.QUEUED)
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
class _DagRunTaskStatus:
"""
Internal status of the backfill job. This class is intended to be instantiated
only within a BackfillJob instance and will track the execution of tasks,
e.g. running, skipped, succeeded, failed, etc. Information about the dag runs
related to the backfill job is also tracked in this structure,
e.g. finished runs, etc. Any other status information related to the
execution of dag runs / tasks can be included in this structure since it makes
it easier to pass around.
"""
# TODO(edgarRd): AIRFLOW-1444: Add consistency check on counts
def __init__(self,
to_run=None,
running=None,
skipped=None,
succeeded=None,
failed=None,
not_ready=None,
deadlocked=None,
active_runs=None,
executed_dag_run_dates=None,
finished_runs=0,
total_runs=0,
):
"""
:param to_run: Tasks to run in the backfill
:type to_run: dict[tuple[string, string, datetime.datetime], airflow.models.TaskInstance]
:param running: Maps running task instance key to task instance object
:type running: dict[tuple[string, string, datetime.datetime], airflow.models.TaskInstance]
:param skipped: Tasks that have been skipped
:type skipped: set[tuple[string, string, datetime.datetime]]
:param succeeded: Tasks that have succeeded so far
:type succeeded: set[tuple[string, string, datetime.datetime]]
:param failed: Tasks that have failed
:type failed: set[tuple[string, string, datetime.datetime]]
:param not_ready: Tasks not ready for execution
:type not_ready: set[tuple[string, string, datetime.datetime]]
:param deadlocked: Deadlocked tasks
:type deadlocked: set[tuple[string, string, datetime.datetime]]
:param active_runs: Active dag runs at a certain point in time
:type active_runs: list[DagRun]
:param executed_dag_run_dates: Datetime objects for the executed dag runs
:type executed_dag_run_dates: set[datetime.datetime]
:param finished_runs: Number of finished runs so far
:type finished_runs: int
:param total_runs: Number of total dag runs able to run
:type total_runs: int
"""
self.to_run = to_run or OrderedDict()
self.running = running or dict()
self.skipped = skipped or set()
self.succeeded = succeeded or set()
self.failed = failed or set()
self.not_ready = not_ready or set()
self.deadlocked = deadlocked or set()
self.active_runs = active_runs or list()
self.executed_dag_run_dates = executed_dag_run_dates or set()
self.finished_runs = finished_runs
self.total_runs = total_runs
def __init__(
self,
dag,
start_date=None,
end_date=None,
mark_success=False,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
pool=None,
delay_on_limit_secs=1.0,
verbose=False,
conf=None,
rerun_failed_tasks=False,
run_backwards=False,
*args, **kwargs):
"""
:param dag: DAG object.
:type dag: airflow.models.DAG
:param start_date: start date for the backfill date range.
:type start_date: datetime.datetime
:param end_date: end date for the backfill date range.
:type end_date: datetime.datetime
:param mark_success: flag whether to mark the task auto success.
:type mark_success: bool
:param donot_pickle: whether to skip pickling the DAG before sending it to the executor
:type donot_pickle: bool
:param ignore_first_depends_on_past: whether to ignore depend on past
:type ignore_first_depends_on_past: bool
:param ignore_task_deps: whether to ignore the task dependency
:type ignore_task_deps: bool
:param pool: pool to backfill
:type pool: str
:param delay_on_limit_secs: time in seconds to wait before the next attempt to create
a dag run when the max_active_runs limit is reached
:type delay_on_limit_secs: float
:param verbose: flag whether to display verbose messages to the backfill console
:type verbose: bool
:param conf: a dictionary of key-value pairs that the user can pass to the backfill
:type conf: dict
:param rerun_failed_tasks: flag to whether to
auto rerun the failed task in backfill
:type rerun_failed_tasks: bool
:param run_backwards: Whether to process the dates from most to least recent
:type run_backwards: bool
:param args:
:param kwargs:
"""
self.dag = dag
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.donot_pickle = donot_pickle
self.ignore_first_depends_on_past = ignore_first_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.pool = pool
self.delay_on_limit_secs = delay_on_limit_secs
self.verbose = verbose
self.conf = conf
self.rerun_failed_tasks = rerun_failed_tasks
self.run_backwards = run_backwards
super().__init__(*args, **kwargs)
def _update_counters(self, ti_status):
"""
Updates the counters per state of the tasks that were running. Can re-add
tasks to the to_run collection if required.
:param ti_status: the internal status of the backfill job tasks
:type ti_status: BackfillJob._DagRunTaskStatus
"""
for key, ti in list(ti_status.running.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.FAILED:
self.log.error("Task instance %s failed", ti)
ti_status.failed.add(key)
ti_status.running.pop(key)
continue
# special case: if the task needs to run again put it back
elif ti.state == State.UP_FOR_RETRY:
self.log.warning("Task instance %s is up for retry", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: if the task needs to be rescheduled put it back
elif ti.state == State.UP_FOR_RESCHEDULE:
self.log.warning("Task instance %s is up for reschedule", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: The state of the task can be set to NONE by the task itself
# when it reaches concurrency limits. It could also happen when the state
# is changed externally, e.g. by clearing tasks from the ui. We need to cover
# for that as otherwise those tasks would fall outside of the scope of
# the backfill suddenly.
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance %s state was set to none externally or "
"reaching concurrency limits. Re-adding task to queue.",
ti
)
ti.set_state(State.SCHEDULED)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
def _manage_executor_state(self, running):
"""
Checks if the executor agrees with the state of task instances
that are running
:param running: dict of key, task to verify
"""
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
if key not in running:
self.log.warning(
"%s state %s not in running=%s",
key, state, running.values()
)
continue
ti = running[key]
ti.refresh_from_db()
self.log.debug("Executor state: %s task %s", state, ti)
if state == State.FAILED or state == State.SUCCESS:
if ti.state == State.RUNNING or ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
ti.handle_failure(msg)
@provide_session
def _get_dag_run(self, run_date: datetime, dag: DAG, session: Session = None):
"""
Returns a dag run for the given run date, which will be matched to an existing
dag run if available or create a new dag run otherwise. If the max_active_runs
limit is reached, this function will return None.
:param run_date: the execution date for the dag run
:param dag: DAG
:param session: the database session object
:return: a DagRun in state RUNNING or None
"""
run_id = BackfillJob.ID_FORMAT_PREFIX.format(run_date.isoformat())
# consider max_active_runs but ignore when running subdags
respect_dag_max_active_limit = (True
if (dag.schedule_interval and
not dag.is_subdag)
else False)
current_active_dag_count = dag.get_num_active_runs(external_trigger=False)
# check if we are scheduling on top of an already existing dag_run
# we could find a "scheduled" run instead of a "backfill"
run = DagRun.find(dag_id=dag.dag_id,
execution_date=run_date,
session=session)
if run is not None and len(run) > 0:
run = run[0]
if run.state == State.RUNNING:
respect_dag_max_active_limit = False
else:
run = None
# enforce max_active_runs limit for dag, special cases already
# handled by respect_dag_max_active_limit
if (respect_dag_max_active_limit and
current_active_dag_count >= dag.max_active_runs):
return None
run = run or dag.create_dagrun(
run_id=run_id,
execution_date=run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
conf=self.conf,
)
# set required transient field
run.dag = dag
# explicitly mark as backfill and running
run.state = State.RUNNING
run.run_id = run_id
run.verify_integrity(session=session)
return run
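# Illustrative sketch: the backfill run_id built above embeds the execution date,
# e.g. for a hypothetical date:
#
#     BackfillJob.ID_FORMAT_PREFIX.format(datetime(2020, 1, 1).isoformat())
#     == "backfill_2020-01-01T00:00:00"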
@provide_session
def _task_instances_for_dag_run(self, dag_run, session=None):
"""
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: airflow.models.DagRun
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
# for some reason, if we don't refresh the dag run, the reference to it is lost
dag_run.refresh_from_db()
make_transient(dag_run)
try:
for ti in dag_run.get_task_instances():
# all tasks part of the backfill are scheduled to run
if ti.state == State.NONE:
ti.set_state(State.SCHEDULED, session=session, commit=False)
if ti.state != State.REMOVED:
tasks_to_run[ti.key] = ti
session.commit()
except Exception:
session.rollback()
raise
return tasks_to_run
def _log_progress(self, ti_status):
self.log.info(
'[backfill progress] | finished run %s of %s | tasks waiting: %s | succeeded: %s | '
'running: %s | failed: %s | skipped: %s | deadlocked: %s | not ready: %s',
ti_status.finished_runs, ti_status.total_runs, len(ti_status.to_run), len(ti_status.succeeded),
len(ti_status.running), len(ti_status.failed), len(ti_status.skipped), len(ti_status.deadlocked),
len(ti_status.not_ready)
)
self.log.debug(
"Finished dag run loop iteration. Remaining tasks %s",
ti_status.to_run.values()
)
@provide_session
def _process_backfill_task_instances(self,
ti_status,
executor,
pickle_id,
start_date=None, session=None):
"""
Process a set of task instances from a set of dag runs. Special handling is done
to account for different task instance states that could be present when running
them in a backfill process.
:param ti_status: the internal status of the job
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to run the task instances
:type executor: BaseExecutor
:param pickle_id: the pickle_id if dag is pickled, None otherwise
:type pickle_id: int
:param start_date: the start date of the backfill job
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
:return: the list of execution_dates for the finished dag runs
:rtype: list
"""
executed_run_dates = []
while ((len(ti_status.to_run) > 0 or len(ti_status.running) > 0) and
len(ti_status.deadlocked) == 0):
self.log.debug("*** Clearing out not_ready list ***")
ti_status.not_ready.clear()
# we need to execute the tasks bottom to top
# or leaf to root, as otherwise tasks might be
# determined deadlocked while they are actually
# waiting for their upstream to finish
@provide_session
def _per_task_process(task, key, ti, session=None):
ti.refresh_from_db()
task = self.dag.get_task(ti.task_id, include_subdags=True)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.log.debug(
"Task instance to run %s state %s", ti, ti.state)
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
# guard against externally modified task instances or
# in case max concurrency has been reached at task runtime
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance {} state was set to None "
"externally. This should not happen"
)
ti.set_state(State.SCHEDULED, session=session)
if self.rerun_failed_tasks:
# Rerun failed tasks or upstream-failed tasks
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with state {state}".format(ti=ti,
state=ti.state))
if key in ti_status.running:
ti_status.running.pop(key)
# Reset the failed task in backfill to scheduled state
ti.set_state(State.SCHEDULED, session=session)
else:
# Default behaviour which works for subdag.
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with {state} state".format(ti=ti,
state=ti.state))
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
backfill_context = DepContext(
deps=BACKFILL_QUEUED_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
ti.refresh_from_db(lock_for_update=True, session=session)
# Is the task runnable? -- then run it
# the dependency checker can change states of tis
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=self.verbose):
if executor.has_task(ti):
self.log.debug(
"Task Instance %s already in executor "
"waiting for queue to clear",
ti
)
else:
self.log.debug('Sending %s to executor', ti)
# Skip scheduled state, we are executing immediately
ti.state = State.QUEUED
ti.queued_dttm = timezone.utcnow() if not ti.queued_dttm else ti.queued_dttm
session.merge(ti)
cfg_path = None
if executor.__class__ in (executors.LocalExecutor,
executors.SequentialExecutor):
cfg_path = tmp_configuration_copy()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool,
cfg_path=cfg_path)
ti_status.running[key] = ti
ti_status.to_run.pop(key)
session.commit()
return
if ti.state == State.UPSTREAM_FAILED:
self.log.error("Task instance %s upstream failed", ti)
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
# special case
if ti.state == State.UP_FOR_RETRY:
self.log.debug(
"Task instance %s retry period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
return
# special case
if ti.state == State.UP_FOR_RESCHEDULE:
self.log.debug(
"Task instance %s reschedule period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
return
# all remaining tasks
self.log.debug('Adding %s to not_ready', ti)
ti_status.not_ready.add(key)
try:
for task in self.dag.topological_sort(include_subdag_tasks=True):
for key, ti in list(ti_status.to_run.items()):
if task.task_id != ti.task_id:
continue
pool = session.query(models.Pool) \
.filter(models.Pool.pool == task.pool) \
.first()
if not pool:
raise PoolNotFound('Unknown pool: {}'.format(task.pool))
open_slots = pool.open_slots(session=session)
if open_slots <= 0:
raise NoAvailablePoolSlot(
"Not scheduling since there are "
"%s open slots in pool %s".format(
open_slots, task.pool))
num_running_task_instances_in_dag = DAG.get_num_task_instances(
self.dag_id,
states=self.STATES_COUNT_AS_RUNNING,
)
if num_running_task_instances_in_dag >= self.dag.concurrency:
raise DagConcurrencyLimitReached(
"Not scheduling since DAG concurrency limit "
"is reached."
)
if task.task_concurrency:
num_running_task_instances_in_task = DAG.get_num_task_instances(
dag_id=self.dag_id,
task_ids=[task.task_id],
states=self.STATES_COUNT_AS_RUNNING,
)
if num_running_task_instances_in_task >= task.task_concurrency:
raise TaskConcurrencyLimitReached(
"Not scheduling since Task concurrency limit "
"is reached."
)
_per_task_process(task, key, ti)
except (NoAvailablePoolSlot, DagConcurrencyLimitReached, TaskConcurrencyLimitReached) as e:
self.log.debug(e)
# execute the tasks in the queue
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run and there are no running tasks then the backfill
# is deadlocked
if (ti_status.not_ready and
ti_status.not_ready == set(ti_status.to_run) and
len(ti_status.running) == 0):
self.log.warning(
"Deadlock discovered for ti_status.to_run=%s",
ti_status.to_run.values()
)
ti_status.deadlocked.update(ti_status.to_run.values())
ti_status.to_run.clear()
# check executor state
self._manage_executor_state(ti_status.running)
# update the task counters
self._update_counters(ti_status=ti_status)
# update dag run state
_dag_runs = ti_status.active_runs[:]
for run in _dag_runs:
run.update_state(session=session)
if run.state in State.finished():
ti_status.finished_runs += 1
ti_status.active_runs.remove(run)
executed_run_dates.append(run.execution_date)
self._log_progress(ti_status)
# return updated status
return executed_run_dates
@provide_session
def _collect_errors(self, ti_status, session=None):
err = ''
if ti_status.failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(ti_status.failed))
if ti_status.deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=False),
session=session,
verbose=self.verbose) !=
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=True),
session=session,
verbose=self.verbose)
for t in ti_status.deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks have succeeded:\n{}\n'.format(ti_status.succeeded)
err += ' These tasks are running:\n{}\n'.format(ti_status.running)
err += ' These tasks have failed:\n{}\n'.format(ti_status.failed)
err += ' These tasks are skipped:\n{}\n'.format(ti_status.skipped)
err += ' These tasks are deadlocked:\n{}\n'.format(ti_status.deadlocked)
return err
@provide_session
def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
start_date, session=None):
"""
Computes the dag runs and their respective task instances for
the given run dates and executes the task instances.
Returns a list of execution dates of the dag runs that were executed.
:param run_dates: Execution dates for dag runs
:type run_dates: list
:param ti_status: internal BackfillJob status structure used to track the progress of task instances
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to use, it must be previously started
:type executor: BaseExecutor
:param pickle_id: numeric id of the pickled dag, None if not pickled
:type pickle_id: int
:param start_date: backfill start date
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
"""
for next_run_date in run_dates:
for dag in [self.dag] + self.dag.subdags:
dag_run = self._get_dag_run(next_run_date, dag, session=session)
tis_map = self._task_instances_for_dag_run(dag_run,
session=session)
if dag_run is None:
continue
ti_status.active_runs.append(dag_run)
ti_status.to_run.update(tis_map or {})
processed_dag_run_dates = self._process_backfill_task_instances(
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
@provide_session
def _set_unfinished_dag_runs_to_failed(self, dag_runs, session=None):
"""
Go through the dag_runs and update the state based on the task_instance state.
Then set DAG runs that are not finished to failed.
:param dag_runs: DAG runs
:param session: session
:return: None
"""
for dag_run in dag_runs:
dag_run.update_state()
if dag_run.state not in State.finished():
dag_run.set_state(State.FAILED)
session.merge(dag_run)
@provide_session
def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date,
end_date=self.bf_end_date)
if self.run_backwards:
tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past]
if tasks_that_depend_on_past:
raise AirflowException(
'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
",".join(tasks_that_depend_on_past)))
run_dates = run_dates[::-1]
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [run_date for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates]
self._execute_for_run_dates(run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
remaining_dates = (
ti_status.total_runs - len(ti_status.executed_dag_run_dates)
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise AirflowException(err)
if remaining_dates > 0:
|
except (KeyboardInterrupt, SystemExit):
self.log.warning("Backfill terminated by user.")
# TODO: we will need to terminate running task instances and set the
# state to failed.
self._set_unfinished_dag_runs_to_failed(ti_status.active_runs)
finally:
session.commit()
executor.end()
self.log.info("Backfill done. Exiting.")
| self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id
)
time.sleep(self.delay_on_limit_secs) |
range_test.go | package stats
import "testing"
func | (t *testing.T) {
data := Data{1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 0, 6, 5, 4, 43, 53, 4, 3, 43}
r, _ := Range(data)
if r != 53 {
t.Errorf("Range(data) is block")
}
}
| TestRange |
scalar_field.rs | use crate::{
ast,
types::{DefaultAttribute, FieldWithArgs, OperatorClassStore, ScalarField, ScalarType, SortOrder},
walkers::{EnumWalker, ModelWalker, Walker},
OperatorClass, ParserDatabase, ScalarFieldType,
};
use diagnostics::Span;
use either::Either;
use super::IndexFieldWalker;
/// A scalar field, as part of a model.
#[derive(Debug, Copy, Clone)]
pub struct ScalarFieldWalker<'db> {
pub(crate) model_id: ast::ModelId,
pub(crate) field_id: ast::FieldId,
pub(crate) db: &'db ParserDatabase,
pub(crate) scalar_field: &'db ScalarField,
}
impl<'db> PartialEq for ScalarFieldWalker<'db> {
fn eq(&self, other: &Self) -> bool {
self.model_id == other.model_id && self.field_id == other.field_id
}
}
impl<'db> Eq for ScalarFieldWalker<'db> {}
impl<'db> ScalarFieldWalker<'db> {
/// The ID of the field node in the AST.
pub fn field_id(self) -> ast::FieldId {
self.field_id
}
/// The field node in the AST.
pub fn ast_field(self) -> &'db ast::Field {
&self.db.ast[self.model_id][self.field_id]
}
/// The name of the field.
pub fn name(self) -> &'db str {
self.ast_field().name()
}
/// The `@default()` AST attribute on the field, if any.
pub fn default_attribute(self) -> Option<&'db ast::Attribute> {
self.scalar_field
.default
.as_ref()
.map(|d| d.default_attribute)
.map(|id| &self.db.ast[id])
}
/// The final database name of the field. See crate docs for explanations on database names.
pub fn database_name(self) -> &'db str {
self.attributes()
.mapped_name
.map(|id| &self.db[id])
.unwrap_or_else(|| self.name())
}
/// Does the field have an `@default(autoincrement())` attribute?
pub fn is_autoincrement(self) -> bool {
self.default_value().map(|dv| dv.is_autoincrement()).unwrap_or(false)
}
/// Does the field define a primary key on its own?
pub fn is_single_pk(self) -> bool {
self.model().field_is_single_pk(self.field_id)
}
/// Is the field part of a compound primary key?
pub fn is_part_of_a_compound_pk(self) -> bool {
self.model().field_is_part_of_a_compound_pk(self.field_id)
}
/// Is there an `@ignore` attribute on the field?
pub fn is_ignored(self) -> bool {
self.attributes().is_ignored
}
/// Is the field optional / nullable?
pub fn is_optional(self) -> bool {
self.ast_field().arity.is_optional()
}
/// Is there an `@updatedAt` attribute on the field?
pub fn is_updated_at(self) -> bool {
self.attributes().is_updated_at
}
fn attributes(self) -> &'db ScalarField {
self.scalar_field
}
/// Is this field's type an enum? If yes, walk the enum.
pub fn field_type_as_enum(self) -> Option<EnumWalker<'db>> {
match self.scalar_field_type() {
ScalarFieldType::Enum(enum_id) => Some(Walker {
db: self.db,
id: enum_id,
}),
_ => None,
}
}
/// The name in the `@map(<name>)` attribute.
pub fn mapped_name(self) -> Option<&'db str> {
self.attributes().mapped_name.map(|id| &self.db[id])
}
/// The model that contains the field.
pub fn model(self) -> ModelWalker<'db> {
ModelWalker {
model_id: self.model_id,
db: self.db,
model_attributes: &self.db.types.model_attributes[&self.model_id],
}
}
/// (attribute scope, native type name, arguments, span)
///
/// For example: `@db.Text` would translate to ("db", "Text", &[], <the span>)
pub fn raw_native_type(self) -> Option<(&'db str, &'db str, &'db [String], Span)> {
let db = self.db;
self.attributes()
.native_type
.as_ref()
.map(move |(datasource_name, name, args, span)| (&db[*datasource_name], &db[*name], args.as_slice(), *span))
}
/// Is the type of the field `Unsupported("...")`?
pub fn is_unsupported(self) -> bool {
matches!(self.ast_field().field_type, ast::FieldType::Unsupported(_, _))
}
/// The `@default()` attribute of the field, if any.
pub fn default_value(self) -> Option<DefaultValueWalker<'db>> {
self.attributes().default.as_ref().map(|default| DefaultValueWalker {
model_id: self.model_id,
field_id: self.field_id,
db: self.db,
default,
})
}
/// The type of the field.
pub fn scalar_field_type(self) -> ScalarFieldType {
self.attributes().r#type
}
/// The type of the field in case it is a scalar type (not an enum, not a composite type).
pub fn scalar_type(self) -> Option<ScalarType> {
match &self.scalar_field.r#type {
ScalarFieldType::BuiltInScalar(scalar) => Some(*scalar),
_ => None,
}
}
}
/// An `@default()` attribute on a field.
#[derive(Clone, Copy)]
pub struct DefaultValueWalker<'db> {
pub(super) model_id: ast::ModelId,
pub(super) field_id: ast::FieldId,
pub(super) db: &'db ParserDatabase,
pub(super) default: &'db DefaultAttribute,
}
impl<'db> DefaultValueWalker<'db> {
/// The AST node of the attribute.
pub fn | (self) -> &'db ast::Attribute {
&self.db.ast[self.default.default_attribute]
}
/// The value expression in the `@default` attribute.
///
/// ```ignore
/// score Int @default(0)
/// ^
/// ```
pub fn value(self) -> &'db ast::Expression {
&self.ast_attribute().arguments.arguments[self.default.argument_idx].value
}
/// Is this an `@default(autoincrement())`?
pub fn is_autoincrement(self) -> bool {
matches!(self.value(), ast::Expression::Function(name, _, _) if name == "autoincrement")
}
/// Is this an `@default(cuid())`?
pub fn is_cuid(self) -> bool {
matches!(self.value(), ast::Expression::Function(name, _, _) if name == "cuid")
}
/// Is this an `@default(dbgenerated())`?
pub fn is_dbgenerated(self) -> bool {
matches!(self.value(), ast::Expression::Function(name, _, _) if name == "dbgenerated")
}
/// Is this an `@default(auto())`?
pub fn is_auto(self) -> bool {
matches!(self.value(), ast::Expression::Function(name, _, _) if name == "auto")
}
/// Is this an `@default(now())`?
pub fn is_now(self) -> bool {
matches!(self.value(), ast::Expression::Function(name, _, _) if name == "now")
}
/// Is this an `@default(sequence())`?
pub fn is_sequence(self) -> bool {
matches!(self.value(), ast::Expression::Function(name, _, _) if name == "sequence")
}
/// Is this an `@default(uuid())`?
pub fn is_uuid(self) -> bool {
matches!(self.value(), ast::Expression::Function(name, _, _) if name == "uuid")
}
/// The mapped name of the default value. Not applicable to all connectors. See crate docs for
/// details on mapped names.
///
/// ```ignore
/// name String @default("george", map: "name_default_to_george")
/// ^^^^^^^^^^^^^^^^^^^^^^^^
/// ```
pub fn mapped_name(self) -> Option<&'db str> {
self.default.mapped_name.map(|id| &self.db[id])
}
/// The field carrying the default attribute.
///
/// ```ignore
/// name String @default("george")
/// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/// ```
pub fn field(self) -> ScalarFieldWalker<'db> {
ScalarFieldWalker {
model_id: self.model_id,
field_id: self.field_id,
db: self.db,
scalar_field: &self.db.types.scalar_fields[&(self.model_id, self.field_id)],
}
}
}
/// An operator class defines the operators allowed in an index. Mostly
/// a PostgreSQL thing.
#[derive(Copy, Clone)]
pub struct OperatorClassWalker<'db> {
pub(crate) class: &'db OperatorClassStore,
pub(crate) db: &'db ParserDatabase,
}
impl<'db> OperatorClassWalker<'db> {
/// Gets the operator class of the indexed field.
///
/// ```ignore
/// @@index(name(ops: InetOps))
/// // ^ Either::Left(InetOps)
/// @@index(name(ops: raw("tsvector_ops")))
    /// //      ^ Either::Right("tsvector_ops")
    /// ```
pub fn get(self) -> Either<OperatorClass, &'db str> {
match self.class.inner {
Either::Left(class) => Either::Left(class),
Either::Right(id) => Either::Right(&self.db[id]),
}
}
}
/// A scalar field as referenced in a key specification (id, index or unique).
#[derive(Copy, Clone)]
pub struct ScalarFieldAttributeWalker<'db> {
pub(crate) model_id: ast::ModelId,
pub(crate) fields: &'db [FieldWithArgs],
pub(crate) db: &'db ParserDatabase,
pub(crate) field_arg_id: usize,
}
impl<'db> ScalarFieldAttributeWalker<'db> {
fn args(self) -> &'db FieldWithArgs {
&self.fields[self.field_arg_id]
}
/// The length argument on the field.
///
/// ```ignore
/// @@index(name(length: 10))
/// ^^
/// ```
pub fn length(self) -> Option<u32> {
self.args().length
}
    /// A custom operator class to control the operators caught by the index.
///
/// ```ignore
/// @@index([name(ops: InetOps)], type: Gist)
/// ^^^^^^^
/// ```
pub fn operator_class(self) -> Option<OperatorClassWalker<'db>> {
self.args()
.operator_class
.as_ref()
.map(|class| OperatorClassWalker { class, db: self.db })
}
/// The underlying field.
///
/// ```ignore
/// // either this
/// model Test {
/// id Int @id
/// name String
/// ^^^^^^^^^^^^^^^^^^
/// kind Int
///
/// @@index([name])
/// }
///
/// // or this
/// type A {
/// field String
/// ^^^^^^^^^^^^
/// }
///
/// model Test {
/// id Int @id
/// a A
///
/// @@index([a.field])
/// }
/// ```
pub fn as_index_field(self) -> IndexFieldWalker<'db> {
let path = &self.args().path;
let field_id = path.field_in_index();
match path.type_holding_the_indexed_field() {
None => {
let field_id = path.field_in_index();
let walker = self.db.walk_model(self.model_id).scalar_field(field_id);
IndexFieldWalker::new(walker)
}
Some(ctid) => {
let walker = self.db.walk_composite_type(ctid).field(field_id);
IndexFieldWalker::new(walker)
}
}
}
/// Gives the full path from the current model to the field included in the index.
/// For example, if the field is through two composite types:
///
/// ```ignore
/// type A {
/// field Int
/// }
///
/// type B {
/// a A
/// }
///
/// model C {
/// id Int @id
/// b B
///
/// @@index([b.a.field])
/// }
/// ```
///
/// The method would return a vector from model to the final field:
///
/// ```ignore
/// vec![("b", None), ("a", Some("B")), ("field", Some("A"))];
/// ```
///
/// The first part of the tuple is the name of the field, the second part is
/// the name of the composite type.
///
    /// This method prefers the Prisma-side naming, and should not be used when
/// writing to the database.
pub fn as_path_to_indexed_field(self) -> Vec<(&'db str, Option<&'db str>)> {
let path = &self.args().path;
let root = self.db.ast[self.model_id][path.root()].name.name.as_str();
let mut result = vec![(root, None)];
for (ctid, field_id) in path.path() {
let ct = &self.db.ast[*ctid];
let field = ct[*field_id].name.name.as_str();
result.push((field, Some(ct.name.name.as_str())));
}
result
}
/// Similar to the method [`as_path_to_indexed_field`], but prefers the
/// mapped names and is to be used when defining indices in the database.
///
/// [`as_path_to_indexed_field`]: struct.ScalarFieldAttributeWalker
pub fn as_mapped_path_to_indexed_field(self) -> Vec<(&'db str, Option<&'db str>)> {
let path = &self.args().path;
let root = {
let mapped = &self.db.types.scalar_fields[&(self.model_id, path.root())].mapped_name;
mapped
.and_then(|id| self.db.interner.get(id))
.unwrap_or_else(|| self.db.ast[self.model_id][path.root()].name.name.as_str())
};
let mut result = vec![(root, None)];
for (ctid, field_id) in path.path() {
let ct = &self.db.ast[*ctid];
let field = &self.db.types.composite_type_fields[&(*ctid, *field_id)]
.mapped_name
.and_then(|id| self.db.interner.get(id))
.unwrap_or_else(|| ct[*field_id].name.name.as_str());
result.push((field, Some(ct.name.name.as_str())));
}
result
}
/// The sort order (asc or desc) on the field.
///
/// ```ignore
/// @@index(name(sort: Desc))
/// ^^^^
/// ```
pub fn sort_order(&self) -> Option<SortOrder> {
self.args().sort_order
}
}
| ast_attribute |
canonical_serialization_test.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
// https://rust-lang.github.io/rust-clippy/master/index.html#blacklisted_name
// Disable it in tests so that we can use variable names such as 'foo' and 'bar'.
#![allow(clippy::blacklisted_name)]
#![allow(clippy::many_single_char_names)]
use super::*;
use byteorder::WriteBytesExt;
use failure::Result;
use std::u32;
// Do not change the test vectors. Please read the comment below.
const TEST_VECTOR_1: &str = "ffffffffffffffff060000006463584d4237640000000000000009000000000102\
03040506070805050505050505050505050505050505050505050505050505050505\
05050505630000000103000000010000000103000000161543030000000038150300\
0000160a05040000001415596903000000c9175a";
// Why do we need test vectors?
//
// 1. Sometimes it helps to catch common bugs between serialization and
// deserialization functions that would have been missed by a simple round trip test.
// For example, if there's a bug in a procedure shared by both the serialize and
// deserialize calls, a round-trip test might miss it.
//
// 2. It helps to catch code changes that inadvertently introduce breaking changes
// in the serialization format that are incompatible with what was generated in the
// past and would be missed by round-trip tests, or changes that are not backward
// compatible in the sense that they may fail to deserialize bytes generated in the past.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Addr(pub [u8; 32]);
impl Addr {
fn new(bytes: [u8; 32]) -> Self {
Addr(bytes)
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
struct Foo {
a: u64,
b: Vec<u8>,
c: Bar,
d: bool,
e: BTreeMap<Vec<u8>, Vec<u8>>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
struct | {
a: u64,
b: Vec<u8>,
c: Addr,
d: u32,
}
impl CanonicalSerialize for Foo {
fn serialize(&self, serializer: &mut impl CanonicalSerializer) -> Result<()> {
serializer
.encode_u64(self.a)?
.encode_variable_length_bytes(&self.b)?
.encode_struct(&self.c)?
.encode_bool(self.d)?
.encode_btreemap(&self.e)?;
Ok(())
}
}
impl CanonicalSerialize for Bar {
fn serialize(&self, serializer: &mut impl CanonicalSerializer) -> Result<()> {
serializer
.encode_u64(self.a)?
.encode_variable_length_bytes(&self.b)?
.encode_raw_bytes(&self.c.0)?
.encode_u32(self.d)?;
Ok(())
}
}
impl CanonicalDeserialize for Foo {
fn deserialize(deserializer: &mut impl CanonicalDeserializer) -> Result<Self> {
let a = deserializer.decode_u64()?;
let b = deserializer.decode_variable_length_bytes()?;
let c: Bar = deserializer.decode_struct::<Bar>()?;
let d: bool = deserializer.decode_bool()?;
let e: BTreeMap<Vec<u8>, Vec<u8>> = deserializer.decode_btreemap()?;
Ok(Foo { a, b, c, d, e })
}
}
impl CanonicalDeserialize for Bar {
fn deserialize(deserializer: &mut impl CanonicalDeserializer) -> Result<Self> {
let a = deserializer.decode_u64()?;
let b = deserializer.decode_variable_length_bytes()?;
let c = deserializer.decode_bytes_with_len(32)?;
let mut cc: [u8; 32] = [0; 32];
cc.copy_from_slice(c.as_slice());
let d = deserializer.decode_u32()?;
Ok(Bar {
a,
b,
c: Addr::new(cc),
d,
})
}
}
#[test]
fn test_btreemap_encode() {
let mut map = BTreeMap::new();
let value = vec![54, 20, 21, 200];
let key1 = vec![0]; // after serialization: [1, 0]
let key2 = vec![0, 6]; // after serialization: [2, 0, 6]
let key3 = vec![1]; // after serialization: [1, 1]
let key4 = vec![2]; // after serialization: [1, 2]
map.insert(key1.clone(), value.clone());
map.insert(key2.clone(), value.clone());
map.insert(key3.clone(), value.clone());
map.insert(key4.clone(), value.clone());
let serialized_bytes = SimpleSerializer::<Vec<u8>>::serialize(&map).unwrap();
let mut deserializer = SimpleDeserializer::new(&serialized_bytes);
// ensure the order was encoded in lexicographic order
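    // the encoded map is prefixed with a u32 entry count (4 entries here)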
assert_eq!(deserializer.raw_bytes.read_u32::<Endianness>().unwrap(), 4);
assert_eq!(deserializer.decode_variable_length_bytes().unwrap(), key1);
assert_eq!(deserializer.decode_variable_length_bytes().unwrap(), value);
assert_eq!(deserializer.decode_variable_length_bytes().unwrap(), key3);
assert_eq!(deserializer.decode_variable_length_bytes().unwrap(), value);
assert_eq!(deserializer.decode_variable_length_bytes().unwrap(), key4);
assert_eq!(deserializer.decode_variable_length_bytes().unwrap(), value);
assert_eq!(deserializer.decode_variable_length_bytes().unwrap(), key2);
assert_eq!(deserializer.decode_variable_length_bytes().unwrap(), value);
}
#[test]
fn test_serialization_roundtrip() {
let bar = Bar {
a: 50,
b: vec![10u8; 100],
c: Addr::new([3u8; 32]),
d: 12,
};
let mut map = BTreeMap::new();
map.insert(vec![0, 56, 21], vec![22, 10, 5]);
map.insert(vec![1], vec![22, 21, 67]);
map.insert(vec![20, 21, 89, 105], vec![201, 23, 90]);
let foo = Foo {
a: 1,
b: vec![32, 41, 190, 200, 2, 5, 90, 100, 123, 234, 159, 159, 101],
c: bar,
d: false,
e: map,
};
let mut serializer = SimpleSerializer::<Vec<u8>>::new();
foo.serialize(&mut serializer).unwrap();
let serialized_bytes = serializer.get_output();
let mut deserializer = SimpleDeserializer::new(&serialized_bytes);
let deserialized_foo = Foo::deserialize(&mut deserializer).unwrap();
assert_eq!(foo, deserialized_foo);
assert_eq!(
deserializer.raw_bytes.position(),
deserializer.raw_bytes.get_ref().len() as u64
);
}
#[test]
fn test_encode_vec() {
let bar1 = Bar {
a: 55,
b: vec![10u8; 100],
c: Addr::new([3u8; 32]),
d: 77,
};
let bar2 = Bar {
a: 123,
b: vec![1, 5, 20],
c: Addr::new([8u8; 32]),
d: 127,
};
let mut vec = Vec::new();
vec.push(bar1.clone());
vec.push(bar2.clone());
let mut serializer = SimpleSerializer::<Vec<u8>>::new();
serializer.encode_vec(&vec).unwrap();
let serialized_bytes = serializer.get_output();
let de_vec: Vec<Bar> = SimpleDeserializer::deserialize(&serialized_bytes).unwrap();
assert_eq!(2, de_vec.len());
assert_eq!(bar1, de_vec[0]);
assert_eq!(bar2, de_vec[1]);
// test Vec<T> implementation
let mut serializer = SimpleSerializer::<Vec<u8>>::new();
serializer.encode_struct(&vec).unwrap();
let serialized_bytes = serializer.get_output();
let de_vec: Vec<Bar> = SimpleDeserializer::deserialize(&serialized_bytes).unwrap();
assert_eq!(2, de_vec.len());
assert_eq!(bar1, de_vec[0]);
assert_eq!(bar2, de_vec[1]);
}
#[test]
fn test_vec_impl() {
let mut vec: Vec<i32> = Vec::new();
vec.push(std::i32::MIN);
vec.push(std::i32::MAX);
vec.push(100);
let mut serializer = SimpleSerializer::<Vec<u8>>::new();
serializer.encode_struct(&vec).unwrap();
let serialized_bytes = serializer.get_output();
let de_vec: Vec<i32> = SimpleDeserializer::deserialize(&serialized_bytes).unwrap();
assert_eq!(vec, de_vec);
}
#[test]
fn test_vectors_1() {
let bar = Bar {
a: 100,
b: vec![0, 1, 2, 3, 4, 5, 6, 7, 8],
c: Addr::new([5u8; 32]),
d: 99,
};
let mut map = BTreeMap::new();
map.insert(vec![0, 56, 21], vec![22, 10, 5]);
map.insert(vec![1], vec![22, 21, 67]);
map.insert(vec![20, 21, 89, 105], vec![201, 23, 90]);
let foo = Foo {
a: u64::max_value(),
b: vec![100, 99, 88, 77, 66, 55],
c: bar,
d: true,
e: map,
};
let mut serializer = SimpleSerializer::<Vec<u8>>::new();
foo.serialize(&mut serializer).unwrap();
let serialized_bytes = serializer.get_output();
// make sure we serialize into exact same bytes as before
assert_eq!(TEST_VECTOR_1, hex::encode(serialized_bytes));
// make sure we can deserialize the test vector into expected struct
let test_vector_bytes = hex::decode(TEST_VECTOR_1).unwrap();
let deserialized_foo: Foo = SimpleDeserializer::deserialize(&test_vector_bytes).unwrap();
assert_eq!(foo, deserialized_foo);
}
#[test]
fn test_serialization_failure_cases() {
// a vec longer than representable range should result in failure
let bar = Bar {
a: 100,
b: vec![0; i32::max_value() as usize + 1],
c: Addr::new([0u8; 32]),
d: 222,
};
let mut serializer = SimpleSerializer::<Vec<u8>>::new();
assert!(bar.serialize(&mut serializer).is_err());
}
#[test]
fn test_deserialization_failure_cases() {
// invalid length prefix should fail on all decoding methods
let bytes_len_2 = vec![0; 2];
let mut deserializer = SimpleDeserializer::new(&bytes_len_2);
assert!(deserializer.clone().decode_u64().is_err());
assert!(deserializer.clone().decode_bytes_with_len(32).is_err());
assert!(deserializer.clone().decode_variable_length_bytes().is_err());
assert!(deserializer.clone().decode_struct::<Foo>().is_err());
assert!(Foo::deserialize(&mut deserializer.clone()).is_err());
// a length prefix longer than maximum allowed should fail
let mut long_bytes = Vec::new();
long_bytes
.write_u32::<Endianness>(ARRAY_MAX_LENGTH as u32 + 1)
.unwrap();
deserializer = SimpleDeserializer::new(&long_bytes);
assert!(deserializer.clone().decode_variable_length_bytes().is_err());
// vec not long enough should fail
let mut bytes_len_10 = Vec::new();
bytes_len_10.write_u32::<Endianness>(32).unwrap();
deserializer = SimpleDeserializer::new(&bytes_len_10);
assert!(deserializer.clone().decode_variable_length_bytes().is_err());
assert!(deserializer.clone().decode_bytes_with_len(32).is_err());
// malformed struct should fail
let mut some_bytes = Vec::new();
some_bytes.write_u64::<Endianness>(10).unwrap();
some_bytes.write_u32::<Endianness>(50).unwrap();
deserializer = SimpleDeserializer::new(&some_bytes);
assert!(deserializer.clone().decode_struct::<Foo>().is_err());
// malformed encoded bytes with length prefix larger than real
let mut evil_bytes = Vec::new();
evil_bytes.write_u32::<Endianness>(500).unwrap();
evil_bytes.resize_with(4 + 499, Default::default);
deserializer = SimpleDeserializer::new(&evil_bytes);
assert!(deserializer.clone().decode_variable_length_bytes().is_err());
// malformed encoded bool with value not 0 or 1
let mut bool_bytes = Vec::new();
bool_bytes.write_u8(2).unwrap();
deserializer = SimpleDeserializer::new(&bool_bytes);
assert!(deserializer.clone().decode_bool().is_err());
}
| Bar |
main.ts | $(document).on('ready', () => { | //write javascript code here
}); |
|
test_regexp_basics_is_it_ipv4_address.py | import unittest
from katas.kyu_6.regexp_basics_is_it_ipv4_address import ipv4_address
class IPV4AddressTestCase(unittest.TestCase):
def test_true(self):
self.assertTrue(ipv4_address('127.0.0.1'))
def test_true_2(self):
self.assertTrue(ipv4_address('0.0.0.0'))
def test_true_3(self):
self.assertTrue(ipv4_address('255.255.255.255'))
def test_true_4(self):
self.assertTrue(ipv4_address('10.20.30.40'))
def | (self):
self.assertFalse(ipv4_address(''))
def test_false_2(self):
self.assertFalse(ipv4_address('10.256.30.40'))
def test_false_3(self):
self.assertFalse(ipv4_address('10.20.030.40'))
def test_false_4(self):
self.assertFalse(ipv4_address('127.0.1'))
def test_false_5(self):
self.assertFalse(ipv4_address('127.0.0.0.1'))
def test_false_6(self):
self.assertFalse(ipv4_address('..255.255'))
def test_false_7(self):
self.assertFalse(ipv4_address('127.0.0.1\n'))
def test_false_8(self):
self.assertFalse(ipv4_address('\n127.0.0.1'))
def test_false_9(self):
self.assertFalse(ipv4_address(' 127.0.0.1'))
def test_false_10(self):
self.assertFalse(ipv4_address('127.0.0.1 '))
def test_false_11(self):
self.assertFalse(ipv4_address(' 127.0.0.1 '))
| test_false |
OptimizeSumDets.py | import scipy
import time
try:
import SloppyCell.Plotting as Plotting
except ImportError:
pass
def C(p, origMatrix, weightsRDP=0.,weights2D=0.,weightsLS=0.,weightPR=0.,weightPriors=0.,*args,**kwargs):
"""
p is list {a_{ij}, i: 1..n, j: i+1..n}
n is size of total matrix
"""
n = origMatrix.shape[0]
OrthMatrix = ProcessHalfMatrix(p)
newMat = transformMatrix(origMatrix,OrthMatrix)
cost=0.
if weightsRDP > 0.:
cost += weightsRDP*sumRowDotProdsOLD(newMat)
if weights2D > 0.:
cost += weights2D*sum2Determinants(newMat)
if weightsLS > 0.:
cost += weightsLS*sumLogSpacings(newMat)
if weightPR>0.:
cost += weightPR/(ParticipationRatio(OrthMatrix)**2.)
## cost += weightPR*(1.-ParticipationRatio(OrthMatrix)/n)
if weightPriors>0.:
cost +=weightPR*calcPriors(p)
return cost
def calcPriors(paramsList, pOpt=0.,pSigma=10.):
priorCost=0.
for param in paramsList:
priorCost += ((param-pOpt)/pSigma)**2.
return priorCost
def sumRowDotProdsOLD(origMatrix):
"""
Makes more sense to use on Jacobian than on Hessian.
"""
rowNormalized = normRows(origMatrix)
n = origMatrix.shape[0]
## sumDotProds = sum([abs((scipy.dot(rowNormalized[i],rowNormalized[i+1])))**(1./2.) for i in range(n-1)])
sumDotProds = sum([1.-(scipy.dot(rowNormalized[i],rowNormalized[i+1]))**2. for i in range(n-1)])
return sumDotProds
def sumRowDotProdsNEW(origMatrix):
"""
Makes more sense to use on Jacobian than on Hessian.
"""
rowNormalized = normRows(origMatrix)
n = rowNormalized.shape[0]
sumDotProds = sumAllDotProds(rowNormalized[0:n/2]) + sumAllDotProds(rowNormalized[n/2:n])
return sumDotProds
def sumAllDotProds(origMatrix):
n = origMatrix.shape[0]
sumDotProds=0.
for ii in range(n):
for jj in range(ii+1,n):
sumDotProds += 1.-scipy.dot(origMatrix[ii],origMatrix[jj])**2.
return sumDotProds
def sum2Determinants(matrixToSum):
n = matrixToSum.shape[0]/2
det1 = scipy.linalg.det(matrixToSum[0:n,0:n])
det2 = scipy.linalg.det(matrixToSum[n:n*2,n:n*2])
return scipy.absolute(det1)+scipy.absolute(det2)
def sumLogSpacings(matrixToSum):
n = matrixToSum.shape[0]/2
sv1 = scipy.linalg.svdvals(matrixToSum[0:n,0:n])
sv2 = scipy.linalg.svdvals(matrixToSum[n:n*2,n:n*2])
return sum(sv1[1:n]/sv1[0:n-1])+sum(sv2[1:n]/sv2[0:n-1])
def ParticipationRatio(origMatrix):
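    # Sum of fourth powers of all entries; for an orthogonal matrix this is the summed inverse participation ratio of its rows.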
return scipy.sum(scipy.sum(origMatrix**4))
def ProcessFullMatrix(p):
"""
this is used if parameters define all elements of a matrix
"""
n = scipy.sqrt(len(p)).__int__()
pMat = scipy.reshape(p,(n,n))
orthMat = scipy.linalg.orth(pMat)
return orthMat
def ProcessHalfMatrix(p):
"""
    This is used if the parameters define the upper-right triangular portion of
    a skew-symmetric matrix.
    A Cayley transformation of this skew-symmetric matrix is then returned.
"""
l = scipy.size(p)
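    # Recover n from l = n*(n-1)/2, the number of entries in the strict upper triangle.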
n = scipy.round(((1.+scipy.sqrt(1+8.*l))/2.)).__int__()
Mfull = scipy.zeros((n,n),'d')
placeCounter=0
for i in range(n-1):
Mfull[i,(i+1):n] = p[placeCounter:(placeCounter+n-i-1)]
placeCounter += n-i-1
Mfull = Mfull - scipy.transpose(Mfull)
IMat = scipy.eye(n,'d')
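    # Cayley transform: (I - M)(I + M)^{-1} is orthogonal for skew-symmetric M.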
return scipy.dot((IMat-Mfull),scipy.linalg.inv(IMat+Mfull))
## return scipy.linalg.expm(Mfull)
def ProcessQuarterMatrix(p):
"""
this is used if parameters define just upper right and lower left
quadrants of antisymmetric matrix.
"""
n= scipy.sqrt(scipy.size(p))
Mu = scipy.resize(p, (n,n))
Mfull = scipy.bmat([[scipy.zeros((n,n)),Mu],[-scipy.transpose(Mu),scipy.zeros((n,n))]])
return scipy.linalg.expm(Mfull)
def BestMatrix(origMatrix, p=None, weightsRDP=1., weights2D=0., weightsLS=0., weightPR=0., weightPriors=0., seed=None, *args, **kwargs):
if seed is not None:
scipy.random.seed(seed)
n = origMatrix.shape[0]
if p is None:
p = (scipy.random.random(n*(n-1)/2)-0.5)
pOpt = scipy.optimize.fmin(C, p, args=(origMatrix,weightsRDP, weights2D, weightsLS, weightPR, weightPriors), *args, **kwargs)
return C(pOpt, origMatrix, weightsRDP, weights2D, weightsLS, weightPR, weightPriors), pOpt
def ManyBest(origMatrix, numberTries=10, p=None,numOpts=10,weightsRDP=1., weights2D=0., weightsLS=0., weightPR=0., weightPriors=0., seed=None, *args, **kwargs):
return [OptimizeManyTimes(origMatrix,p,numOpts,weightsRDP, weights2D, weightsLS, weightPR, weightPriors, seed, *args, **kwargs) for i in range(numberTries)]
def OptimizeManyTimes(origMatrix,p=None,numOpts=10,weightsRDP=1., weights2D=0., weightsLS=0., weightPR=0., weightPriors=0., seed=None, *args, **kwargs):
C,pOpt = BestMatrix(origMatrix,p,weightsRDP,weights2D,weightsLS,weightPR,weightPriors,seed,*args,**kwargs)
for i in range(numOpts-1):
C,pOpt = BestMatrix(origMatrix,pOpt,weightsRDP,weights2D,weightsLS,weightPR,weightPriors,seed,*args,**kwargs)
return C, pOpt
def getGammas(numExps,distWidth=1.):
epsilons = scipy.random.random(numExps)-0.5
gammas = 1.+epsilons
return gammas
def getAmounts(numExps,distWidth=1.):
# amounts=scipy.ones(numExps,'d')
amounts = scipy.random.random(numExps)-0.5
amounts = 1.+amounts
return amounts
def netRadiation(gammas,amounts,time):
|
def getHessFull(gammas=None,amounts=None,numExps=3):
if gammas is None:
gammas = getGammas(numExps)
if amounts is None:
amounts = getAmounts(numExps)
numExps=scipy.size(gammas)
hessgg = getHessGG(gammas)
hessAA = getHessAA(amounts,gammas)
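    # Mixed block: hessAg[i,j] = -a_i*a_j*g_j / (g_i + g_j)^2; hessgA is its transpose.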
numerAg1 = -scipy.outer(amounts,amounts*gammas)
denomAg1 = scipy.outer(scipy.ones(numExps),gammas)
denomAg2 = scipy.transpose(denomAg1)+denomAg1
denomAg3 = denomAg2*denomAg2
hessAg = numerAg1/denomAg3
numergA1 = -scipy.outer(amounts*gammas,amounts)
denomgA1 = scipy.outer(gammas,scipy.ones(numExps))
denomgA2 = scipy.transpose(denomgA1)+denomgA1
denomgA3 = denomgA2*denomgA2
hessgA = numergA1/denomgA3
hessFull = scipy.bmat([[hessAA,hessAg],[hessgA,hessgg]])
return hessFull
def getHessFullLogTime(gammas=None,amounts=None,numExps=3):
"""use this routine for new paper"""
if gammas is None:
gammas = getGammas(numExps)
if amounts is None:
amounts = getAmounts(numExps)
numExps=scipy.size(gammas)
amountsLess1 = amounts[0:-1]
gammasLess1 = gammas[0:-1]
hessgg = getHessGGLogTime(gammas=gammas,amounts=amounts)
hessAA = getHessAALogTime(amounts=amounts,gammas=gammas)
numerAg1 = scipy.array(-2.*scipy.outer(amountsLess1,amounts*gammas))
denomAg1 = scipy.array(scipy.outer(gammasLess1,scipy.ones(numExps)))
denomAg2 = scipy.array(scipy.outer(scipy.ones(numExps-1),gammas))
denomAg3 = scipy.array(denomAg1+denomAg2)
denomAg4 = denomAg2+gammas[-1]
hessAg = scipy.array(numerAg1/denomAg3)-scipy.array(numerAg1/denomAg4)
hessgA = scipy.transpose(hessAg)
hessFull = scipy.array(scipy.bmat([[hessAA,hessAg],[hessgA,hessgg]]))
return hessFull
def getHessGG(gammas=None,numExps=3):
if gammas is None:
numExps=6
gammas = scipy.array([1.,1.2,1.4,1001.,1005.,997.])
numExps = scipy.size(gammas)
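    # hessgg[i,j] = 2*g_i*g_j / (g_i + g_j)^3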
numergg1 = 2.*scipy.outer(gammas, gammas)
denomgg1 = scipy.outer(scipy.ones(numExps),gammas)
denomgg2 = scipy.transpose(denomgg1)+denomgg1
denomgg3 = denomgg2*denomgg2*denomgg2
hessgg = numergg1/denomgg3
return hessgg
def getHessGGLogTime(gammas=None,amounts=None,numExps=3):
"""use this routine for the new paper"""
if gammas is None:
numExps=6
gammas = scipy.array([1.,1.2,1.4,1001.,1005.,997.])
if amounts is None:
amounts = scipy.ones(numExps,'d')
numExps = scipy.size(gammas)
numergg1 = scipy.array(2.*scipy.outer(gammas*amounts, gammas*amounts))
denomgg1 = scipy.array(scipy.outer(scipy.ones(numExps),gammas))
denomgg2 = scipy.array(scipy.transpose(denomgg1)+denomgg1)
denomgg3 = scipy.array(denomgg2*denomgg2)
hessgg = scipy.array(numergg1/denomgg3)
return hessgg
def getHessAA(amounts=None,gammas=None,numExps=3):
if amounts is None:
amounts = getAmounts(numExps)
if gammas is None:
gammas = getGammas(numExps)
numExps = scipy.size(amounts)
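    # hessAA[i,j] = a_i*a_j / (g_i + g_j)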
numerAA1 = scipy.outer(amounts,amounts)
denomAA1 = scipy.outer(scipy.ones(numExps),gammas)
denomAA2 = scipy.transpose(denomAA1)+denomAA1
hessAA = numerAA1/denomAA2
return hessAA
def getHessAALogTime(amounts=None,gammas=None,numExps=3):
"""use this routine for the new paper"""
if amounts is None:
amounts = getAmounts(numExps)
if gammas is None:
gammas = getGammas(numExps)
numExps = scipy.size(amounts)
amountsLess1 = amounts[0:-1]
gammasLess1 = gammas[0:-1]
numerAA1 = scipy.array(scipy.outer(amountsLess1,amountsLess1))
numerAA2 = scipy.array(scipy.outer(gammasLess1+gammas[-1],scipy.ones(numExps-1)))
numerAA3 = scipy.array(scipy.outer(scipy.ones(numExps-1),gammasLess1+gammas[-1]))
denomAA1 = scipy.array(scipy.outer(scipy.ones(numExps-1),2*gammas[-1]*gammasLess1))
denomAA2 = scipy.array(scipy.transpose(denomAA1)+denomAA1)
hessAA = 2.*scipy.array(numerAA1*scipy.log(numerAA2*numerAA3/denomAA2))
return hessAA
def getJacobian(times, amounts=None, gammas=None):
numGammas=0
numAmounts=0
if gammas is not None:
numGammas = scipy.size(gammas)
if amounts is not None:
numAmounts = scipy.size(amounts)
Jacobian = scipy.zeros((numGammas+numAmounts,scipy.size(times)),'d')
if numAmounts > 0:
Jacobian[0:numAmounts] = getJacobianA(times,amounts,gammas)
if numGammas > 0:
Jacobian[numAmounts:numAmounts+numGammas] = getJacobianG(times,gammas,amounts)
return Jacobian
def getJacobianLogTime(times, amounts=None, gammas=None):
numGammas=0
numAmounts=0
if gammas is not None:
numGammas = scipy.size(gammas)
if amounts is not None:
numAmounts = scipy.size(amounts)
JacobianLogTime = scipy.zeros((numGammas+numAmounts,scipy.size(times)),'d')
if numAmounts > 0:
JacobianLogTime[0:numAmounts] = getJacobianALogTime(times,amounts,gammas)
if numGammas > 0:
JacobianLogTime[numAmounts:numAmounts+numGammas] = getJacobianGLogTime(times,gammas,amounts)
return JacobianLogTime
def getJacobianLog(times,gammas,amounts=None):
"""This is the routine to use for new paper (with times exponentially
distributed."""
numExps = scipy.size(gammas)
if amounts is None:
amounts = scipy.ones(numExps,'d')
jacG = getJacobianLogG(times,gammas,amounts)
jacA = getJacobianLogA(times,gammas,amounts)
return scipy.transpose(scipy.array(scipy.bmat([[jacA],[jacG]])))
def getJacobianLogG(times,gammas,amounts=None):
"""This is the routine to use for new paper (with times exponentially
distributed."""
numExps = scipy.size(gammas)
if amounts is None:
amounts = scipy.ones(numExps,'d')
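    # d/d(log g_j) of a_j*exp(-g_j*t) = -a_j*g_j*t*exp(-g_j*t)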
JacobianLogG = scipy.array([gammas[j]*amounts[j]*(-times)*scipy.exp(-gammas[j]*times) for j in range(numExps)])
return JacobianLogG
def getJacobianLogA(times,gammas,amounts):
"""This is the routine to use for new paper (with times exponentially
distributed."""
numExps = scipy.size(gammas)
JacobianLogA = scipy.array([amounts[j]*(scipy.exp(-gammas[j]*times)-scipy.exp(-gammas[-1]*times)) for j in range(numExps-1)])
return JacobianLogA
def getJacobianG(times,gammas,amounts=None):
numGammas = scipy.size(gammas)
if amounts is None:
amounts = scipy.ones(numGammas,'d')
JacobianG = scipy.array([-amounts[i]*gammas[i]*times*scipy.exp(-gammas[i]*times) for i in range(numGammas)])
return JacobianG
def getJacobianGLogTime(times,gammas,amounts=None):
numGammas = scipy.size(gammas)
if amounts is None:
amounts = scipy.ones(numGammas,'d')
JacobianGLogTime = scipy.array([-amounts[i]*gammas[i]*scipy.sqrt(times)*scipy.exp(-gammas[i]*times) for i in range(numGammas)])
return JacobianGLogTime
def getJacobianA(times,amounts,gammas=None):
numAmounts = scipy.size(amounts)
if gammas is None:
gammas = scipy.zeros(numAmounts,'d')
JacobianA = scipy.array([amounts[i]*scipy.exp(-gammas[i]*times) for i in range(numAmounts)])
return JacobianA
def getJacobianALogTime(times,amounts,gammas=None):
numAmounts = scipy.size(amounts)
if gammas is None:
gammas = scipy.zeros(numAmounts,'d')
JacobianALogTime = scipy.array([(amounts[i]/scipy.sqrt(times))*scipy.exp(-gammas[i]*times) for i in range(numAmounts)])
return JacobianALogTime
def normRows(matrixToPlot):
"""
Useful for plotting and otherwise comparing alignment of rows of matrices.
    Be careful that if vec[1] is zero, the entire row gets zeroed.
"""
return scipy.array([scipy.sign(vec[1])*vec/scipy.sqrt(scipy.dot(vec,vec)) for vec in matrixToPlot])
def transformMatrix(origMatrix, transformation):
n, m =origMatrix.shape
if n == m:
newMat = scipy.dot(transformation,scipy.dot(origMatrix,scipy.transpose(transformation)))
else:
newMat = scipy.dot(transformation,origMatrix)
return newMat
def findPermutation(permMat):
"""
In so far as permMat is a permutation matrix, returns the permutation.
"""
maxs = [(vec.tolist().index(max(vec)), max(vec)) for vec in permMat]
mins = [(vec.tolist().index(min(vec)), min(vec)) for vec in permMat]
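    # for each row, keep whichever of its max or min entry has the larger magnitude, preserving its sign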
for ii in range(len(maxs)):
if maxs[ii][1] < -mins[ii][1]:
maxs[ii] = mins[ii]
return maxs
def makePermutationMatrix(permList):
"""
Takes a list defining the permutation and makes the appropriate matrix.
"""
permList = scipy.array(permList)
n = len(permList)
if 0 not in permList:
permList = permList - 1
permMat = scipy.zeros((n,n),'d')
for ii, jj in enumerate(permList):
permMat[ii,jj] = 1.
return permMat
def timeCostEval(mat, p, numEvals=100):
start = time.time()
for ii in range(numEvals):
Cost = C(p,mat)
stop = time.time()
print "time per eval:", (stop-start)/numEvals
return (stop-start)/numEvals
def timeGammas(listNumGammas, numEvals=1000):
timesToCalc = []
for numGammas in listNumGammas:
gammas = getGammas(numGammas)
jac = getJacobianG(gammas=gammas,times=scipy.arange(500.))
p = scipy.random.random(numGammas*(numGammas-1)/2)-0.5
timePerEval = timeCostEval(jac,p,numEvals)
timesToCalc.append(timePerEval)
return timesToCalc
def plotLevels(levels,offset=0):
xs = [offset+0.6,offset+1.4]
for lvl in levels:
Plotting.semilogy(xs,[lvl,lvl],'k')
return
def eVec(k,n):
eVec = scipy.zeros(n,'d')
eVec[k] = 1.
return eVec
def sLSAlongP(origMatrix,params,k,deltaP):
sLSList = [sumLogSpacings(transformMatrix(origMatrix,ProcessHalfMatrix(params+x*params[k]*eVec(k,len(params))))) for x in scipy.arange(-deltaP,deltaP,deltaP/1000)]
return sLSList
def PRAlongP(params,k,deltaP):
n = len(params)
PRList = [1./ParticipationRatio(ProcessHalfMatrix(params+x*params[k]*eVec(k,n))) for x in scipy.arange(-deltaP,deltaP,deltaP/1000)]
## PRList = [1./ParticipationRatio(transformMatrix(origMatrix,ProcessHalfMatrix(params+x*params[k]*eVec(k,n)))) for x in scipy.arange(-deltaP,deltaP,deltaP/100)]
## PRList = [1.-ParticipationRatio(transformMatrix(origMatrix,ProcessHalfMatrix(params+x*params[k]*eVec(k,n))))/n for x in scipy.arange(-deltaP,deltaP,deltaP/100)]
return PRList
def CostAlongP(origMatrix,params,k,deltaP,weightsRDP=0.,weights2D=0.,weightsLS=0.,weightPR=0.,weightPriors=0.):
n = len(params)
CostList = [C(params+x*params[k]*eVec(k,n),origMatrix, weightsRDP,weights2D,weightsLS,weightPR,weightPriors) for x in scipy.arange(-deltaP,deltaP,deltaP/100)]
return CostList
| netR = amounts*scipy.exp(-gammas*time)
return scipy.sum(netR) |
cloud_settings_settings_sleep_timeout_cloud_garbage_collection.py | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CloudSettingsSettingsSleepTimeoutCloudGarbageCollection(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'recovery_with_tasks': 'float',
'recovery_without_tasks': 'float',
'with_tasks': 'float',
'without_tasks': 'float'
} | 'with_tasks': 'with_tasks',
'without_tasks': 'without_tasks'
}
def __init__(self, recovery_with_tasks=None, recovery_without_tasks=None, with_tasks=None, without_tasks=None): # noqa: E501
"""CloudSettingsSettingsSleepTimeoutCloudGarbageCollection - a model defined in Swagger""" # noqa: E501
self._recovery_with_tasks = None
self._recovery_without_tasks = None
self._with_tasks = None
self._without_tasks = None
self.discriminator = None
if recovery_with_tasks is not None:
self.recovery_with_tasks = recovery_with_tasks
if recovery_without_tasks is not None:
self.recovery_without_tasks = recovery_without_tasks
if with_tasks is not None:
self.with_tasks = with_tasks
if without_tasks is not None:
self.without_tasks = without_tasks
@property
def recovery_with_tasks(self):
"""Gets the recovery_with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
Sleep timeout for a recovery thread with pending tasks # noqa: E501
:return: The recovery_with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:rtype: float
"""
return self._recovery_with_tasks
@recovery_with_tasks.setter
def recovery_with_tasks(self, recovery_with_tasks):
"""Sets the recovery_with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection.
Sleep timeout for a recovery thread with pending tasks # noqa: E501
:param recovery_with_tasks: The recovery_with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:type: float
"""
self._recovery_with_tasks = recovery_with_tasks
@property
def recovery_without_tasks(self):
"""Gets the recovery_without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
Sleep timeout for a recovery thread without pending tasks # noqa: E501
:return: The recovery_without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:rtype: float
"""
return self._recovery_without_tasks
@recovery_without_tasks.setter
def recovery_without_tasks(self, recovery_without_tasks):
"""Sets the recovery_without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection.
Sleep timeout for a recovery thread without pending tasks # noqa: E501
:param recovery_without_tasks: The recovery_without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:type: float
"""
self._recovery_without_tasks = recovery_without_tasks
@property
def with_tasks(self):
"""Gets the with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
Sleep timeout for a non-recovery thread with pending tasks # noqa: E501
:return: The with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:rtype: float
"""
return self._with_tasks
@with_tasks.setter
def with_tasks(self, with_tasks):
"""Sets the with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection.
Sleep timeout for a non-recovery thread with pending tasks # noqa: E501
:param with_tasks: The with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:type: float
"""
self._with_tasks = with_tasks
@property
def without_tasks(self):
"""Gets the without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
Sleep timeout for a non-recovery thread without pending tasks # noqa: E501
:return: The without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:rtype: float
"""
return self._without_tasks
@without_tasks.setter
def without_tasks(self, without_tasks):
"""Sets the without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection.
Sleep timeout for a non-recovery thread without pending tasks # noqa: E501
:param without_tasks: The without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:type: float
"""
self._without_tasks = without_tasks
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CloudSettingsSettingsSleepTimeoutCloudGarbageCollection):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other |
attribute_map = {
'recovery_with_tasks': 'recovery_with_tasks',
'recovery_without_tasks': 'recovery_without_tasks', |
live_predict.py | # Unsure majority of time but more correct then wrong when thinking of
# Requires more data for training
from data import *
from tkinter import *
from keras.models import load_model
import numpy as np
import threading
import time
# Time variables
start_wait = 10000
wait = 2100
# Set dimensions | root = Tk()
root.geometry(str(w)+'x'+str(h))
root.title('Predictor')
graphing_area = Canvas(root, width=w, height=h)
graphing_area.pack()
# Import model to be used
saved_model = load_model('model.h5')
# Begin data thread
thread = threading.Thread(target=data_loop, args=[False, False, False, 1, False])
thread.start()
# Predicts the input values and returns predicted letter
def predict(values, model):
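    # Feature vector: magnitude of the real FFT of the raw window, scaled down, with extra axes added to match the model input shape.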
processed_data = np.expand_dims(np.array([np.abs(np.fft.rfft(np.array(values)))/85000]), 3)
prediction = model.predict(processed_data)
print(prediction[0][0])
if prediction[0][0] < 0.1:
return 'B'
elif prediction[0][0] > 0.9:
return 'A'
else:
return '?'
def display_prediction(canvas, frame, model):
prediction = predict(last_values[-1500:], model)
canvas.delete('all')
canvas.create_text(w / 2, h / 2, font="Arial " + str(int(round(h / 3, 0))), text='Collecting...', anchor='center')
time.sleep(1)
canvas.delete('all')
canvas.create_text(w / 2, h / 2, font="Arial " + str(int(round(h / 3, 0))), text=prediction, anchor='center')
root.after(wait, display_prediction, canvas, frame, model)
root.after(start_wait, display_prediction, graphing_area, root, saved_model)
root.mainloop() | w = 900
h = 556
|
diff.rs | // Copyright 2022 Nydus Developer. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::fs::{self, File, OpenOptions};
use std::io::prelude::*;
use std::io::Write;
use std::path::{Path, PathBuf};
use anyhow::Result;
use nydus_utils::{digest::Algorithm, digest::RafsDigest, exec};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use vmm_sys_util::tempdir::TempDir;
use nydus_rafs::metadata::layout::RAFS_ROOT_INODE;
use nydus_rafs::metadata::{RafsInode, RafsMode, RafsSuper};
use nydus_rafs::RafsIoReader;
use nydus_storage::device::BlobChunkInfo;
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
pub struct BuildOutputBlob {
pub blob_id: String,
pub blob_size: u64,
}
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
pub struct BuildOutputArtifact {
pub bootstrap_name: String,
pub blobs: Vec<BuildOutputBlob>,
}
#[derive(Serialize, Deserialize, Default)]
pub struct OutputSerializer {
version: String,
artifacts: Vec<BuildOutputArtifact>,
blobs: Vec<String>,
trace: serde_json::Map<String, serde_json::Value>,
}
struct Mounter {
mountpoint: PathBuf,
}
impl Mounter {
fn mount(mut layer_paths: Vec<&Path>, target_dir: &Path) -> Self |
}
impl Drop for Mounter {
fn drop(&mut self) {
exec(&format!("umount {:?}", self.mountpoint), true).unwrap();
}
}
fn create_dir(path: &Path) -> PathBuf {
fs::create_dir_all(path).unwrap();
path.to_owned()
}
fn create_file(path: &Path, chunks: &[Vec<u8>]) {
let mut file = File::create(path).unwrap();
for chunk in chunks {
file.write_all(chunk).unwrap();
}
}
fn join_string(paths: Vec<&Path>, sep: &str) -> String {
paths
.iter()
.map(|p| p.to_str().unwrap().to_string())
.collect::<Vec<String>>()
.join(sep)
}
struct Skip {
// for option --diff-skip-layer
diff_skip_layer: usize,
// for option --parent-bootstrap
parent_bootstrap: PathBuf,
}
fn diff_build(
work_dir: &Path,
snapshot_paths: Vec<&Path>,
layer_paths: Vec<&Path>,
with_diff_hint: bool,
chunk_dict_bootstrap: Option<&Path>,
skip: Option<Skip>,
) {
let builder = std::env::var("NYDUS_IMAGE")
.unwrap_or_else(|_| String::from("./target-fusedev/release/nydus-image"));
let output_path = work_dir.join("output.json");
let bootstraps_path = create_dir(&work_dir.join("bootstraps"));
let blobs_path = create_dir(&work_dir.join("blobs"));
let cmd = format!(
"
{} create --log-level warn \
--output-json {} \
--compressor none \
--chunk-size 0x1000 {} {} \
--diff-bootstrap-dir {} \
--blob-dir {} \
--source-type diff {} {} {}
",
builder,
output_path.to_str().unwrap(),
chunk_dict_bootstrap
.map(|p| format!("--chunk-dict {}", p.to_str().unwrap()))
.unwrap_or_default(),
if let Some(skip) = skip {
format!(
"--diff-skip-layer {} --parent-bootstrap {}",
skip.diff_skip_layer,
skip.parent_bootstrap.to_str().unwrap(),
)
} else {
String::new()
},
bootstraps_path.to_str().unwrap(),
blobs_path.to_str().unwrap(),
if with_diff_hint {
"--diff-overlay-hint"
} else {
""
},
join_string(snapshot_paths, " "),
if with_diff_hint {
join_string(layer_paths, " ")
} else {
String::new()
},
);
exec(&cmd, false).unwrap();
}
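/// Generates `num` random 4 KiB chunks and their Blake3 digests (4 KiB matches the 0x1000 chunk size passed to the builder).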
fn generate_chunks(num: usize) -> (Vec<Vec<u8>>, Vec<String>) {
let mut chunks = Vec::new();
let mut digests = Vec::new();
for _ in 0..num {
let chunk = (0..0x1000)
.map(|_| rand::random::<u8>())
.collect::<Vec<u8>>();
let digest = RafsDigest::from_buf(&chunk, Algorithm::Blake3);
chunks.push(chunk);
digests.push(format!("{}", digest));
}
(chunks, digests)
}
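/// A blob ID is the hex-encoded SHA-256 digest of the concatenated chunk data.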
fn calc_blob_id(data: &[Vec<u8>]) -> String {
let mut digest = Sha256::new();
for d in data {
digest.update(d);
}
format!("{:x}", digest.finalize())
}
#[test]
fn integration_test_diff_build_with_chunk_dict() {
let tmp_dir_prefix =
std::env::var("TEST_WORKDIR_PREFIX").expect("Please specify `TEST_WORKDIR_PREFIX` env");
let tmp_dir = TempDir::new_with_prefix(format!("{}/", tmp_dir_prefix)).unwrap();
let mut mounts = Vec::new();
// ---------------------------------------------------
// Diff build to chunk-dict bootstrap
// Create layer 1
let layer_dir_1 = create_dir(&tmp_dir.as_path().join("layer-1"));
let (layer_1_chunks_1, layer_1_chunk_digests_1) = generate_chunks(2);
create_file(&layer_dir_1.join("file-1"), &layer_1_chunks_1);
let (layer_1_chunks_2, layer_1_chunk_digests_2) = generate_chunks(1);
create_file(&layer_dir_1.join("file-2"), &layer_1_chunks_2);
let blob_layer_1_digest =
calc_blob_id(&[layer_1_chunks_1.clone(), layer_1_chunks_2.clone()].concat());
// Create snapshot 1
// Equals with layer-1, so nothing to do
let snapshot_dir_1 = layer_dir_1.clone();
// Create layer 2 (dump same blob with layer 1)
let layer_dir_2 = create_dir(&tmp_dir.as_path().join("layer-2"));
create_file(&layer_dir_2.join("file-3"), &layer_1_chunks_1);
create_file(&layer_dir_2.join("file-4"), &layer_1_chunks_2);
// Create snapshot 2
let snapshot_dir_2 = create_dir(&tmp_dir.as_path().join("snapshot-2"));
mounts.push(Mounter::mount(
vec![&layer_dir_1, &layer_dir_2],
&snapshot_dir_2,
));
// Create layer 3 (dump part of the same chunk with layer 1)
let layer_dir_3 = create_dir(&tmp_dir.as_path().join("layer-3"));
create_file(&layer_dir_3.join("file-5"), &[layer_1_chunks_1[1].clone()]);
let (layer_3_chunks_1, layer_3_chunk_digests_1) = generate_chunks(1);
create_file(&layer_dir_3.join("file-6"), &layer_3_chunks_1);
let blob_layer_3_digest =
calc_blob_id(&[vec![layer_1_chunks_1[1].clone()], layer_3_chunks_1.clone()].concat());
// Create snapshot 3
let snapshot_dir_3 = create_dir(&tmp_dir.as_path().join("snapshot-3"));
mounts.push(Mounter::mount(
vec![&layer_dir_1, &layer_dir_2, &layer_dir_3],
&snapshot_dir_3,
));
// Create layer 4 (dump empty blob)
let layer_dir_4 = create_dir(&tmp_dir.as_path().join("layer-4"));
create_file(&layer_dir_4.join("file-7"), &Vec::new());
// Create snapshot 4
let snapshot_dir_4 = create_dir(&tmp_dir.as_path().join("snapshot-4"));
mounts.push(Mounter::mount(
vec![&layer_dir_1, &layer_dir_2, &layer_dir_3, &layer_dir_4],
&snapshot_dir_4,
));
let expected_chunk_dict_bootstrap = [
(PathBuf::from("/"), vec![]),
(
PathBuf::from("/file-1"),
vec![
(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_1[0].clone(),
),
(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_1[1].clone(),
),
],
),
(
PathBuf::from("/file-2"),
vec![(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_2[0].clone(),
)],
),
(
PathBuf::from("/file-3"),
vec![
(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_1[0].clone(),
),
(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_1[1].clone(),
),
],
),
(
PathBuf::from("/file-4"),
vec![(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_2[0].clone(),
)],
),
(
PathBuf::from("/file-5"),
vec![(
1,
blob_layer_3_digest.to_string(),
layer_1_chunk_digests_1[1].clone(),
)],
),
(
PathBuf::from("/file-6"),
vec![(
1,
blob_layer_3_digest.to_string(),
layer_3_chunk_digests_1[0].clone(),
)],
),
(PathBuf::from("/file-7"), vec![]),
]
.iter()
.cloned()
.collect();
// Diff build to a chunk-dict bootstrap
let work_dir_1 = create_dir(&tmp_dir.as_path().join("workdir-1"));
diff_build(
&work_dir_1,
vec![
&snapshot_dir_1,
&snapshot_dir_2,
&snapshot_dir_3,
&snapshot_dir_4,
],
vec![&layer_dir_1, &layer_dir_2, &layer_dir_3, &layer_dir_4],
true,
None,
None,
);
// Check metadata for chunk-dict bootstrap
let file = OpenOptions::new()
.read(true)
.write(false)
.open(work_dir_1.join("bootstraps/bootstrap-3"))
.unwrap();
let mut rs = RafsSuper {
mode: RafsMode::Direct,
validate_digest: true,
..Default::default()
};
let mut reader = Box::new(file) as RafsIoReader;
rs.load(&mut reader).unwrap();
let mut actual = HashMap::new();
let blobs = rs.superblock.get_blob_infos();
rs.walk_dir(RAFS_ROOT_INODE, None, &mut |inode: &dyn RafsInode,
path: &Path|
-> Result<()> {
let mut chunks = Vec::new();
if inode.is_reg() {
inode
.walk_chunks(&mut |chunk: &dyn BlobChunkInfo| -> Result<()> {
chunks.push((
chunk.blob_index(),
blobs[chunk.blob_index() as usize].blob_id().to_string(),
format!("{}", chunk.chunk_id()),
));
Ok(())
})
.unwrap();
}
actual.insert(path.to_path_buf(), chunks);
Ok(())
})
.unwrap();
// Verify chunk-dict bootstrap
assert_eq!(actual, expected_chunk_dict_bootstrap);
let mut file = File::open(&work_dir_1.join("output.json")).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
let output: OutputSerializer = serde_json::from_str(&contents).unwrap();
assert_eq!(output.artifacts.len(), 4);
assert_eq!(output.artifacts[3].bootstrap_name, "bootstrap-3");
assert_eq!(output.artifacts[3].blobs[0].blob_id, blob_layer_1_digest);
assert_eq!(output.artifacts[3].blobs[1].blob_id, blob_layer_3_digest);
// ---------------------------------------------------
// Diff build based on a chunk dict bootstrap
// Create layer 5 (includes some chunks in chunk-dict)
let layer_dir_5 = create_dir(&tmp_dir.as_path().join("layer-5"));
create_file(
&layer_dir_5.join("file-8"),
&[layer_1_chunks_1[1].clone(), layer_1_chunks_2[0].clone()],
);
let (layer_5_chunks_1, layer_5_chunk_digests_1) = generate_chunks(2);
create_file(&layer_dir_5.join("file-9"), &layer_5_chunks_1);
let blob_layer_5_digest = calc_blob_id(&layer_5_chunks_1);
// Create snapshot 5
// Equals with layer-5, so nothing to do
let snapshot_dir_5 = layer_dir_5.clone();
// Create layer 6 (includes some chunks in chunk-dict)
let layer_dir_6 = create_dir(&tmp_dir.as_path().join("layer-6"));
let (layer_6_chunks_1, layer_6_chunk_digests_1) = generate_chunks(1);
let blob_layer_6_digest = calc_blob_id(&layer_6_chunks_1);
create_file(
&layer_dir_6.join("file-10"),
&[layer_6_chunks_1[0].clone(), layer_3_chunks_1[0].clone()],
);
// Create snapshot 6
let snapshot_dir_6 = create_dir(&tmp_dir.as_path().join("snapshot-6"));
mounts.push(Mounter::mount(
vec![&layer_dir_5, &layer_dir_6],
&snapshot_dir_6,
));
let expected_bootstrap = [
(PathBuf::from("/"), vec![]),
(
PathBuf::from("/file-8"),
vec![
(
2,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_1[1].clone(),
),
(
2,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_2[0].clone(),
),
],
),
(
PathBuf::from("/file-9"),
vec![
(
3,
blob_layer_5_digest.to_string(),
layer_5_chunk_digests_1[0].clone(),
),
(
3,
blob_layer_5_digest.to_string(),
layer_5_chunk_digests_1[1].clone(),
),
],
),
(
PathBuf::from("/file-10"),
vec![
(
0,
blob_layer_6_digest.to_string(),
layer_6_chunk_digests_1[0].clone(),
),
(
1,
blob_layer_3_digest.to_string(),
layer_3_chunk_digests_1[0].clone(),
),
],
),
]
.iter()
.cloned()
.collect();
// Diff build based on a chunk dict bootstrap
let chunk_dict_bootstrap_path = &work_dir_1.join("bootstraps/bootstrap-3");
let work_dir_2 = create_dir(&tmp_dir.as_path().join("workdir-2"));
diff_build(
&work_dir_2,
vec![&snapshot_dir_5, &snapshot_dir_6],
vec![&layer_dir_5, &layer_dir_6],
true,
Some(chunk_dict_bootstrap_path),
None,
);
// Check metadata for bootstrap
let file = OpenOptions::new()
.read(true)
.write(false)
.open(work_dir_2.join("bootstraps/bootstrap-1"))
.unwrap();
let mut rs = RafsSuper {
mode: RafsMode::Direct,
validate_digest: true,
..Default::default()
};
let mut reader = Box::new(file) as RafsIoReader;
rs.load(&mut reader).unwrap();
let mut actual = HashMap::new();
let blobs = rs.superblock.get_blob_infos();
rs.walk_dir(RAFS_ROOT_INODE, None, &mut |inode: &dyn RafsInode,
path: &Path|
-> Result<()> {
let mut chunks = Vec::new();
if inode.is_reg() {
inode
.walk_chunks(&mut |chunk: &dyn BlobChunkInfo| -> Result<()> {
chunks.push((
chunk.blob_index(),
blobs[chunk.blob_index() as usize].blob_id().to_string(),
format!("{}", chunk.chunk_id()),
));
Ok(())
})
.unwrap();
}
actual.insert(path.to_path_buf(), chunks);
Ok(())
})
.unwrap();
// Verify bootstrap
assert_eq!(actual, expected_bootstrap);
let mut file = File::open(&work_dir_2.join("output.json")).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
let output: OutputSerializer = serde_json::from_str(&contents).unwrap();
assert_eq!(output.artifacts.len(), 2);
assert_eq!(output.artifacts[1].bootstrap_name, "bootstrap-1");
assert_eq!(output.artifacts[1].blobs[0].blob_id, blob_layer_6_digest);
assert_eq!(output.artifacts[1].blobs[1].blob_id, blob_layer_3_digest);
assert_eq!(output.artifacts[1].blobs[2].blob_id, blob_layer_1_digest);
assert_eq!(output.artifacts[1].blobs[3].blob_id, blob_layer_5_digest);
// ---------------------------------------------------
// Diff build based on a build-cache + chunk-dict bootstrap
// Create layer 7
let layer_dir_7 = create_dir(&tmp_dir.as_path().join("layer-7"));
let (layer_7_chunks_1, layer_7_chunk_digests_1) = generate_chunks(2);
create_file(&layer_dir_7.join("file-11"), &layer_7_chunks_1);
let blob_layer_7_digest = calc_blob_id(&layer_7_chunks_1);
// Create layer 8 (includes some chunks in chunk-dict)
let layer_dir_8 = create_dir(&tmp_dir.as_path().join("layer-8"));
let (layer_8_chunks_1, layer_8_chunk_digests_1) = generate_chunks(1);
create_file(
&layer_dir_8.join("file-12"),
&[layer_8_chunks_1[0].clone(), layer_1_chunks_2[0].clone()],
);
let blob_layer_8_digest = calc_blob_id(&[layer_8_chunks_1[0].clone()]);
// Create snapshot 7
let snapshot_dir_7 = create_dir(&tmp_dir.as_path().join("snapshot-7"));
mounts.push(Mounter::mount(
vec![
&layer_dir_1,
&layer_dir_2,
&layer_dir_3,
&layer_dir_4,
&layer_dir_7,
],
&snapshot_dir_7,
));
// Create snapshot 8
let snapshot_dir_8 = create_dir(&tmp_dir.as_path().join("snapshot-8"));
mounts.push(Mounter::mount(
vec![
&layer_dir_1,
&layer_dir_2,
&layer_dir_3,
&layer_dir_4,
&layer_dir_7,
&layer_dir_8,
],
&snapshot_dir_8,
));
let expected_bootstrap = [
(PathBuf::from("/"), vec![]),
(
PathBuf::from("/file-1"),
vec![
(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_1[0].clone(),
),
(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_1[1].clone(),
),
],
),
(
PathBuf::from("/file-2"),
vec![(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_2[0].clone(),
)],
),
(
PathBuf::from("/file-3"),
vec![
(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_1[0].clone(),
),
(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_1[1].clone(),
),
],
),
(
PathBuf::from("/file-4"),
vec![(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_2[0].clone(),
)],
),
(
PathBuf::from("/file-5"),
vec![(
3,
blob_layer_3_digest.to_string(),
layer_1_chunk_digests_1[1].clone(),
)],
),
(
PathBuf::from("/file-6"),
vec![(
3,
blob_layer_3_digest.to_string(),
layer_3_chunk_digests_1[0].clone(),
)],
),
(PathBuf::from("/file-7"), vec![]),
(
PathBuf::from("/file-11"),
vec![
(
1,
blob_layer_7_digest.to_string(),
layer_7_chunk_digests_1[0].clone(),
),
(
1,
blob_layer_7_digest.to_string(),
layer_7_chunk_digests_1[1].clone(),
),
],
),
(
PathBuf::from("/file-12"),
vec![
(
2,
blob_layer_8_digest.to_string(),
layer_8_chunk_digests_1[0].clone(),
),
(
0,
blob_layer_1_digest.to_string(),
layer_1_chunk_digests_2[0].clone(),
),
],
),
]
.iter()
.cloned()
.collect();
// Diff build based on a build-cache + chunk-dict bootstrap
let chunk_dict_bootstrap_path = &work_dir_1.join("bootstraps/bootstrap-3");
let work_dir_3 = create_dir(&tmp_dir.as_path().join("workdir-3"));
diff_build(
&work_dir_3,
vec![
&snapshot_dir_1,
&snapshot_dir_2,
&snapshot_dir_3,
&snapshot_dir_4,
&snapshot_dir_7,
&snapshot_dir_8,
],
vec![
&layer_dir_1,
&layer_dir_2,
&layer_dir_3,
&layer_dir_4,
&layer_dir_7,
&layer_dir_8,
],
true,
Some(chunk_dict_bootstrap_path),
Some(Skip {
diff_skip_layer: 3,
parent_bootstrap: work_dir_1.join("bootstraps/bootstrap-3"),
}),
);
// Check metadata for bootstrap
let file = OpenOptions::new()
.read(true)
.write(false)
.open(work_dir_3.join("bootstraps/bootstrap-5"))
.unwrap();
let mut rs = RafsSuper {
mode: RafsMode::Direct,
validate_digest: true,
..Default::default()
};
let mut reader = Box::new(file) as RafsIoReader;
rs.load(&mut reader).unwrap();
let mut actual = HashMap::new();
let blobs = rs.superblock.get_blob_infos();
rs.walk_dir(RAFS_ROOT_INODE, None, &mut |inode: &dyn RafsInode,
path: &Path|
-> Result<()> {
let mut chunks = Vec::new();
if inode.is_reg() {
inode
.walk_chunks(&mut |chunk: &dyn BlobChunkInfo| -> Result<()> {
chunks.push((
chunk.blob_index(),
blobs[chunk.blob_index() as usize].blob_id().to_string(),
format!("{}", chunk.chunk_id()),
));
Ok(())
})
.unwrap();
}
actual.insert(path.to_path_buf(), chunks);
Ok(())
})
.unwrap();
// Verify bootstrap
assert_eq!(actual, expected_bootstrap);
let mut file = File::open(&work_dir_3.join("output.json")).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
let output: OutputSerializer = serde_json::from_str(&contents).unwrap();
assert_eq!(output.artifacts.len(), 2);
assert_eq!(output.artifacts[0].bootstrap_name, "bootstrap-4");
assert_eq!(output.artifacts[1].bootstrap_name, "bootstrap-5");
assert_eq!(output.artifacts[1].blobs[0].blob_id, blob_layer_1_digest);
assert_eq!(output.artifacts[1].blobs[1].blob_id, blob_layer_7_digest);
assert_eq!(output.artifacts[1].blobs[2].blob_id, blob_layer_8_digest);
assert_eq!(output.artifacts[1].blobs[3].blob_id, blob_layer_3_digest);
}
| {
layer_paths.reverse();
exec(
&format!(
"mount -t overlay -o lowerdir={} overlay {}",
join_string(layer_paths, ":"),
target_dir.to_str().unwrap(),
),
false,
)
.unwrap();
Self {
mountpoint: target_dir.to_path_buf(),
}
} |
client.go | package redisearch
import (
"errors"
"log"
"reflect"
"strconv"
"strings"
"github.com/gomodule/redigo/redis"
)
// Client is an interface to redisearch's redis commands
type Client struct {
pool ConnPool
name string
}
var maxConns = 500
// NewClient creates a new client connecting to the redis host, and using the given name as key prefix.
// Addr can be a single host:port pair, or a comma-separated list of host:port,host:port...
// In the case of multiple hosts we create a multi-host pool and select connections at random
func NewClient(addr, name string) *Client {
addrs := strings.Split(addr, ",")
var pool ConnPool
if len(addrs) == 1 {
pool = NewSingleHostPool(addrs[0])
} else {
pool = NewMultiHostPool(addrs)
}
ret := &Client{
pool: pool,
name: name,
}
return ret
}
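// exampleNewClientSketch is an illustrative usage sketch, not part of the original client; the
// addresses and index name below are assumptions. It shows the single-host vs multi-host pool
// behaviour described in the NewClient comment above.
func exampleNewClientSketch() {
	// A single "host:port" address uses a single-host pool.
	single := NewClient("localhost:6379", "products")
	// A comma-separated address list uses a multi-host pool; connections are picked at random.
	multi := NewClient("10.0.0.1:6379,10.0.0.2:6379", "products")
	_, _ = single, multi
}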
// NewClientFromPool creates a new Client with the given pool and index name
func NewClientFromPool(pool *redis.Pool, name string) *Client {
ret := &Client{
pool: pool,
name: name,
}
return ret
}
// CreateIndex configures the index and creates it on redis
func (i *Client) CreateIndex(schema *Schema) (err error) {
return i.indexWithDefinition(i.name, schema, nil)
}
// CreateIndexWithIndexDefinition configures the index and creates it on redis
// IndexDefinition is used to define an index definition for automatic indexing on Hash updates
func (i *Client) CreateIndexWithIndexDefinition(schema *Schema, definition *IndexDefinition) (err error) {
return i.indexWithDefinition(i.name, schema, definition)
}
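// exampleCreateIndexSketch is an illustrative sketch, not part of the original client, showing
// how a schema built from this package's types is combined with an optional IndexDefinition.
// The field name is an assumption, and the definition is taken as a parameter because its
// constructor is not shown in this file.
func exampleCreateIndexSketch(c *Client, def *IndexDefinition) error {
	// Build a minimal schema with one sortable TEXT field.
	sc := NewSchema(Options{}).AddField(Field{
		Name:    "title",
		Type:    TextField,
		Options: TextFieldOptions{Sortable: true},
	})
	if def == nil {
		// Plain FT.CREATE without a definition.
		return c.CreateIndex(sc)
	}
	// FT.CREATE with a definition, enabling automatic indexing on Hash updates.
	return c.CreateIndexWithIndexDefinition(sc, def)
}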
// internal method
func (i *Client) indexWithDefinition(indexName string, schema *Schema, definition *IndexDefinition) (err error) {
args := redis.Args{indexName}
if definition != nil {
args = definition.Serialize(args)
}
// Set flags based on options
args, err = SerializeSchema(schema, args)
if err != nil {
return
}
conn := i.pool.Get()
defer conn.Close()
_, err = conn.Do("FT.CREATE", args...)
return
}
// AddField adds a new field to the index.
func (i *Client) AddField(f Field) error {
args := redis.Args{i.name}
args = append(args, "SCHEMA", "ADD")
args, err := serializeField(f, args)
if err != nil {
return err
}
conn := i.pool.Get()
defer conn.Close()
_, err = conn.Do("FT.ALTER", args...)
return err
}
// Index indexes a list of documents with the default options
func (i *Client) Index(docs ...Document) error {
return i.IndexOptions(DefaultIndexingOptions, docs...)
}
// Search searches the index for the given query, and returns documents,
// the total number of results, or an error if something went wrong
func (i *Client) Search(q *Query) (docs []Document, total int, err error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{i.name}
args = append(args, q.serialize()...)
res, err := redis.Values(conn.Do("FT.SEARCH", args...))
if err != nil {
return
}
if total, err = redis.Int(res[0], nil); err != nil {
return
}
docs = make([]Document, 0, len(res)-1)
skip := 1
scoreIdx := -1
fieldsIdx := -1
payloadIdx := -1
if q.Flags&QueryWithScores != 0 {
scoreIdx = 1
skip++
}
if q.Flags&QueryWithPayloads != 0 {
payloadIdx = skip
skip++
}
if q.Flags&QueryNoContent == 0 {
fieldsIdx = skip
skip++
}
if len(res) > skip {
for i := 1; i < len(res); i += skip {
if d, e := loadDocument(res, i, scoreIdx, payloadIdx, fieldsIdx); e == nil {
docs = append(docs, d)
} else {
log.Print("Error parsing doc: ", e)
}
}
}
return
}
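// exampleSearchSketch is an illustrative usage sketch, not part of the original client: it runs
// a query that was built elsewhere and reports the result counts. The *Query is taken as a
// parameter to avoid assuming any particular query-constructor API.
func exampleSearchSketch(c *Client, q *Query) {
	docs, total, err := c.Search(q)
	if err != nil {
		log.Print("search failed: ", err)
		return
	}
	// total is the overall hit count; docs holds only the returned page of results.
	log.Printf("matched %d documents in total, %d returned", total, len(docs))
}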
// AliasAdd adds an alias to an index.
// Indexes can have more than one alias, though an alias cannot refer to another alias.
func (i *Client) AliasAdd(name string) (err error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{name}.Add(i.name)
_, err = redis.String(conn.Do("FT.ALIASADD", args...))
return
}
// AliasDel deletes an alias from the index.
func (i *Client) AliasDel(name string) (err error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{name}
_, err = redis.String(conn.Do("FT.ALIASDEL", args...))
return
}
// AliasUpdate differs from the AliasAdd in that it will remove the alias association with
// a previous index, if any. AliasAdd will fail, on the other hand, if the alias is already
// associated with another index.
func (i *Client) AliasUpdate(name string) (err error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{name}.Add(i.name)
_, err = redis.String(conn.Do("FT.ALIASUPDATE", args...))
return
}
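// exampleAliasSketch is an illustrative sketch, not part of the original client, of the
// AliasAdd vs AliasUpdate distinction described above: AliasAdd fails if the alias already
// points at another index, while AliasUpdate re-points it. The alias name is an assumption.
func exampleAliasSketch(c *Client) {
	if err := c.AliasAdd("products-current"); err != nil {
		// The alias may already belong to another index; take it over instead.
		if uerr := c.AliasUpdate("products-current"); uerr != nil {
			log.Print("could not claim alias: ", uerr)
		}
	}
}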
// DictAdd adds terms to a dictionary.
func (i *Client) DictAdd(dictionaryName string, terms []string) (newTerms int, err error) {
conn := i.pool.Get()
defer conn.Close()
newTerms = 0
args := redis.Args{dictionaryName}.AddFlat(terms)
newTerms, err = redis.Int(conn.Do("FT.DICTADD", args...))
return
}
// DictDel deletes terms from a dictionary
func (i *Client) DictDel(dictionaryName string, terms []string) (deletedTerms int, err error) {
conn := i.pool.Get()
defer conn.Close()
deletedTerms = 0
args := redis.Args{dictionaryName}.AddFlat(terms)
deletedTerms, err = redis.Int(conn.Do("FT.DICTDEL", args...))
return
}
// DictDump dumps all terms in the given dictionary.
func (i *Client) DictDump(dictionaryName string) (terms []string, err error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{dictionaryName}
terms, err = redis.Strings(conn.Do("FT.DICTDUMP", args...))
return
}
// SpellCheck performs spelling correction on a query, returning suggestions for misspelled terms,
// the total number of results, or an error if something went wrong
func (i *Client) SpellCheck(q *Query, s *SpellCheckOptions) (suggs []MisspelledTerm, total int, err error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{i.name}
args = append(args, q.serialize()...)
args = append(args, s.serialize()...)
res, err := redis.Values(conn.Do("FT.SPELLCHECK", args...))
if err != nil {
return
}
total = 0
suggs = make([]MisspelledTerm, 0)
// Each misspelled term, in turn, is a 3-element array consisting of
// - the constant string "TERM" ( 3-element position 0 -- we don't use it )
// - the term itself ( 3-element position 1 )
// - an array of suggestions for spelling corrections ( 3-element position 2 )
termIdx := 1
suggIdx := 2
for i := 0; i < len(res); i++ {
var termArray []interface{} = nil
termArray, err = redis.Values(res[i], nil)
if err != nil {
return
}
if d, e := loadMisspelledTerm(termArray, termIdx, suggIdx); e == nil {
suggs = append(suggs, d)
if d.Len() > 0 {
total++
}
} else {
log.Print("Error parsing misspelled suggestion: ", e)
}
}
return
}
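// exampleSpellCheckSketch is an illustrative sketch, not part of the original client. The
// *Query and *SpellCheckOptions are taken as parameters because their constructors are not
// shown in this file.
func exampleSpellCheckSketch(c *Client, q *Query, opts *SpellCheckOptions) {
	suggs, total, err := c.SpellCheck(q, opts)
	if err != nil {
		log.Print("spellcheck failed: ", err)
		return
	}
	// total counts only the misspelled terms that actually received suggestions.
	log.Printf("%d of %d misspelled terms have suggestions", total, len(suggs))
}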
// Deprecated: Use AggregateQuery() instead.
func (i *Client) Aggregate(q *AggregateQuery) (aggregateReply [][]string, total int, err error) {
res, err := i.aggregate(q)
if err != nil {
return
}
// has no cursor
if !q.WithCursor {
total, aggregateReply, err = processAggReply(res)
// has cursor
} else {
// Avoid shadowing the named return err with a new declaration.
var partialResults []interface{}
partialResults, err = redis.Values(res[0], nil)
if err != nil {
return aggregateReply, total, err
}
q.Cursor.Id, err = redis.Int(res[1], nil)
if err != nil {
return aggregateReply, total, err
}
total, aggregateReply, err = processAggReply(partialResults)
}
return
}
// AggregateQuery replaces the Aggregate() function. The reply is a slice of maps, with values of either string or []string.
func (i *Client) AggregateQuery(q *AggregateQuery) (total int, aggregateReply []map[string]interface{}, err error) {
res, err := i.aggregate(q)
if err != nil {
return
}
// has no cursor
if !q.WithCursor {
total, aggregateReply, err = processAggQueryReply(res)
// has cursor
} else {
// Avoid shadowing the named return err with a new declaration.
var partialResults []interface{}
partialResults, err = redis.Values(res[0], nil)
if err != nil {
return total, aggregateReply, err
}
q.Cursor.Id, err = redis.Int(res[1], nil)
if err != nil {
return total, aggregateReply, err
}
total, aggregateReply, err = processAggQueryReply(partialResults)
}
return
}
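// exampleAggregateQuerySketch is an illustrative sketch, not part of the original client,
// showing how to consume the AggregateQuery reply, whose map values are either string or
// []string as documented above. The *AggregateQuery is taken as a parameter to avoid assuming
// a constructor.
func exampleAggregateQuerySketch(c *Client, q *AggregateQuery) {
	total, rows, err := c.AggregateQuery(q)
	if err != nil {
		log.Print("aggregate failed: ", err)
		return
	}
	log.Printf("%d rows returned (total %d)", len(rows), total)
	for _, row := range rows {
		for field, value := range row {
			switch v := value.(type) {
			case string:
				log.Printf("%s = %s", field, v)
			case []string:
				log.Printf("%s = %v", field, v)
			}
		}
	}
}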
func (i *Client) aggregate(q *AggregateQuery) (res []interface{}, err error) {
conn := i.pool.Get()
defer conn.Close()
validCursor := q.CursorHasResults()
if !validCursor {
args := redis.Args{i.name}
args = append(args, q.Serialize()...)
res, err = redis.Values(conn.Do("FT.AGGREGATE", args...))
} else {
args := redis.Args{"READ", i.name, q.Cursor.Id}
res, err = redis.Values(conn.Do("FT.CURSOR", args...))
}
if err != nil {
return
}
return
}
// Get - Returns the full contents of a document
func (i *Client) Get(docId string) (doc *Document, err error) {
doc = nil
conn := i.pool.Get()
defer conn.Close()
var reply interface{}
args := redis.Args{i.name, docId}
reply, err = conn.Do("FT.GET", args...)
if reply != nil {
var array_reply []interface{}
array_reply, err = redis.Values(reply, err)
if err != nil {
return
}
if len(array_reply) > 0 {
document := NewDocument(docId, 1)
document.loadFields(array_reply)
doc = &document
}
}
return
}
// MultiGet - Returns the full contents of multiple documents.
// Returns an array with exactly the same number of elements as the number of keys sent to the command.
// Each element in it is either a Document or nil if it was not found.
func (i *Client) MultiGet(documentIds []string) (docs []*Document, err error) {
docs = make([]*Document, len(documentIds))
conn := i.pool.Get()
defer conn.Close()
var reply interface{}
args := redis.Args{i.name}.AddFlat(documentIds)
reply, err = conn.Do("FT.MGET", args...)
if reply != nil {
var array_reply []interface{}
array_reply, err = redis.Values(reply, err)
if err != nil {
return
}
for i := 0; i < len(array_reply); i++ {
if array_reply[i] != nil {
var innerArray []interface{}
innerArray, err = redis.Values(array_reply[i], nil)
if err != nil {
return
}
if len(innerArray) > 0 {
document := NewDocument(documentIds[i], 1)
document.loadFields(innerArray)
docs[i] = &document
}
} else {
docs[i] = nil
}
}
}
return
}
// Explain returns a textual string explaining the query (execution plan)
func (i *Client) Explain(q *Query) (string, error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{i.name}
args = append(args, q.serialize()...)
return redis.String(conn.Do("FT.EXPLAIN", args...))
}
// Drop deletes the index and all the keys associated with it.
func (i *Client) Drop() error {
conn := i.pool.Get()
defer conn.Close()
_, err := conn.Do("FT.DROP", i.name)
return err
}
// DropIndex deletes the secondary index and optionally the associated hashes.
//
// Available since RediSearch 2.0.
//
// By default, DropIndex(), which is a wrapper for the RediSearch FT.DROPINDEX command, does not delete
// the document hashes associated with the index. Setting the argument deleteDocuments to true deletes the hashes as well.
func (i *Client) DropIndex(deleteDocuments bool) error {
conn := i.pool.Get()
defer conn.Close()
var err error = nil
if deleteDocuments {
_, err = conn.Do("FT.DROPINDEX", i.name, "DD")
} else {
_, err = conn.Do("FT.DROPINDEX", i.name)
}
return err
}
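// exampleDropIndexSketch is an illustrative sketch, not part of the original client, of the
// deleteDocuments flag described above: true maps to FT.DROPINDEX ... DD and removes the
// underlying hashes as well, false drops only the index definition.
func exampleDropIndexSketch(c *Client, deleteDocuments bool) {
	if err := c.DropIndex(deleteDocuments); err != nil {
		log.Print("drop index failed: ", err)
	}
}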
// Delete deletes the document from the index and optionally deletes the actual document.
// WARNING: As of RediSearch 2.0 and above, FT.DEL always deletes the underlying document.
// Deprecated: This function is deprecated on RediSearch 2.0 and above, use DeleteDocument() instead
func (i *Client) Delete(docId string, deleteDocument bool) (err error) {
return i.delDoc(docId, deleteDocument)
}
// DeleteDocument deletes the document from the index and also deletes the HASH key in which the document is stored
func (i *Client) DeleteDocument(docId string) (err error) {
return i.delDoc(docId, true)
}
// Internal method to be used by Delete() and DeleteDocument()
func (i *Client) delDoc(docId string, deleteDocument bool) (err error) {
conn := i.pool.Get()
defer conn.Close()
if deleteDocument {
_, err = conn.Do("FT.DEL", i.name, docId, "DD")
} else {
_, err = conn.Do("FT.DEL", i.name, docId)
}
return
}
// Internal method to be used by Info()
func (info *IndexInfo) setTarget(key string, value interface{}) error {
v := reflect.ValueOf(info).Elem()
for i := 0; i < v.NumField(); i++ {
tag := v.Type().Field(i).Tag.Get("redis")
if tag == key {
targetInfo := v.Field(i)
switch targetInfo.Kind() {
case reflect.String:
s, _ := redis.String(value, nil)
targetInfo.SetString(s)
case reflect.Uint64:
u, _ := redis.Uint64(value, nil)
targetInfo.SetUint(u)
case reflect.Float64:
f, _ := redis.Float64(value, nil)
targetInfo.SetFloat(f)
case reflect.Bool:
f, _ := redis.Uint64(value, nil)
if f == 0 {
targetInfo.SetBool(false)
} else {
targetInfo.SetBool(true)
}
default:
panic("Tag set without handler")
}
return nil
}
}
return errors.New("setTarget: No handler defined for :" + key)
}
func sliceIndex(haystack []string, needle string) int {
for pos, elem := range haystack {
if elem == needle {
return pos
}
}
return -1
}
func (info *IndexInfo) loadSchema(values []interface{}, options []string) {
// Values are a list of fields
scOptions := Options{}
for _, opt := range options {
switch strings.ToUpper(opt) {
case "NOFIELDS":
scOptions.NoFieldFlags = true
case "NOFREQS":
scOptions.NoFrequencies = true
case "NOOFFSETS":
scOptions.NoOffsetVectors = true
}
}
sc := NewSchema(scOptions)
for _, specTmp := range values {
// spec, isArr := specTmp.([]string)
// if !isArr {
// panic("Value is not an array of strings!")
// }
rawSpec, err := redis.Values(specTmp, nil)
if err != nil {
log.Printf("Warning: Couldn't read schema. %s\n", err.Error())
continue
}
spec := make([]string, 0)
// Convert all to string, if not already string
for _, elem := range rawSpec {
s, isString := elem.(string)
if !isString {
s, err = redis.String(elem, err)
if err != nil {
log.Printf("Warning: Couldn't read schema. %s\n", err.Error())
continue
}
}
spec = append(spec, s)
}
// Name, Type,
if len(spec) < 3 {
log.Printf("Invalid spec")
continue
}
var options []string
if len(spec) > 3 {
options = spec[3:]
} else {
options = []string{}
}
f := Field{Name: spec[sliceIndex(spec, "identifier")+1]}
switch strings.ToUpper(spec[sliceIndex(spec, "type")+1]) {
case "TAG":
f.Type = TagField
tfOptions := TagFieldOptions{}
if wIdx := sliceIndex(options, "SEPARATOR"); wIdx != -1 {
tfOptions.Separator = options[wIdx+1][0]
}
f.Options = tfOptions
case "GEO":
f.Type = GeoField
case "NUMERIC":
f.Type = NumericField
nfOptions := NumericFieldOptions{}
if sliceIndex(options, "SORTABLE") != -1 {
nfOptions.Sortable = true
}
f.Options = nfOptions
case "TEXT":
f.Type = TextField
tfOptions := TextFieldOptions{}
if sliceIndex(options, "SORTABLE") != -1 {
tfOptions.Sortable = true
}
if wIdx := sliceIndex(options, "WEIGHT"); wIdx != -1 && wIdx+1 != len(spec) {
weightString := options[wIdx+1]
weight64, _ := strconv.ParseFloat(weightString, 32)
tfOptions.Weight = float32(weight64)
}
f.Options = tfOptions
case "VECTOR":
f.Type = VectorField
f.Options = VectorFieldOptions{}
}
sc = sc.AddField(f)
}
info.Schema = *sc
}
// Info - Get information about the index. This can also be used to check if the
// index exists
func (i *Client) Info() (*IndexInfo, error) {
conn := i.pool.Get()
defer conn.Close()
res, err := redis.Values(conn.Do("FT.INFO", i.name))
if err != nil {
return nil, err
}
ret := IndexInfo{}
var schemaAttributes []interface{}
var indexOptions []string
// Iterate over the values
for ii := 0; ii < len(res); ii += 2 {
key, _ := redis.String(res[ii], nil)
if err := ret.setTarget(key, res[ii+1]); err == nil {
continue
}
switch key {
case "index_options":
indexOptions, _ = redis.Strings(res[ii+1], nil)
case "fields":
schemaAttributes, _ = redis.Values(res[ii+1], nil)
case "attributes":
for _, attr := range res[ii+1].([]interface{}) {
l := len(attr.([]interface{}))
schemaAttributes = append(schemaAttributes, attr.([]interface{})[3:l])
}
}
}
if schemaAttributes != nil {
ret.loadSchema(schemaAttributes, indexOptions)
}
return &ret, nil
}
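// exampleIndexExistsSketch is an illustrative sketch, not part of the original client, of the
// "check if the index exists" use of Info mentioned above: FT.INFO fails for a missing index,
// so a non-nil error is treated as "does not exist" here (a simplification, since other
// errors are folded in as well).
func exampleIndexExistsSketch(c *Client) bool {
	_, err := c.Info()
	return err == nil
}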
// SetConfig sets a runtime configuration option
func (i *Client) SetConfig(option string, value string) (string, error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{"SET", option, value}
return redis.String(conn.Do("FT.CONFIG", args...))
}
// GetConfig gets the value of a runtime configuration option
func (i *Client) GetConfig(option string) (map[string]string, error) { | args := redis.Args{"GET", option}
values, err := redis.Values(conn.Do("FT.CONFIG", args...))
if err != nil {
return nil, err
}
m := make(map[string]string)
valLen := len(values)
for i := 0; i < valLen; i++ {
kvs, _ := redis.Strings(values[i], nil)
if kvs != nil && len(kvs) == 2 {
m[kvs[0]] = kvs[1]
}
}
return m, nil
}
// GetTagVals returns the distinct tags indexed in a Tag field
func (i *Client) GetTagVals(index string, fieldName string) ([]string, error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{index, fieldName}
return redis.Strings(conn.Do("FT.TAGVALS", args...))
}
// SynAdd adds a synonym group.
// Deprecated: This function is no longer supported on RediSearch 2.0 and above, use SynUpdate instead
func (i *Client) SynAdd(indexName string, terms []string) (int64, error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{indexName}.AddFlat(terms)
return redis.Int64(conn.Do("FT.SYNADD", args...))
}
// SynUpdate updates a synonym group with additional terms.
func (i *Client) SynUpdate(indexName string, synonymGroupId int64, terms []string) (string, error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{indexName, synonymGroupId}.AddFlat(terms)
return redis.String(conn.Do("FT.SYNUPDATE", args...))
}
// SynDump dumps the contents of a synonym group.
func (i *Client) SynDump(indexName string) (map[string][]int64, error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{indexName}
values, err := redis.Values(conn.Do("FT.SYNDUMP", args...))
if err != nil {
return nil, err
}
valLen := len(values)
if valLen%2 != 0 {
return nil, errors.New("SynDump: expects even number of values result")
}
m := make(map[string][]int64, valLen/2)
for i := 0; i < valLen; i += 2 {
key := values[i].([]byte)
gids, err := redis.Int64s(values[i+1], nil)
if err != nil {
return nil, err
}
m[string(key)] = gids
}
return m, nil
}
// AddHash adds a document to the index from an existing HASH key in Redis.
// Deprecated: This function is no longer supported on RediSearch 2.0 and above, use HSET instead
// See the example ExampleClient_CreateIndexWithIndexDefinition for a deeper understanding of how to move towards using hashes in your application
func (i *Client) AddHash(docId string, score float32, language string, replace bool) (string, error) {
conn := i.pool.Get()
defer conn.Close()
args := redis.Args{i.name, docId, score}
if language != "" {
args = args.Add("LANGUAGE", language)
}
if replace {
args = args.Add("REPLACE")
}
return redis.String(conn.Do("FT.ADDHASH", args...))
}
// List returns a list of all existing indexes.
func (i *Client) List() ([]string, error) {
conn := i.pool.Get()
defer conn.Close()
res, err := redis.Values(conn.Do("FT._LIST"))
if err != nil {
return nil, err
}
var indexes []string
// Iterate over the values
for ii := 0; ii < len(res); ii += 1 {
key, _ := redis.String(res[ii], nil)
indexes = append(indexes, key)
}
return indexes, nil
} | conn := i.pool.Get()
defer conn.Close()
|
User.ts | import { model, Schema } from "mongoose";
const UserSchema = new Schema({
Name: {
type: String,
required: true
},
Birthday: {
type: Date,
required: true
},
Gender: {
type: String,
required: false
},
Lists: [{
type: Schema.Types.ObjectId,
required: true | type: String,
required: true,
unique: true
},
Password: {
type: String,
required: true
},
}, {
versionKey: false, // Be aware of the consequences of setting this to false
});
module.exports = model("User", UserSchema); | }],
Username: { |
users_analysis.rs | #[derive(Queryable, Serialize, Deserialize, Debug)]
pub struct | {
pub count_answers: i64,
pub count_is_best_answers: i64,
pub count_questions: i64,
}
| UserAnalysis |
test_data_source_rule.py | import pytest
import json
import os
from lkmltools.linter.rules.filerules.data_source_rule import DataSourceRule
from conftest import get_lookml_from_raw_lookml
def test_run1():
raw_lookml = """
view: aview {
sql_table_name: bqdw.engagement_score ;;
}
"""
lookml = get_lookml_from_raw_lookml(raw_lookml, "aview.view")
relevant, passed = DataSourceRule().run(lookml)
assert relevant
assert passed
if os.path.exists(lookml.infilepath):
os.remove(lookml.infilepath)
def test_run2():
raw_lookml = """
view: aview {
dimension: memberID {
type: string
}
}
"""
lookml = get_lookml_from_raw_lookml(raw_lookml, "aview.view")
relevant, passed = DataSourceRule().run(lookml)
assert relevant
assert not passed
if os.path.exists(lookml.infilepath):
os.remove(lookml.infilepath)
def test_run3():
raw_lookml = """
view: aview {
derived_table: {
sql: SELECT * from table ;;
}
dimension: memberID {
type: string
}
}
"""
lookml = get_lookml_from_raw_lookml(raw_lookml, "aview.view")
relevant, passed = DataSourceRule().run(lookml)
assert relevant
assert passed
if os.path.exists(lookml.infilepath):
os.remove(lookml.infilepath)
def | ():
raw_lookml = """
connection: "datawarehouse"
include: "*.view.lkml"
explore: an_explore {
}
"""
lookml = get_lookml_from_raw_lookml(raw_lookml, "amodel.model")
relevant, passed = DataSourceRule().run(lookml)
assert not relevant
assert not passed
if os.path.exists(lookml.infilepath):
os.remove(lookml.infilepath)
| test_run4 |
useNonNullableContext.ts | import { Context } from 'react'
import { useContext } from 'react'
export function useNonNullableContext<T>(context: Context<T | null>) {
const store = useContext(context) | }
return store
} | if (store === null) {
throw new Error('Store cannot be null, please add a context provider') |
errors.rs | //! Errors for this crate.
use crate::parser;
use std::error;
use std::fmt;
/// An error type for this crate.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Error<'input> {
/// An error occurred while parsing.
Parser(parser::Error<'input>),
/// Received more input than expected.
MoreInput,
}
impl<'input> fmt::Display for Error<'input> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match *self {
Parser(ref p) => write!(fmt, "parser error: {}", p),
MoreInput => write!(fmt, "more input"),
}
}
}
impl<'input> error::Error for Error<'input> {
fn | (&self) -> &str {
use self::Error::*;
match *self {
Parser(ref p) => p.description(),
MoreInput => "more input",
}
}
}
impl<'input> From<parser::Error<'input>> for Error<'input> {
fn from(value: parser::Error<'input>) -> Self {
Error::Parser(value)
}
}
| description |
utils.go | package vault
import (
"os"
"regexp"
"runtime"
"strings"
)
// ParsePath splits the given path string into its respective secret path
// and contained key parts
func ParsePath(path string) (secret, key string) {
secret = path
if idx := strings.LastIndex(path, ":"); idx >= 0 {
secret = path[:idx]
key = path[idx+1:]
}
secret = Canonicalize(secret)
return
}
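// exampleParsePathSketch is an illustrative sketch, not part of the original file, of how
// ParsePath splits a path: everything after the last ':' is the key, and the rest is the
// canonicalized secret path. The example path is an assumption.
func exampleParsePathSketch() {
	secret, key := ParsePath("/secret/ops//api:token")
	// secret == "secret/ops/api" (canonicalized), key == "token"
	_, _ = secret, key
}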
// PathHasKey returns true if the given path has a key specified in its syntax.
// False otherwise.
func | (path string) bool {
_, key := ParsePath(path)
return key != ""
}
func Canonicalize(p string) string {
p = strings.TrimSuffix(p, "/")
p = strings.TrimPrefix(p, "/")
re := regexp.MustCompile("//+")
p = re.ReplaceAllString(p, "/")
return p
}
func userHomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("USERPROFILE")
if home == "" {
home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
}
return home
}
return os.Getenv("HOME")
}
| PathHasKey |
0059_auto_20190314_1744.py | # Generated by Django 2.1.7 on 2019-03-14 17:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("api", "0058_auto_20190312_1716")]
operations = [
migrations.AlterField(
model_name="channel",
name="kind",
field=models.CharField(
choices=[
("email", "Email"), | ("slack", "Slack"),
("pd", "PagerDuty"),
("pagertree", "PagerTree"),
("pagerteam", "Pager Team"),
("po", "Pushover"),
("pushbullet", "Pushbullet"),
("opsgenie", "OpsGenie"),
("victorops", "VictorOps"),
("discord", "Discord"),
("telegram", "Telegram"),
("sms", "SMS"),
("zendesk", "Zendesk"),
("trello", "Trello"),
("matrix", "Matrix"),
],
max_length=20,
),
)
] | ("webhook", "Webhook"),
("hipchat", "HipChat"), |
cri_stats_provider_test.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"math/rand"
"os"
"path/filepath"
"runtime"
"testing"
"time"
gomock "github.com/golang/mock/gomock"
cadvisorfs "github.com/google/cadvisor/fs"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
critest "k8s.io/cri-api/pkg/apis/testing"
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
"k8s.io/kubernetes/pkg/kubelet/leaky"
kubepodtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/volume"
)
const (
offsetInodeUsage = iota
offsetUsage
)
const (
seedRoot = 0
seedKubelet = 200
seedMisc = 300
seedSandbox0 = 1000
seedContainer0 = 2000
seedSandbox1 = 3000
seedContainer1 = 4000
seedContainer2 = 5000
seedSandbox2 = 6000
seedContainer3 = 7000
seedSandbox3 = 8000
)
const (
pName0 = "pod0"
pName1 = "pod1"
pName2 = "pod2"
)
const (
cName0 = "container0-name"
cName1 = "container1-name"
cName2 = "container2-name"
cName3 = "container3-name"
cName5 = "container5-name"
cName6 = "container6-name"
cName7 = "container7-name"
cName8 = "container8-name"
)
func TestCRIListPodStats(t *testing.T) {
var (
imageFsMountpoint = "/test/mount/point"
unknownMountpoint = "/unknown/mount/point"
imageFsInfo = getTestFsInfo(2000)
rootFsInfo = getTestFsInfo(1000)
sandbox0 = makeFakePodSandbox("sandbox0-name", "sandbox0-uid", "sandbox0-ns", false)
sandbox0Cgroup = "/" + cm.GetPodCgroupNameSuffix(types.UID(sandbox0.PodSandboxStatus.Metadata.Uid))
container0 = makeFakeContainer(sandbox0, cName0, 0, false)
containerStats0 = makeFakeContainerStats(container0, imageFsMountpoint)
containerLogStats0 = makeFakeLogStats(1000)
container1 = makeFakeContainer(sandbox0, cName1, 0, false)
containerStats1 = makeFakeContainerStats(container1, unknownMountpoint)
containerLogStats1 = makeFakeLogStats(2000)
sandbox1 = makeFakePodSandbox("sandbox1-name", "sandbox1-uid", "sandbox1-ns", false)
sandbox1Cgroup = "/" + cm.GetPodCgroupNameSuffix(types.UID(sandbox1.PodSandboxStatus.Metadata.Uid))
container2 = makeFakeContainer(sandbox1, cName2, 0, false)
containerStats2 = makeFakeContainerStats(container2, imageFsMountpoint)
containerLogStats2 = makeFakeLogStats(3000)
sandbox2 = makeFakePodSandbox("sandbox2-name", "sandbox2-uid", "sandbox2-ns", false)
sandbox2Cgroup = "/" + cm.GetPodCgroupNameSuffix(types.UID(sandbox2.PodSandboxStatus.Metadata.Uid))
container3 = makeFakeContainer(sandbox2, cName3, 0, true)
containerStats3 = makeFakeContainerStats(container3, imageFsMountpoint)
container4 = makeFakeContainer(sandbox2, cName3, 1, false)
containerStats4 = makeFakeContainerStats(container4, imageFsMountpoint)
containerLogStats4 = makeFakeLogStats(4000)
// Running pod with a terminated container and a running container
sandbox3 = makeFakePodSandbox("sandbox3-name", "sandbox3-uid", "sandbox3-ns", false)
sandbox3Cgroup = "/" + cm.GetPodCgroupNameSuffix(types.UID(sandbox3.PodSandboxStatus.Metadata.Uid))
container5 = makeFakeContainer(sandbox3, cName5, 0, true)
containerStats5 = makeFakeContainerStats(container5, imageFsMountpoint)
containerLogStats5 = makeFakeLogStats(5000)
container8 = makeFakeContainer(sandbox3, cName8, 0, false)
containerStats8 = makeFakeContainerStats(container8, imageFsMountpoint)
containerLogStats8 = makeFakeLogStats(6000)
// Terminated pod sandbox
sandbox4 = makeFakePodSandbox("sandbox1-name", "sandbox1-uid", "sandbox1-ns", true)
container6 = makeFakeContainer(sandbox4, cName6, 0, true)
containerStats6 = makeFakeContainerStats(container6, imageFsMountpoint)
// Terminated pod
sandbox5 = makeFakePodSandbox("sandbox1-name", "sandbox5-uid", "sandbox1-ns", true)
container7 = makeFakeContainer(sandbox5, cName7, 0, true)
containerStats7 = makeFakeContainerStats(container7, imageFsMountpoint)
podLogName0 = "pod-log-0"
podLogName1 = "pod-log-1"
podLogStats0 = makeFakeLogStats(5000)
podLogStats1 = makeFakeLogStats(6000)
)
var (
mockCadvisor = new(cadvisortest.Mock)
mockRuntimeCache = new(kubecontainertest.MockRuntimeCache)
mockPodManager = new(kubepodtest.MockManager)
resourceAnalyzer = new(fakeResourceAnalyzer)
fakeRuntimeService = critest.NewFakeRuntimeService()
fakeImageService = critest.NewFakeImageService()
)
infos := map[string]cadvisorapiv2.ContainerInfo{
"/": getTestContainerInfo(seedRoot, "", "", ""),
"/kubelet": getTestContainerInfo(seedKubelet, "", "", ""),
"/system": getTestContainerInfo(seedMisc, "", "", ""),
sandbox0.PodSandboxStatus.Id: getTestContainerInfo(seedSandbox0, pName0, sandbox0.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName),
sandbox0Cgroup: getTestContainerInfo(seedSandbox0, "", "", ""),
container0.ContainerStatus.Id: getTestContainerInfo(seedContainer0, pName0, sandbox0.PodSandboxStatus.Metadata.Namespace, cName0),
container1.ContainerStatus.Id: getTestContainerInfo(seedContainer1, pName0, sandbox0.PodSandboxStatus.Metadata.Namespace, cName1),
sandbox1.PodSandboxStatus.Id: getTestContainerInfo(seedSandbox1, pName1, sandbox1.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName),
sandbox1Cgroup: getTestContainerInfo(seedSandbox1, "", "", ""),
container2.ContainerStatus.Id: getTestContainerInfo(seedContainer2, pName1, sandbox1.PodSandboxStatus.Metadata.Namespace, cName2),
sandbox2.PodSandboxStatus.Id: getTestContainerInfo(seedSandbox2, pName2, sandbox2.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName),
sandbox2Cgroup: getTestContainerInfo(seedSandbox2, "", "", ""),
container4.ContainerStatus.Id: getTestContainerInfo(seedContainer3, pName2, sandbox2.PodSandboxStatus.Metadata.Namespace, cName3),
sandbox3Cgroup: getTestContainerInfo(seedSandbox3, "", "", ""),
}
options := cadvisorapiv2.RequestOptions{
IdType: cadvisorapiv2.TypeName,
Count: 2,
Recursive: true,
}
mockCadvisor.
On("ContainerInfoV2", "/", options).Return(infos, nil).
On("RootFsInfo").Return(rootFsInfo, nil).
On("GetDirFsInfo", imageFsMountpoint).Return(imageFsInfo, nil).
On("GetDirFsInfo", unknownMountpoint).Return(cadvisorapiv2.FsInfo{}, cadvisorfs.ErrNoSuchDevice)
fakeRuntimeService.SetFakeSandboxes([]*critest.FakePodSandbox{
sandbox0, sandbox1, sandbox2, sandbox3, sandbox4, sandbox5,
})
fakeRuntimeService.SetFakeContainers([]*critest.FakeContainer{
container0, container1, container2, container3, container4, container5, container6, container7, container8,
})
fakeRuntimeService.SetFakeContainerStats([]*runtimeapi.ContainerStats{
containerStats0, containerStats1, containerStats2, containerStats3, containerStats4, containerStats5, containerStats6, containerStats7, containerStats8,
})
ephemeralVolumes := makeFakeVolumeStats([]string{"ephVolume1", "ephVolumes2"})
persistentVolumes := makeFakeVolumeStats([]string{"persisVolume1", "persisVolumes2"})
resourceAnalyzer.podVolumeStats = serverstats.PodVolumeStats{
EphemeralVolumes: ephemeralVolumes,
PersistentVolumes: persistentVolumes,
}
fakeLogStats := map[string]*volume.Metrics{
kuberuntime.BuildContainerLogsDirectory("sandbox0-ns", "sandbox0-name", types.UID("sandbox0-uid"), cName0): containerLogStats0,
kuberuntime.BuildContainerLogsDirectory("sandbox0-ns", "sandbox0-name", types.UID("sandbox0-uid"), cName1): containerLogStats1,
kuberuntime.BuildContainerLogsDirectory("sandbox1-ns", "sandbox1-name", types.UID("sandbox1-uid"), cName2): containerLogStats2,
kuberuntime.BuildContainerLogsDirectory("sandbox2-ns", "sandbox2-name", types.UID("sandbox2-uid"), cName3): containerLogStats4,
kuberuntime.BuildContainerLogsDirectory("sandbox3-ns", "sandbox3-name", types.UID("sandbox3-uid"), cName5): containerLogStats5,
kuberuntime.BuildContainerLogsDirectory("sandbox3-ns", "sandbox3-name", types.UID("sandbox3-uid"), cName8): containerLogStats8,
filepath.Join(kuberuntime.BuildPodLogsDirectory("sandbox0-ns", "sandbox0-name", types.UID("sandbox0-uid")), podLogName0): podLogStats0,
filepath.Join(kuberuntime.BuildPodLogsDirectory("sandbox1-ns", "sandbox1-name", types.UID("sandbox1-uid")), podLogName1): podLogStats1,
}
fakeLogStatsProvider := NewFakeLogMetricsService(fakeLogStats)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
fakeOS := &kubecontainertest.FakeOS{}
fakeOS.ReadDirFn = func(path string) ([]os.FileInfo, error) {
var fileInfos []os.FileInfo
mockFI := kubecontainertest.NewMockFileInfo(ctrl)
switch path {
case kuberuntime.BuildPodLogsDirectory("sandbox0-ns", "sandbox0-name", types.UID("sandbox0-uid")):
mockFI.EXPECT().Name().Return(podLogName0)
case kuberuntime.BuildPodLogsDirectory("sandbox1-ns", "sandbox1-name", types.UID("sandbox1-uid")):
mockFI.EXPECT().Name().Return(podLogName1)
default:
return nil, nil
}
mockFI.EXPECT().IsDir().Return(false)
fileInfos = append(fileInfos, mockFI)
return fileInfos, nil
}
provider := NewCRIStatsProvider(
mockCadvisor,
resourceAnalyzer,
mockPodManager,
mockRuntimeCache,
fakeRuntimeService,
fakeImageService,
fakeLogStatsProvider,
fakeOS,
)
stats, err := provider.ListPodStats()
assert := assert.New(t)
assert.NoError(err)
assert.Equal(4, len(stats))
podStatsMap := make(map[statsapi.PodReference]statsapi.PodStats)
for _, s := range stats {
podStatsMap[s.PodRef] = s
}
p0 := podStatsMap[statsapi.PodReference{Name: "sandbox0-name", UID: "sandbox0-uid", Namespace: "sandbox0-ns"}]
assert.Equal(sandbox0.CreatedAt, p0.StartTime.UnixNano())
assert.Equal(2, len(p0.Containers))
checkEphemeralStorageStats(assert, p0, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats0, containerStats1},
[]*volume.Metrics{containerLogStats0, containerLogStats1}, podLogStats0)
containerStatsMap := make(map[string]statsapi.ContainerStats)
for _, s := range p0.Containers {
containerStatsMap[s.Name] = s
}
c0 := containerStatsMap[cName0]
assert.Equal(container0.CreatedAt, c0.StartTime.UnixNano())
checkCRICPUAndMemoryStats(assert, c0, infos[container0.ContainerStatus.Id].Stats[0])
checkCRIRootfsStats(assert, c0, containerStats0, &imageFsInfo)
checkCRILogsStats(assert, c0, &rootFsInfo, containerLogStats0)
c1 := containerStatsMap[cName1]
assert.Equal(container1.CreatedAt, c1.StartTime.UnixNano())
checkCRICPUAndMemoryStats(assert, c1, infos[container1.ContainerStatus.Id].Stats[0])
checkCRIRootfsStats(assert, c1, containerStats1, nil)
checkCRILogsStats(assert, c1, &rootFsInfo, containerLogStats1)
checkCRINetworkStats(assert, p0.Network, infos[sandbox0.PodSandboxStatus.Id].Stats[0].Network)
checkCRIPodCPUAndMemoryStats(assert, p0, infos[sandbox0Cgroup].Stats[0])
p1 := podStatsMap[statsapi.PodReference{Name: "sandbox1-name", UID: "sandbox1-uid", Namespace: "sandbox1-ns"}]
assert.Equal(sandbox1.CreatedAt, p1.StartTime.UnixNano())
assert.Equal(1, len(p1.Containers))
checkEphemeralStorageStats(assert, p1, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats2},
[]*volume.Metrics{containerLogStats2}, podLogStats1)
c2 := p1.Containers[0]
assert.Equal(cName2, c2.Name)
assert.Equal(container2.CreatedAt, c2.StartTime.UnixNano())
checkCRICPUAndMemoryStats(assert, c2, infos[container2.ContainerStatus.Id].Stats[0])
checkCRIRootfsStats(assert, c2, containerStats2, &imageFsInfo)
checkCRILogsStats(assert, c2, &rootFsInfo, containerLogStats2)
checkCRINetworkStats(assert, p1.Network, infos[sandbox1.PodSandboxStatus.Id].Stats[0].Network)
checkCRIPodCPUAndMemoryStats(assert, p1, infos[sandbox1Cgroup].Stats[0])
p2 := podStatsMap[statsapi.PodReference{Name: "sandbox2-name", UID: "sandbox2-uid", Namespace: "sandbox2-ns"}]
assert.Equal(sandbox2.CreatedAt, p2.StartTime.UnixNano())
assert.Equal(1, len(p2.Containers))
checkEphemeralStorageStats(assert, p2, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats4},
[]*volume.Metrics{containerLogStats4}, nil)
c3 := p2.Containers[0]
assert.Equal(cName3, c3.Name)
assert.Equal(container4.CreatedAt, c3.StartTime.UnixNano())
checkCRICPUAndMemoryStats(assert, c3, infos[container4.ContainerStatus.Id].Stats[0])
checkCRIRootfsStats(assert, c3, containerStats4, &imageFsInfo)
checkCRILogsStats(assert, c3, &rootFsInfo, containerLogStats4)
checkCRINetworkStats(assert, p2.Network, infos[sandbox2.PodSandboxStatus.Id].Stats[0].Network)
checkCRIPodCPUAndMemoryStats(assert, p2, infos[sandbox2Cgroup].Stats[0])
p3 := podStatsMap[statsapi.PodReference{Name: "sandbox3-name", UID: "sandbox3-uid", Namespace: "sandbox3-ns"}]
assert.Equal(sandbox3.CreatedAt, p3.StartTime.UnixNano())
assert.Equal(1, len(p3.Containers))
c8 := p3.Containers[0]
assert.Equal(cName8, c8.Name)
assert.Equal(container8.CreatedAt, c8.StartTime.UnixNano())
assert.NotNil(c8.CPU.Time)
assert.NotNil(c8.Memory.Time)
checkCRIPodCPUAndMemoryStats(assert, p3, infos[sandbox3Cgroup].Stats[0])
mockCadvisor.AssertExpectations(t)
}
func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
var (
imageFsMountpoint = "/test/mount/point"
unknownMountpoint = "/unknown/mount/point"
sandbox0 = makeFakePodSandbox("sandbox0-name", "sandbox0-uid", "sandbox0-ns", false)
sandbox0Cgroup = "/" + cm.GetPodCgroupNameSuffix(types.UID(sandbox0.PodSandboxStatus.Metadata.Uid))
container0 = makeFakeContainer(sandbox0, cName0, 0, false)
containerStats0 = makeFakeContainerStats(container0, imageFsMountpoint)
container1 = makeFakeContainer(sandbox0, cName1, 0, false)
containerStats1 = makeFakeContainerStats(container1, unknownMountpoint)
sandbox1 = makeFakePodSandbox("sandbox1-name", "sandbox1-uid", "sandbox1-ns", false)
sandbox1Cgroup = "/" + cm.GetPodCgroupNameSuffix(types.UID(sandbox1.PodSandboxStatus.Metadata.Uid))
container2 = makeFakeContainer(sandbox1, cName2, 0, false)
containerStats2 = makeFakeContainerStats(container2, imageFsMountpoint)
sandbox2 = makeFakePodSandbox("sandbox2-name", "sandbox2-uid", "sandbox2-ns", false)
sandbox2Cgroup = "/" + cm.GetPodCgroupNameSuffix(types.UID(sandbox2.PodSandboxStatus.Metadata.Uid))
container3 = makeFakeContainer(sandbox2, cName3, 0, true)
containerStats3 = makeFakeContainerStats(container3, imageFsMountpoint)
container4 = makeFakeContainer(sandbox2, cName3, 1, false)
containerStats4 = makeFakeContainerStats(container4, imageFsMountpoint)
// Running pod with a terminated container and a running container
sandbox3 = makeFakePodSandbox("sandbox3-name", "sandbox3-uid", "sandbox3-ns", false)
sandbox3Cgroup = "/" + cm.GetPodCgroupNameSuffix(types.UID(sandbox3.PodSandboxStatus.Metadata.Uid))
container5 = makeFakeContainer(sandbox3, cName5, 0, true)
containerStats5 = makeFakeContainerStats(container5, imageFsMountpoint)
container8 = makeFakeContainer(sandbox3, cName8, 0, false)
containerStats8 = makeFakeContainerStats(container8, imageFsMountpoint)
// Terminated pod sandbox
sandbox4 = makeFakePodSandbox("sandbox1-name", "sandbox1-uid", "sandbox1-ns", true)
container6 = makeFakeContainer(sandbox4, cName6, 0, true)
containerStats6 = makeFakeContainerStats(container6, imageFsMountpoint)
// Terminated pod
sandbox5 = makeFakePodSandbox("sandbox1-name", "sandbox5-uid", "sandbox1-ns", true)
container7 = makeFakeContainer(sandbox5, cName7, 0, true)
containerStats7 = makeFakeContainerStats(container7, imageFsMountpoint)
)
var (
mockCadvisor = new(cadvisortest.Mock)
mockRuntimeCache = new(kubecontainertest.MockRuntimeCache)
mockPodManager = new(kubepodtest.MockManager)
resourceAnalyzer = new(fakeResourceAnalyzer)
fakeRuntimeService = critest.NewFakeRuntimeService()
)
infos := map[string]cadvisorapiv2.ContainerInfo{
"/": getTestContainerInfo(seedRoot, "", "", ""),
"/kubelet": getTestContainerInfo(seedKubelet, "", "", ""),
"/system": getTestContainerInfo(seedMisc, "", "", ""),
sandbox0.PodSandboxStatus.Id: getTestContainerInfo(seedSandbox0, pName0, sandbox0.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName),
sandbox0Cgroup: getTestContainerInfo(seedSandbox0, "", "", ""),
container0.ContainerStatus.Id: getTestContainerInfo(seedContainer0, pName0, sandbox0.PodSandboxStatus.Metadata.Namespace, cName0),
container1.ContainerStatus.Id: getTestContainerInfo(seedContainer1, pName0, sandbox0.PodSandboxStatus.Metadata.Namespace, cName1),
sandbox1.PodSandboxStatus.Id: getTestContainerInfo(seedSandbox1, pName1, sandbox1.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName),
sandbox1Cgroup: getTestContainerInfo(seedSandbox1, "", "", ""),
container2.ContainerStatus.Id: getTestContainerInfo(seedContainer2, pName1, sandbox1.PodSandboxStatus.Metadata.Namespace, cName2),
sandbox2.PodSandboxStatus.Id: getTestContainerInfo(seedSandbox2, pName2, sandbox2.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName),
sandbox2Cgroup: getTestContainerInfo(seedSandbox2, "", "", ""),
container4.ContainerStatus.Id: getTestContainerInfo(seedContainer3, pName2, sandbox2.PodSandboxStatus.Metadata.Namespace, cName3),
sandbox3Cgroup: getTestContainerInfo(seedSandbox3, "", "", ""),
}
options := cadvisorapiv2.RequestOptions{
IdType: cadvisorapiv2.TypeName,
Count: 2,
Recursive: true,
}
mockCadvisor.
On("ContainerInfoV2", "/", options).Return(infos, nil)
fakeRuntimeService.SetFakeSandboxes([]*critest.FakePodSandbox{
sandbox0, sandbox1, sandbox2, sandbox3, sandbox4, sandbox5,
})
fakeRuntimeService.SetFakeContainers([]*critest.FakeContainer{
container0, container1, container2, container3, container4, container5, container6, container7, container8,
})
fakeRuntimeService.SetFakeContainerStats([]*runtimeapi.ContainerStats{
containerStats0, containerStats1, containerStats2, containerStats3, containerStats4, containerStats5, containerStats6, containerStats7, containerStats8,
})
ephemeralVolumes := makeFakeVolumeStats([]string{"ephVolume1", "ephVolumes2"})
persistentVolumes := makeFakeVolumeStats([]string{"persisVolume1", "persisVolumes2"})
resourceAnalyzer.podVolumeStats = serverstats.PodVolumeStats{
EphemeralVolumes: ephemeralVolumes,
PersistentVolumes: persistentVolumes,
}
provider := NewCRIStatsProvider(
mockCadvisor,
resourceAnalyzer,
mockPodManager,
mockRuntimeCache,
fakeRuntimeService,
nil,
nil,
&kubecontainertest.FakeOS{},
)
stats, err := provider.ListPodCPUAndMemoryStats()
assert := assert.New(t)
assert.NoError(err)
assert.Equal(4, len(stats))
podStatsMap := make(map[statsapi.PodReference]statsapi.PodStats)
for _, s := range stats {
podStatsMap[s.PodRef] = s
}
p0 := podStatsMap[statsapi.PodReference{Name: "sandbox0-name", UID: "sandbox0-uid", Namespace: "sandbox0-ns"}]
assert.Equal(sandbox0.CreatedAt, p0.StartTime.UnixNano())
assert.Equal(2, len(p0.Containers))
assert.Nil(p0.EphemeralStorage)
assert.Nil(p0.VolumeStats)
assert.Nil(p0.Network)
checkCRIPodCPUAndMemoryStats(assert, p0, infos[sandbox0Cgroup].Stats[0])
containerStatsMap := make(map[string]statsapi.ContainerStats)
for _, s := range p0.Containers {
containerStatsMap[s.Name] = s
}
c0 := containerStatsMap[cName0]
assert.Equal(container0.CreatedAt, c0.StartTime.UnixNano())
checkCRICPUAndMemoryStats(assert, c0, infos[container0.ContainerStatus.Id].Stats[0])
assert.Nil(c0.Rootfs)
assert.Nil(c0.Logs)
assert.Nil(c0.Accelerators)
assert.Nil(c0.UserDefinedMetrics)
c1 := containerStatsMap[cName1]
assert.Equal(container1.CreatedAt, c1.StartTime.UnixNano())
checkCRICPUAndMemoryStats(assert, c1, infos[container1.ContainerStatus.Id].Stats[0])
assert.Nil(c1.Rootfs)
assert.Nil(c1.Logs)
assert.Nil(c1.Accelerators)
assert.Nil(c1.UserDefinedMetrics)
p1 := podStatsMap[statsapi.PodReference{Name: "sandbox1-name", UID: "sandbox1-uid", Namespace: "sandbox1-ns"}]
assert.Equal(sandbox1.CreatedAt, p1.StartTime.UnixNano())
assert.Equal(1, len(p1.Containers))
assert.Nil(p1.EphemeralStorage)
assert.Nil(p1.VolumeStats)
assert.Nil(p1.Network)
checkCRIPodCPUAndMemoryStats(assert, p1, infos[sandbox1Cgroup].Stats[0])
c2 := p1.Containers[0]
assert.Equal(cName2, c2.Name)
assert.Equal(container2.CreatedAt, c2.StartTime.UnixNano())
checkCRICPUAndMemoryStats(assert, c2, infos[container2.ContainerStatus.Id].Stats[0])
assert.Nil(c2.Rootfs)
assert.Nil(c2.Logs)
assert.Nil(c2.Accelerators)
assert.Nil(c2.UserDefinedMetrics)
p2 := podStatsMap[statsapi.PodReference{Name: "sandbox2-name", UID: "sandbox2-uid", Namespace: "sandbox2-ns"}]
assert.Equal(sandbox2.CreatedAt, p2.StartTime.UnixNano())
assert.Equal(1, len(p2.Containers))
assert.Nil(p2.EphemeralStorage)
assert.Nil(p2.VolumeStats)
assert.Nil(p2.Network)
checkCRIPodCPUAndMemoryStats(assert, p2, infos[sandbox2Cgroup].Stats[0])
c3 := p2.Containers[0]
assert.Equal(cName3, c3.Name)
assert.Equal(container4.CreatedAt, c3.StartTime.UnixNano())
checkCRICPUAndMemoryStats(assert, c3, infos[container4.ContainerStatus.Id].Stats[0])
assert.Nil(c3.Rootfs)
assert.Nil(c3.Logs)
assert.Nil(c3.Accelerators)
assert.Nil(c3.UserDefinedMetrics)
p3 := podStatsMap[statsapi.PodReference{Name: "sandbox3-name", UID: "sandbox3-uid", Namespace: "sandbox3-ns"}]
assert.Equal(sandbox3.CreatedAt, p3.StartTime.UnixNano())
assert.Equal(1, len(p3.Containers))
c8 := p3.Containers[0]
assert.Equal(cName8, c8.Name)
assert.Equal(container8.CreatedAt, c8.StartTime.UnixNano())
assert.NotNil(c8.CPU.Time)
assert.NotNil(c8.Memory.Time)
checkCRIPodCPUAndMemoryStats(assert, p3, infos[sandbox3Cgroup].Stats[0])
mockCadvisor.AssertExpectations(t)
}
func TestCRIImagesFsStats(t *testing.T) {
var (
imageFsMountpoint = "/test/mount/point"
imageFsInfo = getTestFsInfo(2000)
imageFsUsage = makeFakeImageFsUsage(imageFsMountpoint)
)
var (
mockCadvisor = new(cadvisortest.Mock)
mockRuntimeCache = new(kubecontainertest.MockRuntimeCache)
mockPodManager = new(kubepodtest.MockManager)
resourceAnalyzer = new(fakeResourceAnalyzer)
fakeRuntimeService = critest.NewFakeRuntimeService()
fakeImageService = critest.NewFakeImageService()
fakeLogStatsProvider = NewFakeLogMetricsService(nil)
)
mockCadvisor.On("GetDirFsInfo", imageFsMountpoint).Return(imageFsInfo, nil)
fakeImageService.SetFakeFilesystemUsage([]*runtimeapi.FilesystemUsage{
imageFsUsage,
})
provider := NewCRIStatsProvider(
mockCadvisor,
resourceAnalyzer,
mockPodManager,
mockRuntimeCache,
fakeRuntimeService,
fakeImageService,
fakeLogStatsProvider,
&kubecontainertest.FakeOS{},
)
stats, err := provider.ImageFsStats()
assert := assert.New(t)
assert.NoError(err)
assert.Equal(imageFsUsage.Timestamp, stats.Time.UnixNano())
assert.Equal(imageFsInfo.Available, *stats.AvailableBytes)
assert.Equal(imageFsInfo.Capacity, *stats.CapacityBytes)
assert.Equal(imageFsInfo.InodesFree, stats.InodesFree)
assert.Equal(imageFsInfo.Inodes, stats.Inodes)
assert.Equal(imageFsUsage.UsedBytes.Value, *stats.UsedBytes)
assert.Equal(imageFsUsage.InodesUsed.Value, *stats.InodesUsed)
mockCadvisor.AssertExpectations(t)
}
func makeFakePodSandbox(name, uid, namespace string, terminated bool) *critest.FakePodSandbox {
p := &critest.FakePodSandbox{
PodSandboxStatus: runtimeapi.PodSandboxStatus{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: name,
Uid: uid,
Namespace: namespace,
},
State: runtimeapi.PodSandboxState_SANDBOX_READY,
CreatedAt: time.Now().UnixNano(),
},
}
if terminated {
p.PodSandboxStatus.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
}
p.PodSandboxStatus.Id = string(uuid.NewUUID())
return p
}
func | (sandbox *critest.FakePodSandbox, name string, attempt uint32, terminated bool) *critest.FakeContainer {
sandboxID := sandbox.PodSandboxStatus.Id
c := &critest.FakeContainer{
SandboxID: sandboxID,
ContainerStatus: runtimeapi.ContainerStatus{
Metadata: &runtimeapi.ContainerMetadata{Name: name, Attempt: attempt},
Image: &runtimeapi.ImageSpec{},
ImageRef: "fake-image-ref",
CreatedAt: time.Now().UnixNano(),
},
}
c.ContainerStatus.Labels = map[string]string{
"io.kubernetes.pod.name": sandbox.Metadata.Name,
"io.kubernetes.pod.uid": sandbox.Metadata.Uid,
"io.kubernetes.pod.namespace": sandbox.Metadata.Namespace,
"io.kubernetes.container.name": name,
}
if terminated {
c.ContainerStatus.State = runtimeapi.ContainerState_CONTAINER_EXITED
} else {
c.ContainerStatus.State = runtimeapi.ContainerState_CONTAINER_RUNNING
}
c.ContainerStatus.Id = string(uuid.NewUUID())
return c
}
func makeFakeContainerStats(container *critest.FakeContainer, imageFsMountpoint string) *runtimeapi.ContainerStats {
containerStats := &runtimeapi.ContainerStats{
Attributes: &runtimeapi.ContainerAttributes{
Id: container.ContainerStatus.Id,
Metadata: container.ContainerStatus.Metadata,
},
WritableLayer: &runtimeapi.FilesystemUsage{
Timestamp: time.Now().UnixNano(),
FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: imageFsMountpoint},
UsedBytes: &runtimeapi.UInt64Value{Value: rand.Uint64() / 100},
InodesUsed: &runtimeapi.UInt64Value{Value: rand.Uint64() / 100},
},
}
if container.State == runtimeapi.ContainerState_CONTAINER_EXITED {
containerStats.Cpu = nil
containerStats.Memory = nil
} else {
containerStats.Cpu = &runtimeapi.CpuUsage{
Timestamp: time.Now().UnixNano(),
UsageCoreNanoSeconds: &runtimeapi.UInt64Value{Value: rand.Uint64()},
}
containerStats.Memory = &runtimeapi.MemoryUsage{
Timestamp: time.Now().UnixNano(),
WorkingSetBytes: &runtimeapi.UInt64Value{Value: rand.Uint64()},
}
}
return containerStats
}
func makeFakeImageFsUsage(fsMountpoint string) *runtimeapi.FilesystemUsage {
return &runtimeapi.FilesystemUsage{
Timestamp: time.Now().UnixNano(),
FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: fsMountpoint},
UsedBytes: &runtimeapi.UInt64Value{Value: rand.Uint64()},
InodesUsed: &runtimeapi.UInt64Value{Value: rand.Uint64()},
}
}
func makeFakeVolumeStats(volumeNames []string) []statsapi.VolumeStats {
volumes := make([]statsapi.VolumeStats, len(volumeNames))
availableBytes := rand.Uint64()
capacityBytes := rand.Uint64()
usedBytes := rand.Uint64() / 100
inodes := rand.Uint64()
inodesFree := rand.Uint64()
inodesUsed := rand.Uint64() / 100
for i, name := range volumeNames {
fsStats := statsapi.FsStats{
Time: metav1.NewTime(time.Now()),
AvailableBytes: &availableBytes,
CapacityBytes: &capacityBytes,
UsedBytes: &usedBytes,
Inodes: &inodes,
InodesFree: &inodesFree,
InodesUsed: &inodesUsed,
}
volumes[i] = statsapi.VolumeStats{
FsStats: fsStats,
Name: name,
}
}
return volumes
}
func checkCRICPUAndMemoryStats(assert *assert.Assertions, actual statsapi.ContainerStats, cs *cadvisorapiv2.ContainerStats) {
assert.Equal(cs.Timestamp.UnixNano(), actual.CPU.Time.UnixNano())
assert.Equal(cs.Cpu.Usage.Total, *actual.CPU.UsageCoreNanoSeconds)
assert.Equal(cs.CpuInst.Usage.Total, *actual.CPU.UsageNanoCores)
assert.Equal(cs.Memory.Usage, *actual.Memory.UsageBytes)
assert.Equal(cs.Memory.WorkingSet, *actual.Memory.WorkingSetBytes)
assert.Equal(cs.Memory.RSS, *actual.Memory.RSSBytes)
assert.Equal(cs.Memory.ContainerData.Pgfault, *actual.Memory.PageFaults)
assert.Equal(cs.Memory.ContainerData.Pgmajfault, *actual.Memory.MajorPageFaults)
}
func checkCRIRootfsStats(assert *assert.Assertions, actual statsapi.ContainerStats, cs *runtimeapi.ContainerStats, imageFsInfo *cadvisorapiv2.FsInfo) {
assert.Equal(cs.WritableLayer.Timestamp, actual.Rootfs.Time.UnixNano())
if imageFsInfo != nil {
assert.Equal(imageFsInfo.Available, *actual.Rootfs.AvailableBytes)
assert.Equal(imageFsInfo.Capacity, *actual.Rootfs.CapacityBytes)
assert.Equal(*imageFsInfo.InodesFree, *actual.Rootfs.InodesFree)
assert.Equal(*imageFsInfo.Inodes, *actual.Rootfs.Inodes)
} else {
assert.Nil(actual.Rootfs.AvailableBytes)
assert.Nil(actual.Rootfs.CapacityBytes)
assert.Nil(actual.Rootfs.InodesFree)
assert.Nil(actual.Rootfs.Inodes)
}
assert.Equal(cs.WritableLayer.UsedBytes.Value, *actual.Rootfs.UsedBytes)
assert.Equal(cs.WritableLayer.InodesUsed.Value, *actual.Rootfs.InodesUsed)
}
func checkCRILogsStats(assert *assert.Assertions, actual statsapi.ContainerStats, rootFsInfo *cadvisorapiv2.FsInfo, logStats *volume.Metrics) {
assert.Equal(rootFsInfo.Timestamp, actual.Logs.Time.Time)
assert.Equal(rootFsInfo.Available, *actual.Logs.AvailableBytes)
assert.Equal(rootFsInfo.Capacity, *actual.Logs.CapacityBytes)
assert.Equal(*rootFsInfo.InodesFree, *actual.Logs.InodesFree)
assert.Equal(*rootFsInfo.Inodes, *actual.Logs.Inodes)
assert.Equal(uint64(logStats.Used.Value()), *actual.Logs.UsedBytes)
assert.Equal(uint64(logStats.InodesUsed.Value()), *actual.Logs.InodesUsed)
}
func checkEphemeralStorageStats(assert *assert.Assertions,
actual statsapi.PodStats,
volumes []statsapi.VolumeStats,
containers []*runtimeapi.ContainerStats,
containerLogStats []*volume.Metrics,
podLogStats *volume.Metrics) {
var totalUsed, inodesUsed uint64
for _, container := range containers {
totalUsed = totalUsed + container.WritableLayer.UsedBytes.Value
inodesUsed = inodesUsed + container.WritableLayer.InodesUsed.Value
}
for _, volume := range volumes {
totalUsed = totalUsed + *volume.FsStats.UsedBytes
inodesUsed = inodesUsed + *volume.FsStats.InodesUsed
}
for _, logStats := range containerLogStats {
totalUsed = totalUsed + uint64(logStats.Used.Value())
inodesUsed = inodesUsed + uint64(logStats.InodesUsed.Value())
}
if podLogStats != nil {
totalUsed = totalUsed + uint64(podLogStats.Used.Value())
inodesUsed = inodesUsed + uint64(podLogStats.InodesUsed.Value())
}
assert.Equal(int(totalUsed), int(*actual.EphemeralStorage.UsedBytes))
assert.Equal(int(inodesUsed), int(*actual.EphemeralStorage.InodesUsed))
}
func checkCRINetworkStats(assert *assert.Assertions, actual *statsapi.NetworkStats, expected *cadvisorapiv2.NetworkStats) {
assert.Equal(expected.Interfaces[0].RxBytes, *actual.RxBytes)
assert.Equal(expected.Interfaces[0].RxErrors, *actual.RxErrors)
assert.Equal(expected.Interfaces[0].TxBytes, *actual.TxBytes)
assert.Equal(expected.Interfaces[0].TxErrors, *actual.TxErrors)
}
func checkCRIPodCPUAndMemoryStats(assert *assert.Assertions, actual statsapi.PodStats, cs *cadvisorapiv2.ContainerStats) {
if runtime.GOOS != "linux" {
return
}
assert.Equal(cs.Timestamp.UnixNano(), actual.CPU.Time.UnixNano())
assert.Equal(cs.Cpu.Usage.Total, *actual.CPU.UsageCoreNanoSeconds)
assert.Equal(cs.CpuInst.Usage.Total, *actual.CPU.UsageNanoCores)
assert.Equal(cs.Memory.Usage, *actual.Memory.UsageBytes)
assert.Equal(cs.Memory.WorkingSet, *actual.Memory.WorkingSetBytes)
assert.Equal(cs.Memory.RSS, *actual.Memory.RSSBytes)
assert.Equal(cs.Memory.ContainerData.Pgfault, *actual.Memory.PageFaults)
assert.Equal(cs.Memory.ContainerData.Pgmajfault, *actual.Memory.MajorPageFaults)
}
func makeFakeLogStats(seed int) *volume.Metrics {
m := &volume.Metrics{}
m.Used = resource.NewQuantity(int64(seed+offsetUsage), resource.BinarySI)
m.InodesUsed = resource.NewQuantity(int64(seed+offsetInodeUsage), resource.BinarySI)
return m
}
func TestGetContainerUsageNanoCores(t *testing.T) {
var value0 uint64
var value1 uint64 = 10000000000
// Test with a large container of 100+ CPUs
var value2 uint64 = 188427786383
tests := []struct {
desc string
cpuUsageCache map[string]*cpuUsageRecord
stats *runtimeapi.ContainerStats
expected *uint64
}{
{
desc: "should return nil if stats is nil",
cpuUsageCache: map[string]*cpuUsageRecord{},
},
{
desc: "should return nil if cpu stats is nil",
cpuUsageCache: map[string]*cpuUsageRecord{},
stats: &runtimeapi.ContainerStats{
Attributes: &runtimeapi.ContainerAttributes{
Id: "1",
},
Cpu: nil,
},
},
{
desc: "should return nil if usageCoreNanoSeconds is nil",
cpuUsageCache: map[string]*cpuUsageRecord{},
stats: &runtimeapi.ContainerStats{
Attributes: &runtimeapi.ContainerAttributes{
Id: "1",
},
Cpu: &runtimeapi.CpuUsage{
Timestamp: 1,
UsageCoreNanoSeconds: nil,
},
},
},
{
desc: "should return nil if cpu stats is not cached yet",
cpuUsageCache: map[string]*cpuUsageRecord{},
stats: &runtimeapi.ContainerStats{
Attributes: &runtimeapi.ContainerAttributes{
Id: "1",
},
Cpu: &runtimeapi.CpuUsage{
Timestamp: 1,
UsageCoreNanoSeconds: &runtimeapi.UInt64Value{
Value: 10000000000,
},
},
},
},
{
desc: "should return zero value if cached cpu stats is equal to current value",
stats: &runtimeapi.ContainerStats{
Attributes: &runtimeapi.ContainerAttributes{
Id: "1",
},
Cpu: &runtimeapi.CpuUsage{
Timestamp: 1,
UsageCoreNanoSeconds: &runtimeapi.UInt64Value{
Value: 10000000000,
},
},
},
cpuUsageCache: map[string]*cpuUsageRecord{
"1": {
stats: &runtimeapi.CpuUsage{
Timestamp: 0,
UsageCoreNanoSeconds: &runtimeapi.UInt64Value{
Value: 10000000000,
},
},
},
},
expected: &value0,
},
{
desc: "should return correct value if cached cpu stats is not equal to current value",
stats: &runtimeapi.ContainerStats{
Attributes: &runtimeapi.ContainerAttributes{
Id: "1",
},
Cpu: &runtimeapi.CpuUsage{
Timestamp: int64(time.Second / time.Nanosecond),
UsageCoreNanoSeconds: &runtimeapi.UInt64Value{
Value: 20000000000,
},
},
},
cpuUsageCache: map[string]*cpuUsageRecord{
"1": {
stats: &runtimeapi.CpuUsage{
Timestamp: 0,
UsageCoreNanoSeconds: &runtimeapi.UInt64Value{
Value: 10000000000,
},
},
},
},
expected: &value1,
},
{
desc: "should return correct value if elapsed UsageCoreNanoSeconds exceeds 18446744073",
stats: &runtimeapi.ContainerStats{
Attributes: &runtimeapi.ContainerAttributes{
Id: "1",
},
Cpu: &runtimeapi.CpuUsage{
Timestamp: int64(time.Second / time.Nanosecond),
UsageCoreNanoSeconds: &runtimeapi.UInt64Value{
Value: 68172016162105,
},
},
},
cpuUsageCache: map[string]*cpuUsageRecord{
"1": {
stats: &runtimeapi.CpuUsage{
Timestamp: 0,
UsageCoreNanoSeconds: &runtimeapi.UInt64Value{
Value: 67983588375722,
},
},
},
},
expected: &value2,
},
}
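// Consistency check for the expected values above (reasoning about the test
// data, not a statement about the implementation): if usage nanocores are
// derived roughly as delta(UsageCoreNanoSeconds) * 1e9 / delta(Timestamp in ns),
// then the "not equal to current value" case yields
// (20000000000-10000000000)*1e9/1e9 = 10000000000 (value1), and the large
// container case yields (68172016162105-67983588375722)*1e9/1e9 = 188427786383
// (value2). That last delta exceeds 18446744073 (~2^64/1e9), so a plain uint64
// multiplication would overflow, which is what the final case exercises.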
for _, test := range tests {
provider := &criStatsProvider{cpuUsageCache: test.cpuUsageCache}
// Before the update, the cached value should be nil
cached := provider.getContainerUsageNanoCores(test.stats)
assert.Nil(t, cached)
// Update the cache and get the latest value.
real := provider.getAndUpdateContainerUsageNanoCores(test.stats)
assert.Equal(t, test.expected, real, test.desc)
// After the update, the cached value should be up-to-date
cached = provider.getContainerUsageNanoCores(test.stats)
assert.Equal(t, test.expected, cached, test.desc)
}
}
sdk_driver.go
package test
import (
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/pulumi/pulumi/pkg/v3/codegen"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type ThenFunc func(t *testing.T, testDir string)
type sdkTest struct {
Directory string
Description string
Skip codegen.StringSet
Then map[string]ThenFunc
}
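// Hypothetical entry (not part of the table below) showing how the Skip and
// Then hooks combine; codegen.NewStringSet is assumed to be the StringSet
// constructor in the imported codegen package:
//
//	{
//		Directory:   "some-schema",
//		Description: "Example that skips Python and adds an extra Go check",
//		Skip:        codegen.NewStringSet("python"),
//		Then: map[string]ThenFunc{
//			"go": func(t *testing.T, testDir string) { /* extra assertions */ },
//		},
//	},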
var sdkTests = []sdkTest{
{
Directory: "input-collision",
Description: "Schema with types that could potentially produce collisions (go).",
},
{
Directory: "dash-named-schema",
Description: "Simple schema with a two part name (foo-bar)",
},
{
Directory: "external-resource-schema",
Description: "External resource schema",
},
{
Directory: "nested-module",
Description: "Nested module",
},
{
Directory: "nested-module-thirdparty",
Description: "Third-party nested module",
},
{
Directory: "plain-schema-gh6957",
Description: "Repro for #6957",
},
{
Directory: "resource-args-python",
Description: "Resource args with same named resource and type",
Then: map[string]ThenFunc{
"go": func(t *testing.T, testDir string) {
cmd := exec.Command("go", "test", "./...")
cmd.Dir = filepath.Join(testDir, "go-program")
out, err := cmd.CombinedOutput()
if !assert.NoError(t, err) {
t.Logf("output: %v", string(out))
}
},
},
},
{
Directory: "simple-enum-schema",
Description: "Simple schema with enum types",
},
{
Directory: "simple-plain-schema",
Description: "Simple schema with plain properties",
},
{
Directory: "simple-plain-schema-with-root-package",
Description: "Simple schema with root package set", | Directory: "simple-resource-schema",
Description: "Simple schema with local resource properties",
},
{
Directory: "simple-resource-schema-custom-pypackage-name",
Description: "Simple schema with local resource properties and custom Python package name",
},
{
Directory: "simple-methods-schema",
Description: "Simple schema with methods",
},
{
Directory: "simple-yaml-schema",
Description: "Simple schema encoded using YAML",
},
{
Directory: "provider-config-schema",
Description: "Simple provider config schema",
},
{
Directory: "replace-on-change",
Description: "Simple use of replaceOnChange in schema",
},
}
// TestSDKCodegen runs the complete set of SDK code generation tests against a particular language's code generator.
//
// An SDK code generation test consists of a schema and a set of expected outputs for each language. Each test is
// structured as a directory that contains that information:
//
// test-directory/
// schema.(json|yaml)
// language-0
// ...
// language-n
//
// The schema is the only piece that must be manually authored. Once the schema has been written, the expected outputs
// can be generated by running `PULUMI_ACCEPT=true go test ./...` from the `pkg/codegen` directory. A minimal
// sketch of how a language package might invoke this driver appears after the function below.
func TestSDKCodegen(t *testing.T, language string, genPackage GenPkgSignature) {
testDir := filepath.Join("..", "internal", "test", "testdata")
for _, tt := range sdkTests {
t.Run(tt.Description, func(t *testing.T) {
if tt.Skip.Has(language) {
t.Skip()
return
}
dirPath := filepath.Join(testDir, filepath.FromSlash(tt.Directory))
schemaPath := filepath.Join(dirPath, "schema.json")
if _, err := os.Stat(schemaPath); err != nil && os.IsNotExist(err) {
schemaPath = filepath.Join(dirPath, "schema.yaml")
}
files, err := GeneratePackageFilesFromSchema(schemaPath, genPackage)
require.NoError(t, err)
if !RewriteFilesWhenPulumiAccept(t, dirPath, language, files) {
expectedFiles, err := LoadBaseline(dirPath, language)
require.NoError(t, err)
if !ValidateFileEquality(t, files, expectedFiles) {
return
}
}
if then, ok := tt.Then[language]; ok {
then(t, dirPath)
}
})
}
}
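// The sketch referenced in the TestSDKCodegen comment above: a hypothetical
// per-language test file (it would live in a language package such as
// pkg/codegen/go rather than in this driver) that runs the SDK codegen tests.
// The import path and the generatePackage adapter are assumptions inferred
// from how this driver resolves "../internal/test/testdata".
//
//	package gen
//
//	import (
//		"testing"
//
//		"github.com/pulumi/pulumi/pkg/v3/codegen/internal/test"
//	)
//
//	// generatePackage is assumed to satisfy test.GenPkgSignature for Go.
//	func TestGeneratePackage(t *testing.T) {
//		test.TestSDKCodegen(t, "go", generatePackage)
//	}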