max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
release/check_preamble.py | sivchand/smart_open | 2,047 | 12602731 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 <NAME> <<EMAIL>>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
#
"""Checks preambles of Python script files.
We want to ensure they all contain the appropriate license and copyright.
For the purposes of this script, the *preamble* is defined as the first
lines of the file starting with a hash (#). Any line that does not start
with a hash ends the preamble.
Usage::
python check_preamble.py --replace /path/to/template.py script.py
The above command reads the preamble from ``template.py``, and then copies
that preamble into ``script.py``. If ``script.py`` already contains a
preamble, then the existing preamble will be replaced **entirely**.
Processing entire subdirectories with one command::
find subdir1 subdir2 -iname "*.py" | xargs -n 1 python check_preamble.py --replace template.py
"""
import argparse
import logging
import os
import sys
def extract_preamble(fin):
end_preamble = False
preamble, body = [], []
for line in fin:
if end_preamble:
body.append(line)
elif line.startswith('#'):
preamble.append(line)
else:
end_preamble = True
body.append(line)
return preamble, body
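# Illustrative behaviour (not part of the original script): for a file that
# starts with
#     # Copyright ...
#     # License ...
#     import os
# extract_preamble() returns (['# Copyright ...\n', '# License ...\n'],
# ['import os\n', ...]).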
def main():
parser = argparse.ArgumentParser()
parser.add_argument('path', help='the path of the file to check')
parser.add_argument('--replace', help='replace the preamble with the one from this file')
parser.add_argument('--loglevel', default=logging.INFO)
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
with open(args.path) as fin:
preamble, body = extract_preamble(fin)
for line in preamble:
logging.info('%s: %s', args.path, line.rstrip())
if not args.replace:
sys.exit(0)
with open(args.replace) as fin:
preamble, _ = extract_preamble(fin)
if os.access(args.path, os.X_OK):
preamble.insert(0, '#!/usr/bin/env python\n')
with open(args.path, 'w') as fout:
for line in preamble + body:
fout.write(line)
if __name__ == '__main__':
main()
|
metadata_in_type_annotations.py | CvanderStoep/VideosSampleCode | 285 | 12602735 | <filename>metadata_in_type_annotations.py
from typing import Annotated, TypeVar, get_args
import struct2
UnsignedShort = Annotated[int, struct2.ctype('H')]
SignedChar = Annotated[int, struct2.ctype('b')]
assert get_args(UnsignedShort) == (int, struct2.ctype('H'))
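# A minimal, standard-library-only illustration of the same idea (struct2 and
# my_annotations used in this snippet are assumed helper modules, not shown
# here): Annotated wraps a type with arbitrary metadata and get_args exposes
# both the type and the metadata.
Meters = Annotated[float, "unit: m"]
assert get_args(Meters) == (float, "unit: m")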
class Student(struct2.Packed):
name: Annotated[str, struct2.ctype("<10s")]
serialnum: UnsignedShort
school: SignedChar
record: bytes = ...
student = Student.unpack(record)
# Student(name=b'raymond ', serialnum=4658, school=264)
record = student.pack()
T = TypeVar('T')
Const = Annotated[T, my_annotations.CONST]
class C:
def const_method(self,l: Const[list[int]]) -> int:
...
|
junc/utils/utils.py | moliushang/wireframe_ | 148 | 12602788 | from numpy.random import randn
import ref
import torch
import numpy as np
def adjust_learning_rate(optimizer, epoch, LR, LR_param):
#lr = LR * (0.1 ** (epoch // dropLR))
LR_policy = LR_param.get('lr_policy', 'step')
if LR_policy == 'step':
steppoints = LR_param.get('steppoints', [4, 7, 9, 10])
lrs = LR_param.get('lrs', [0.01, 0.001, 0.0001, 0.00001, 0.000001])
assert len(lrs) == len(steppoints) + 1
lr = None
for idx, steppoint in enumerate(steppoints):
if epoch > steppoint:
continue
elif epoch <= steppoint:
lr = lrs[idx]
break
if lr is None:
lr = lrs[-1]
for param_group in optimizer.param_groups:
param_group['lr'] = lr
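# Worked example of the 'step' policy above, using the default steppoints
# [4, 7, 9, 10] and lrs [0.01, 0.001, 0.0001, 0.00001, 0.000001]:
# epoch 3 -> lr 0.01, epoch 5 -> lr 0.001, epoch 11 (past all steppoints) -> lr 0.000001.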
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
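# Usage sketch (not part of the original module):
#     losses = AverageMeter()
#     losses.update(0.5, n=8)   # batch loss 0.5 averaged over 8 samples
#     losses.update(0.3, n=8)
#     losses.avg                # -> 0.4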
def Rnd(x):
return max(-2 * x, min(2 * x, randn() * x))
def Flip(img):
return img[:, :, ::-1].copy()
def ShuffleLR(x):
for e in ref.shuffleRef:
x[e[0]], x[e[1]] = x[e[1]].copy(), x[e[0]].copy()
return x
|
doc/examples/scripts/sequence/hcn_similarity.py | alex123012/biotite | 208 | 12602814 | """
Similarity of HCN and related channels
======================================
This example creates a simple dendrogram for HCN channels and
other proteins of the *cyclic nucleotide–gated* (CNG) ion channel
superfamily.
The distance measure is the deviation from sequence identity:
for identical sequences the deviation is 0, and for sequences with no
similarity the deviation is 1.
The tree is created using the UPGMA algorithm.
"""
# Code source: <NAME>
# License: BSD 3 clause
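# Illustration of the distance measure used below: a sequence pair with 80 %
# pairwise identity gets a distance of 1 - 0.8 = 0.2, so identical sequences
# cluster at distance 0 and unrelated sequences approach 1.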
import biotite.sequence.io.fasta as fasta
import biotite.database.entrez as entrez
import biotite.sequence as seq
import biotite.application.clustalo as clustalo
import biotite.sequence.align as align
import biotite.sequence.phylo as phylo
import matplotlib.pyplot as plt
import biotite.sequence.graphics as graphics
UNIPROT_IDS = dict(
hHCN1 = "O60741",
hHCN2 = "Q9UL51",
hHCN3 = "Q9P1Z3",
hHCN4 = "Q9Y3Q4",
spHCN = "O76977",
hEAG1 = "O95259",
hERG1 = "Q12809",
KAT1 = "Q39128",
)
### fetch sequences for UniProt IDs from NCBI Entrez
fasta_file = fasta.FastaFile.read(entrez.fetch_single_file(
list(UNIPROT_IDS.values()), None, "protein", "fasta"
))
sequences = {
name: seq.ProteinSequence(seq_str)
for name, seq_str in zip(UNIPROT_IDS.keys(), fasta_file.values())
}
### create a simple phylogenetic tree
# create MSA
alignment = clustalo.ClustalOmegaApp.align(list(sequences.values()))
# build simple tree based on deviation from sequence identity
distances = 1 - align.get_pairwise_sequence_identity(
alignment, mode="shortest"
)
tree = phylo.upgma(distances)
### plot the tree
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
graphics.plot_dendrogram(
ax, tree, orientation="left", labels=list(UNIPROT_IDS.keys()),
show_distance=False, linewidth=2
)
ax.grid(False)
ax.set_xticks([])
# distance indicator
indicator_len = 0.1
indicator_start = (
ax.get_xlim()[0] + ax.get_xlim()[1]*0.02,
ax.get_ylim()[1] - ax.get_ylim()[1]*0.15
)
indicator_stop = (
indicator_start[0] + indicator_len,
indicator_start[1]
)
indicator_center = (
(indicator_start[0] + indicator_stop[0])/2,
(indicator_start[1] + 0.25)
)
ax.annotate(
"", xy=indicator_start, xytext=indicator_stop, xycoords="data",
textcoords="data", arrowprops={"arrowstyle": "|-|", "linewidth": 2}
)
ax.annotate(
f"{int(indicator_len * 100)} %", xy=indicator_center,
ha="center", va="center"
)
ax.set_title("Sequence deviation of HCN to other CNG superfamily channels")
plt.show()
|
samples/archive/hls4ml/toynn.py | zzzDavid/heterocl | 236 | 12602820 | import heterocl as hcl
import numpy as np
import numpy.testing as tst
import os
import urllib.request
from hlib.op.extern import (
create_extern_module, register_extern_ip,
register_tensors, include_dependency)
@register_extern_ip(type="vhls")
def toynn_vhls_ip(input_1, output_1, name=None):
if name is None: name = "myproject"
# Function behavior definition
with hcl.Stage("ExternModule.toyNN") as Module:
register_tensors([input_1, output_1])
Module.ext_ip_name = name
Module.inputs = [input_1, output_1]
# Include cpp/hpp files
if not os.path.exists("firmware"):
urllib.request.urlretrieve("https://raw.githubusercontent.com/Hecmay/debug.trace/main/toynn.tar.gz", filename="toynn.tar.gz")
os.system("tar -zxvf toynn.tar.gz")
source = [
"firmware/myproject.cpp",
"firmware/nnet_utils/",
"firmware/weights/"
]
Module.source = include_dependency(source)
create_extern_module(Module, ip_type="HLS")
def test_toy_nn():
dtype = hcl.Float(32)
hcl.init(dtype)
input_1 = hcl.placeholder((16,), dtype=dtype, name="input_1")
output_1 = hcl.placeholder((5,), dtype=dtype, name="output_1")
def math_func(input_1, output_1):
toynn_vhls_ip(input_1, output_1)
target = hcl.Platform.aws_f1
s = hcl.create_schedule([input_1, output_1], math_func)
s.to(input_1, target.xcel)
s.to(output_1, target.host)
target.config(compiler="vitis", mode="debug")
code = hcl.build(s, target)
assert "nnet::softmax" in code, code
os.system("rm -rf firmware toynn.tar.gz")
if __name__ == "__main__":
test_toy_nn()
|
sparse_operation_kit/documents/source/util.py | ShaunHeNJU/DeepRec-1 | 292 | 12602841 | <filename>sparse_operation_kit/documents/source/util.py
"""
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import base64
def convert_pic_to_base64(picname):
with open(picname, "rb") as pic:
ls_f = base64.b64encode(pic.read())
print(ls_f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--picname", "-p", type=str,
help="the picture name",
required=True)
args = parser.parse_args()
convert_pic_to_base64(args.picname) |
artemis/plotting/test_easy_plotting.py | peteroconnor-bc/artemis | 235 | 12602847 | <reponame>peteroconnor-bc/artemis
import numpy as np
from artemis.plotting.easy_plotting import ezplot
__author__ = 'peter'
class DataContainer(object):
def __init__(self, im, line, struct, text, number):
self._im = im
self._line = line
self._struct = struct
self._text = text
self._number = number
def test_easy_plot():
thing = DataContainer(
im =np.random.randn(30, 40),
line = np.sin(np.arange(100)/10.),
struct = {'video': np.random.randn(17, 20, 30)},
text = 'adsagfdsf',
number = 5
)
ezplot(thing, hang = False)
def test_plot_wmat():
wmat = np.random.randn(7, 28, 28)
ezplot(wmat, hang = False)
if __name__ == '__main__':
test_plot_wmat()
test_easy_plot()
|
saleor/graphql/warehouse/tests/benchmark/test_stocks.py | eanknd/saleor | 1,392 | 12602874 | <filename>saleor/graphql/warehouse/tests/benchmark/test_stocks.py
import pytest
from .....warehouse.models import Stock, Warehouse
from ....tests.utils import get_graphql_content
@pytest.fixture
def stocks(address, variant):
warehouses = Warehouse.objects.bulk_create(
[
Warehouse(
address=address.get_copy(),
name=f"Warehouse {i}",
slug=f"warehouse_{i}",
email=f"<EMAIL>",
)
for i in range(10)
]
)
return Stock.objects.bulk_create(
[
Stock(warehouse=warehouse, product_variant=variant)
for warehouse in warehouses
]
)
STOCKS_QUERY = """
query {
stocks(first: 100) {
edges {
node {
id
warehouse {
name
}
}
}
}
}
"""
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_stocks_query(
staff_api_client,
stocks,
permission_manage_products,
count_queries,
):
get_graphql_content(
staff_api_client.post_graphql(
STOCKS_QUERY,
permissions=[permission_manage_products],
check_no_permissions=False,
)
)
|
jnpr/openclos/cli_parser.py | rohitt29/OpenClos | 114 | 12602893 | #------------------------------------------------------------------------------
# cli_parser.py
#------------------------------------------------------------------------------
'''
@author : rgiyer
Date : October 20th, 2014
This module is responsible for parsing the command model defined in
cliCommands.yaml and providing functions for:
- validating user input
- invoking the execution handle for CLI commands or macro expansions
- determining possible argument matches for command auto-completion
  based on context
'''
# Standard Python libraries
import os
import re
import subprocess
import inspect
import readline
# Packages required for openclos
import yaml
import collections
#import yamlordereddictloader
# openclos classes
import util
import propLoader
# cli related classes
from cli_handle_impl import CLIImplementor
global_needle = None
entered_macro = []
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class CLICommand:
def __init__ ( self, cmd_access, cmd_handle, cmd_macro, cmd_macroname, cmd_desc ):
self.cmd_access = cmd_access
self.cmd_handle = cmd_handle
self.cmd_macro = cmd_macro
self.cmd_macroname = cmd_macroname
self.cmd_desc = cmd_desc
# end class CLICommand
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class CLIUtil:
def __init__ ( self ):
commandConfFile = os.path.join ( propLoader.propertyFileLocation,
'cliCommands.yaml' )
self.yaml_file_stream = open ( commandConfFile, 'r' )
raw_graph = yaml.load ( self.yaml_file_stream )
#raw_graph = yaml.load(self.yaml_file_stream, Loader=yamlordereddictloader.Loader)
#self.cmd_graph = {}
self.cmd_graph=collections.OrderedDict()
self.indentation = 8
self.dump_cmd ( raw_graph )
self.yaml_file_stream.close ()
#------------------------------------------------------------------------------
def get_implementor_handle ( self, class_instance, handle_name ):
handles = inspect.getmembers ( class_instance,
predicate = inspect.ismethod )
for function_tuple in handles:
if ( handle_name == function_tuple [ 0 ] ):
return function_tuple [ 1 ]
# no match found
return 0
#------------------------------------------------------------------------------
# Parse through the dictionary iteratively:
def dump_cmd ( self,
cmds,
cmd_root="",
cmd_access="READ",
cmd_handle="",
cmd_macro="",
cmd_macroname="",
cmd_desc="", *args ):
for cmd in cmds:
if ( cmd_root == "" ):
cmd_compound = cmd
else:
cmd_compound = cmd_root + "_" + cmd
cmd_data = cmds [ cmd ]
# Get command access
if cmd_data.has_key ( "Access" ):
cmd_access = cmd_data [ "Access" ]
# Get command handler
if cmd_data.has_key ( "Handle" ):
cmd_handle = cmd_data [ "Handle" ]
elif ( cmd_handle != "" ):
cmd_handle = ""
# Get command macro
if cmd_data.has_key ( "Macro" ):
cmd_macro = cmd_data [ "Macro" ]
elif ( cmd_macro != "" ):
cmd_macro = ""
if cmd_data.has_key ( "MacroName" ):
if "-" in cmd_data [ "MacroName" ]:
print "Macro name cannot contain the character '-'"
print "Excluding the handle"
print cmd_compound + "_<" + cmd_data["MacroName"] +">"
break
cmd_macroname = cmd_data [ "MacroName" ]
cmd_compound = cmd_compound + "_<" + cmd_macroname + ">"
elif ( cmd_macroname != "" ):
cmd_macroname = ""
# Get command description
if cmd_data.has_key ( "Desc" ):
cmd_desc = cmd_data [ "Desc" ]
elif ( cmd_desc != "" ):
cmd_desc = ""
if cmd_data.has_key ( "Handle" ):
#if cmd_data.has_key ( "MacroName" ):
self.cmd_graph [ cmd_compound ] = CLICommand ( cmd_access,
cmd_handle,
cmd_macro,
cmd_macroname,
cmd_desc )
if ( len ( cmd_compound ) > self.indentation ):
self.indentation = len ( cmd_compound )
# Parse the arguments
if cmd_data.has_key ( "Args" ):
cmd_args = cmd_data [ "Args" ]
self.dump_cmd ( cmd_args,
cmd_compound,
cmd_access,
cmd_handle,
cmd_macro,
cmd_macroname,
cmd_desc )
#------------------------------------------------------------------------------
def normalize_command ( self, cmd ):
return cmd.replace ( " ", "_" )
#------------------------------------------------------------------------------
def return_graph (self):
return self.cmd_graph
#------------------------------------------------------------------------------
def get_indentation ( self, cmd ):
return ( self.indentation + 8 - len ( cmd ) )
#------------------------------------------------------------------------------
def suffix_macro_to_cmd ( self, macro_list, cmd ):
ret_cmd = []
for macro in macro_list:
ret_cmd.append ( self.normalize_command ( cmd + "_" + macro ) )
return ret_cmd
#------------------------------------------------------------------------------
def get_macro_list ( self, class_instance, macro_txt, add_help=None ):
fn_macro = self.get_implementor_handle ( class_instance, macro_txt )
prev_macro = self.get_previous_macro()
return fn_macro ( prev_macro, add_help )
#------------------------------------------------------------------------------
def include_macro ( self, macro_list, ret_list ):
for item in macro_list:
ret_list.append ( item )
#------------------------------------------------------------------------------
def string_has_enter ( self, string ):
if ( re.search ( "<enter>", string ) != None ):
return 1
else:
return 0
#------------------------------------------------------------------------------
def add_enter_instruction ( self, result_list ):
if ( len ( result_list ) ):
string = result_list [ 0 ]
if ( self.string_has_enter ( string ) == 1 ):
return 0
result_list.insert ( 0, " <enter>" + " " * self.get_indentation ( "<enter" ) + "Execute the current command" )
#------------------------------------------------------------------------------
def match_macro ( self, macro_list, needle, ret_list ):
global entered_macro
for haystack in macro_list:
if ( re.match ( needle, haystack ) != None ):
if ( len ( needle ) == len ( haystack ) ):
self.add_enter_instruction ( ret_list )
entered_macro.append(haystack)
elif ( len ( needle ) < len ( haystack ) ):
if haystack not in ret_list:
ret_list.append ( haystack )
entered_macro.append(haystack)
#else:
#print ""
#------------------------------------------------------------------------------
def option_exists ( self, consider_option, ret_list ):
for option in ret_list:
if ( re.match ( option, consider_option ) != None ):
return 1
return 0
#------------------------------------------------------------------------------
def complete_command ( self,
part_cmd,
full_cmd,
end_index,
cmd_helper,
ret_list ):
unmatched_string = full_cmd [ end_index: ]
# This is an adjustment for "<space>" before tab / ? keypress
if ( part_cmd [ -1 ] == "_" ):
part_cmd = part_cmd [ 0:-1 ]
unmatched_string = "_" + unmatched_string
if ( unmatched_string [ 0 ] == "_" ):
# attach possible matches
possible_option = unmatched_string.replace ( "_", " " ) + ( " " * self.get_indentation ( full_cmd ) )
possible_option = possible_option + "<" + cmd_helper.cmd_desc + ">"
ret_list.append ( possible_option )
else:
# Get part of the command from part_cmd
match_object = re.search ( "_", part_cmd )
while ( match_object != None ):
part_cmd = part_cmd [ match_object.end (): ]
match_object = re.search ( "_", part_cmd )
# Get rest of the command from unmatched_string
match_object = re.search ( "_", unmatched_string )
if ( match_object != None ):
unmatched_string = unmatched_string [ :(match_object.end()-1)]
complete_word = part_cmd + unmatched_string
if ( self.option_exists ( complete_word, ret_list ) == 0 ):
ret_list.append ( complete_word )
return ret_list
#------------------------------------------------------------------------------
def get_all_cmds ( self ):
ret_list = []
for cmd in self.cmd_graph:
cmd_str = cmd.replace ( "_", " " )
cmd_str = cmd_str + ( " " * self.get_indentation ( cmd ) ) + "<" + self.cmd_graph [ cmd ].cmd_desc + ">"
ret_list.append ( cmd_str )
return ret_list
#------------------------------------------------------------------------------
# There are many references to "needle" and "haystack" below: the needle is
# the current command context typed at the CLI, and the haystack is the
# command model dict built during CLIUtil instantiation.
#------------------------------------------------------------------------------
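# Example (illustrative only; actual keys depend on cliCommands.yaml): for a
# needle such as "create cab" matched against a haystack key such as
# "create_cabling-plan", get_match() offers "cabling-plan" as a completion
# candidate.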
def get_match (self, needle):
global global_needle
global_needle = needle
macro_dict = {}
ret_list = []
# flag variable to denote if macro has been appended
flag=0
if len(needle)==0 or re.search("[a-z|A-Z|0-9]", needle)==None:
return self.get_all_cmds()
if needle[-1]==" ":
needle=needle[0:-1]
needle = self.normalize_command(needle)
while needle[-1]=="_":
needle=needle[0:-1]
for haystack_orig in self.cmd_graph:
cmd_helper = self.cmd_graph [ haystack_orig ]
# Creating macro list for easy lookup and retrieval
if cmd_helper.cmd_macro!="":
cmd_macro_list = self.get_macro_list(CLIImplementor(),cmd_helper.cmd_macro)
macro_dict[cmd_helper.cmd_macroname]=cmd_macro_list
# For regex operations
haystack = haystack_orig.replace("<","(?P<")
haystack = haystack.replace(">", ">.*)")
# Matching using regex search and match
match_macros = re.search(haystack,needle)
if len(haystack_orig)<len(needle):
match_object = re.match(haystack_orig,needle)
else:
match_object = re.match(needle,haystack_orig)
# Complete partially entered command
if match_object!=None:
balance_haystack = haystack_orig[match_object.end():]
if balance_haystack!="":
if balance_haystack[1]=="<" and cmd_helper.cmd_macro!="":
# check to retrieve corresponding macro list
if cmd_helper.cmd_macroname in haystack_orig.partition(">")[0]:
self.include_macro(macro_dict[cmd_helper.cmd_macroname],ret_list)
break
haystack_orig=haystack_orig.replace(" ","_")
self.complete_command(needle,haystack_orig,match_object.end(), cmd_helper, ret_list)
else:
self.add_enter_instruction ( ret_list )
# Compare and complete macros
elif match_macros!=None:
for macro_name in macro_dict.keys():
if macro_name in cmd_helper.cmd_macroname:
# try-catch block to get all match groups
try:
macro_needle = match_macros.group(macro_name)
if "_" in macro_needle:
continue
for each_macro in macro_dict[macro_name]:
if macro_needle in each_macro:
self.match_macro(macro_dict[macro_name],macro_needle,ret_list)
flag=1
break
if flag==0:
#print "Invalid macro. Possible options:"
self.include_macro(macro_dict[macro_name],ret_list)
except IndexError:
break
# Find point of match and return remaining command
else:
needle_temp = needle
haystack_temp = haystack_orig
# loop till all macros of commands are validated
while True:
index_of_diff = 0
for char_a, char_b in zip(list(haystack_temp), list(needle_temp)):
if char_a!=char_b:
break
index_of_diff=index_of_diff+1
if index_of_diff!=0:
macro_needle = needle_temp[index_of_diff:]
macro_needle = macro_needle.split("_",1)[-1]
balance_haystack = haystack_temp[index_of_diff:]
if balance_haystack[0]=="_":
balance_haystack=balance_haystack[1:]
if balance_haystack[0]!="<":
match_object = re.match(macro_needle,balance_haystack)
if match_object!=None and flag==0:
end_pos = haystack_orig.find(balance_haystack)
self.complete_command(haystack_orig[:end_pos],haystack_orig,end_pos, cmd_helper, ret_list)
if balance_haystack[0]=="<":
balance_haystack=balance_haystack.split("_",1)[-1]
match_object = re.match(macro_needle, balance_haystack)
for key in macro_dict:
if macro_needle in macro_dict[key]:
end_pos = haystack_orig.find(balance_haystack)
self.complete_command(haystack_orig[:end_pos],haystack_orig,end_pos, cmd_helper, ret_list)
# When needle ends with a macro
if match_object==None or macro_needle=="":
#print "Incorrect command. Possible options:"
if balance_haystack[0]=="<" and cmd_helper.cmd_macro!="":
self.include_macro(macro_dict[cmd_helper.cmd_macroname],ret_list)
else:
if flag==0:
end_pos = haystack_orig.find(balance_haystack)
self.complete_command(haystack_orig[:end_pos],haystack_orig,end_pos, cmd_helper, ret_list)
break
# When needle extends beyond current macro
else:
haystack_temp = balance_haystack
needle_temp = macro_needle
else:
break
else:
break
return ret_list
#------------------------------------------------------------------------------
def chomp ( self, token ):
match_object = re.search ( "[a-z|A-Z|0-9]", token )
if ( match_object != None ):
token = token [ ( match_object.end () - 1): ]
token = token [ ::-1 ]
match_object = re.search ( "[a-z|A-Z|0-9]", token )
if ( match_object != None ):
token = token [ ( match_object.end () - 1): ]
token = token [ ::-1 ]
return token
#------------------------------------------------------------------------------
def validate_command_and_execute ( self, full_cmd_context ):
# We will do the validation again in case this function is called
# outside the CLI context
best_cmd_match = ""
best_cmd_args = ""
best_cmd_handle = None
for command in self.cmd_graph:
cmd_helper = self.cmd_graph[command]
# For regex operations
command_temp = command.replace("<","(?P<")
command_temp = command_temp.replace(">", ">.*)")
match_object = re.match ( command_temp,
self.normalize_command ( full_cmd_context ) )
if ( match_object != None ):
# Okay - we found a match. Get macros if included
command_args = ""
match_macros = re.search ( command_temp,self.normalize_command(full_cmd_context))
if match_macros != None and cmd_helper.cmd_macroname!="":
macro_needle = match_macros.group(cmd_helper.cmd_macroname)
command_args = macro_needle
#if ( len ( full_cmd_context ) > len ( command ) ):
#command_args = self.chomp ( full_cmd_context [ match_object.end (): ] )
command_args = self.chomp (command_args)
if ( len ( best_cmd_match ) < len ( command ) ):
best_cmd_match = command
best_cmd_args = command_args
best_cmd_handle = self.get_implementor_handle ( CLIImplementor (), self.cmd_graph [ command ].cmd_handle )
if ( best_cmd_handle != 0 ):
return best_cmd_handle ( best_cmd_args )
else:
print self.cmd_graph [ best_cmd_match ].cmd_handle + " not implemented"
#------------------------------------------------------------------------------
def print_results ( self, result_list ):
for result in result_list:
print "\t" + result
#------------------------------------------------------------------------------
def print_command_graph ( self, cmd_dict ):
for keys in cmd_dict:
print keys + "=>"
cmd = cmd_dict [ keys ]
if ( cmd.cmd_desc != "" ):
print " " + cmd.cmd_desc
print " " + cmd.cmd_access
if ( cmd.cmd_macro != "" ):
fn_macro = self.get_implementor_handle ( CLIImplementor (),
cmd.cmd_macro )
if ( fn_macro != 0 ):
print fn_macro ()
else:
print " Macro not implemented"
if ( cmd.cmd_handle != "" ):
fn_handle = self.get_implementor_handle ( CLIImplementor (),
cmd.cmd_handle )
if ( fn_handle != 0 ):
fn_handle ()
else:
print " Handler not implemented"
#------------------------------------------------------------------------------
def get_previous_macro(self):
global global_needle
global entered_macro
if global_needle[-1]==" ":
global_needle=global_needle[0:-1]
global_needle = cli_util.normalize_command(global_needle)
prev_macro = None
for each_macro in entered_macro:
if each_macro in global_needle:
prev_macro = each_macro
return prev_macro
# end class CLIUtil
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
cli_util = CLIUtil ()
match_options = [ "create",
# "create cabling-plan",
# "create cabling-",
# "create cabling",
# "create cabling-plan pod",
# "create cabling-plan pod pod_2",
# "create",
# "create dev",
# "create device-config",
# "create device-config p",
# "create device-config pod",
# "create device-config pod pod_1",
# "run",
# "update password",
# "run r",
# "run RE",
# "create cab",
"update",
"deploy",
"run",
"" ]
if __name__ == '__main__':
for match in match_options:
print "Matching results for " + match + " is:"
cli_util.print_results ( cli_util.get_match ( match ) )
print "------------------------------------------------------"
|
Alignment/CommonAlignmentProducer/python/ALCARECOTkAlCosmics_Output_cff.py | ckamtsikis/cmssw | 852 | 12602898 | # Author : <NAME>
# Date : July 19th, 2007
# last update: $Date: 2011/02/09 09:10:11 $ by $Author: cerminar $
import FWCore.ParameterSet.Config as cms
# AlCaReco for track based alignment using Cosmic muon events
OutALCARECOTkAlCosmics_noDrop = cms.PSet(
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOTkAlCosmicsCTF',
'pathALCARECOTkAlCosmicsCosmicTF',
'pathALCARECOTkAlCosmicsRegional')
),
outputCommands = cms.untracked.vstring(
# 'keep *_ALCARECOTkAlCosmics*_*_*', # keeps also 0T ones if in same job
'keep *_ALCARECOTkAlCosmicsCTF_*_*',
'keep *_ALCARECOTkAlCosmicsCosmicTF_*_*',
'keep *_ALCARECOTkAlCosmicsRegional_*_*',
'keep siStripDigis_DetIdCollection_*_*',
'keep L1AcceptBunchCrossings_*_*_*',
'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
'keep *_TriggerResults_*_*',
'keep DcsStatuss_scalersRawToDigi_*_*',
'keep Si*Cluster*_si*Clusters_*_*', # for cosmics keep original clusters
'keep recoMuons_muons1Leg_*_*') # save muons as timing info is needed for BP corrections in deconvolution
)
import copy
OutALCARECOTkAlCosmics = copy.deepcopy(OutALCARECOTkAlCosmics_noDrop)
OutALCARECOTkAlCosmics.outputCommands.insert(0, "drop *")
|
chapter2/chapter2_basics_05.py | GoodMonsters/Building-Data-Science-Applications-with-FastAPI | 107 | 12602938 | <gh_stars>100-1000
def retrieve_page(page):
if page > 3:
return {"next_page": None, "items": []}
return {"next_page": page + 1, "items": ["A", "B", "C"]}
items = []
page = 1
while page is not None:
page_result = retrieve_page(page)
items += page_result["items"]
page = page_result["next_page"]
print(items) # ["A", "B", "C", "A", "B", "C", "A", "B", "C"]
|
care/facility/migrations/0149_auto_20200802_2156.py | gigincg/care | 189 | 12602981 | # Generated by Django 2.2.11 on 2020-08-02 16:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('facility', '0148_populate_unencrypted_patients'),
]
operations = [
migrations.RenameField(
model_name='historicalpatientregistration',
old_name='address',
new_name='address_old',
),
migrations.RenameField(
model_name='historicalpatientregistration',
old_name='name',
new_name='name_old',
),
migrations.RenameField(
model_name='historicalpatientregistration',
old_name='phone_number',
new_name='phone_number_old',
),
migrations.RenameField(
model_name='patientregistration',
old_name='address',
new_name='address_old',
),
migrations.RenameField(
model_name='patientregistration',
old_name='name',
new_name='name_old',
),
migrations.RenameField(
model_name='patientregistration',
old_name='phone_number',
new_name='phone_number_old',
),
]
|
alipay/aop/api/domain/Appinfos.py | antopen/alipay-sdk-python-all | 213 | 12602987 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Appinfos(object):
def __init__(self):
self._app_name = None
self._app_type = None
self._mini_app_id = None
@property
def app_name(self):
return self._app_name
@app_name.setter
def app_name(self, value):
self._app_name = value
@property
def app_type(self):
return self._app_type
@app_type.setter
def app_type(self, value):
self._app_type = value
@property
def mini_app_id(self):
return self._mini_app_id
@mini_app_id.setter
def mini_app_id(self, value):
self._mini_app_id = value
def to_alipay_dict(self):
params = dict()
if self.app_name:
if hasattr(self.app_name, 'to_alipay_dict'):
params['app_name'] = self.app_name.to_alipay_dict()
else:
params['app_name'] = self.app_name
if self.app_type:
if hasattr(self.app_type, 'to_alipay_dict'):
params['app_type'] = self.app_type.to_alipay_dict()
else:
params['app_type'] = self.app_type
if self.mini_app_id:
if hasattr(self.mini_app_id, 'to_alipay_dict'):
params['mini_app_id'] = self.mini_app_id.to_alipay_dict()
else:
params['mini_app_id'] = self.mini_app_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = Appinfos()
if 'app_name' in d:
o.app_name = d['app_name']
if 'app_type' in d:
o.app_type = d['app_type']
if 'mini_app_id' in d:
o.mini_app_id = d['mini_app_id']
return o
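# Round-trip sketch (hypothetical values, not part of the original module):
#     o = Appinfos.from_alipay_dict({'app_name': 'demo', 'app_type': 'TINYAPP',
#                                    'mini_app_id': '2021000000000000'})
#     o.to_alipay_dict()  # -> {'app_name': 'demo', 'app_type': 'TINYAPP',
#                         #     'mini_app_id': '2021000000000000'}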
|
src/tcclib/payloadobj.py | CollectiveDS/tccprofile | 244 | 12603016 | """Payload Object"""
from datetime import datetime
from uuid import uuid1
_NOW = datetime.now().strftime('%Y-%m-%d-%H%M%S')
class ServicesDict:
_REQ_ATTRS = ['identifier',
'identifier_type',
'csreq',
'allowed']
_TCC_SERVICE_PAYLOAD_MAP = {'identifier': 'Identifier',
'identifier_type': 'IdentifierType',
'csreq': 'CodeRequirement',
'allowed': 'Authorization',
'apple_events_identifier': 'AEReceiverIdentifier',
'apple_events_identifier_type': 'AEReceiverIdentifierType',
'apple_events_csreq': 'AEReceiverCodeRequirement'}
def __init__(self, **kwargs):
"""Services Dict"""
if not all([_a in kwargs for _a in self.__class__._REQ_ATTRS]):
raise AttributeError('{} attributes required.'.format(self.__class__._REQ_ATTRS))
self.service = dict()
for _k, _v in kwargs.items():
_attr = self.__class__._TCC_SERVICE_PAYLOAD_MAP.get(_k, None)
# Only include the payload attributes that have a value.
if _attr and _v:
self.service[_attr] = _v
def __hash__(self):
if not isinstance(self, self.__class__):
return NotImplemented
else:
_hash_str = ','.join(map(str, [self.__dict__.get(_k, 'None')
for _k in self.__class__._REQ_ATTRS]))
return hash(_hash_str)
def __eq__(self, other):
"""Equal."""
if not isinstance(other, self.__class__):
return NotImplemented
else:
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Not Equal."""
if not isinstance(other, self.__class__):
return NotImplemented
else:
return not self.__eq__(other)
class PayloadContentDict:
"""PayloadContent Dict"""
_UUID = str(uuid1()).upper()
_REQ_ATTRS = {'PayloadDescription': 'PPPCP profile',
'PayloadDisplayName': 'PPPCP profile',
'PayloadIdentifier': 'com.github.carlashley.tccprofile.{}'.format(_UUID),
'PayloadOrganization': 'com.github.carlashley.tccprofile',
'PayloadType': 'com.apple.TCC.configuration-profile-policy',
'PayloadUUID': _UUID,
'PayloadVersion': 1,
'Services': None}
_MUTABLE_KEYS = ['PayloadIdentifier',
'PayloadOrganization']
def __init__(self, services, **kwargs):
"""PPPCP Payload Content"""
if not isinstance(services, dict):
raise TypeError('\'services\' must be \'dict\'.')
self.payload_content = self.__class__._REQ_ATTRS.copy()
for _k, _v in kwargs.items():
if _k in self.__class__._MUTABLE_KEYS:
if _k == 'PayloadIdentifier':
_v = '{}.{}'.format(_v, self.__class__._UUID)
self.payload_content[_k] = _v
self.payload_content['Services'] = services
class ProfileDict:
_UUID = str(uuid1()).upper()
_REQ_ATTRS = {'PayloadDescription': 'PPPCP Profile generated from TCC databases or templates.',
'PayloadDisplayName': 'PPPCP Profile Generated {}'.format(_NOW),
'PayloadIdentifier': 'com.github.carlashley.tccprofile',
'PayloadOrganization': 'com.github.carlashley.tccprofile',
'PayloadRemovalDisallowed': True,
'PayloadScope': 'system',
'PayloadType': 'Configuration',
'PayloadUUID': _UUID,
'PayloadVersion': 1}
_MUTABLE_KEYS = ['PayloadDescription',
'PayloadDisplayName',
'PayloadIdentifier',
'PayloadOrganization',
'PayloadRemovalDisallowed']
def __init__(self, payload_content, **kwargs):
"""Profile Dict"""
self.payload = self.__class__._REQ_ATTRS.copy()
for _k, _v in kwargs.items():
if _k in self.__class__._MUTABLE_KEYS:
self.payload[_k] = _v
self.payload['PayloadContent'] = [payload_content]
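# Illustrative usage (service name and values are placeholders, not part of
# the original module):
#     services = {"SystemPolicyAllFiles": [ServicesDict(
#         identifier="com.example.app",
#         identifier_type="bundleID",
#         csreq="<base64 designated requirement>",
#         allowed=True).service]}
#     profile = ProfileDict(PayloadContentDict(services).payload_content).payload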
|
orguniqueview.py | Sinamore/orgextended | 120 | 12603033 | <reponame>Sinamore/orgextended
import sublime, sublime_plugin
import logging
log = logging.getLogger(__name__)
ViewMappings = {}
def CreateUniqueViewNamed(name, syntax):
# Close the view if it exists
win = sublime.active_window()
for view in win.views():
if view.name() == name:
win.focus_view(view)
win.run_command('close')
win.run_command('new_file')
view = win.active_view()
view.set_name(name)
view.set_syntax_file("Packages/OrgExtended/{}.sublime-syntax".format(syntax))
return view
def CreateOrFindUniqueViewNamed(name, syntax):
# Return the view if it exists
win = sublime.active_window()
for view in win.views():
if view.name() == name:
win.focus_view(view)
return view
win.run_command('new_file')
view = win.active_view()
view.set_name(name)
view.set_syntax_file("Packages/OrgExtended/{}.sublime-syntax".format(syntax))
return view
def IsViewActuallyActive(name):
win = sublime.active_window()
for view in win.views():
if view.name() == name:
return True
return False
def MoveViewToOtherGroup(view,myview):
window = sublime.active_window()
if (window.num_groups() < 2):
#self.window.run_command('clone_file')
window.set_layout({
"cols": [0.0, 0.5, 1.0],
"rows": [0.0, 1.0],
"cells": [[0, 0, 1, 1], [1, 0, 2, 1]]
})
mygroup = 0
othergroup = 1
else:
window.focus_view(view)
mygroup = 1
othergroup = 0
if (window.get_view_index(myview)[0] == 0):
othergroup = 1
mygroup = 0
window.focus_view(view)
window.run_command('move_to_group', {'group': othergroup})
window.run_command('focus_group', {'group': mygroup})
window.focus_view(myview)
class UniqueView:
def __init__(self, name, syntax, reuse=False,curview=None):
self.name = name
if(reuse):
self._view = CreateOrFindUniqueViewNamed(name,syntax=syntax)
else:
self._view = CreateUniqueViewNamed(name,syntax=syntax)
self._view.set_name(self.name)
if(curview != None):
MoveViewToOtherGroup(self._view,curview)
self._view.set_read_only(True)
self._view.set_scratch(True)
# View mappings is required so we can return the
# UniqueView rather than the view
ViewMappings[name] = self
@property
def view(self):
return self._view
@staticmethod
def Get(name,syntax="OrgExtended",reuse=True,curview=None):
if(name in ViewMappings and IsViewActuallyActive(name)):
return ViewMappings[name]
else:
return UniqueView(name,syntax,reuse,curview)
@staticmethod
def IsShowing(name):
return IsViewActuallyActive(name)
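# Usage sketch (view name is illustrative): UniqueView.Get() returns the cached
# UniqueView when a view with that name is still open, otherwise it creates one:
#     uv = UniqueView.Get("*My Results*", syntax="OrgExtended")
#     uv.view  # the underlying sublime.View (created read-only and scratch)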
|
concordia/signals/signals.py | juliecentofanti172/juliecentofanti.github.io | 134 | 12603039 | import django.dispatch
reservation_obtained = django.dispatch.Signal(
providing_args=["asset_pk", "reservation_token"]
)
reservation_released = django.dispatch.Signal(
providing_args=["asset_pk", "reservation_token"]
)
|
code/LFW/evaluate.py | huangyangyu/SeqFace | 136 | 12603063 | #!/usr/bin/env python
#coding: utf-8
#author: huangyangyu
layer_num = 27
#layer_num = 64
import os
import sys
import gflags
import cPickle
import numpy as np
root_dir = os.path.dirname(os.path.abspath("__file__")) + "/../../"
sys.path.append(root_dir + "code/")
if not gflags.FLAGS.has_key("model_dir"):
gflags.DEFINE_string("model_dir", root_dir + "model/ResNet-%d/" % layer_num, "set model dir")
if not gflags.FLAGS.has_key("feature_layer_names"):
gflags.DEFINE_string("feature_layer_names", "['fc5']", "set feature layer names")
if not gflags.FLAGS.has_key("device_id"):
gflags.DEFINE_integer("device_id", 0, "set device id")
if not gflags.FLAGS.has_key("ratio"):
gflags.DEFINE_float("ratio", -1.0, "set image ratio")
if not gflags.FLAGS.has_key("scale"):
gflags.DEFINE_float("scale", 1.1, "set image scale")
if not gflags.FLAGS.has_key("resize_height"):
gflags.DEFINE_integer("resize_height", 144, "set image height")
if not gflags.FLAGS.has_key("resize_width"):
gflags.DEFINE_integer("resize_width", 144, "set image width")
if not gflags.FLAGS.has_key("raw_scale"):
gflags.DEFINE_float("raw_scale", 255.0, "set raw scale")
if not gflags.FLAGS.has_key("input_scale"):
gflags.DEFINE_float("input_scale", 0.0078125, "set raw scale")
if not gflags.FLAGS.has_key("gray"):
gflags.DEFINE_boolean("gray", False, "set gray")
if not gflags.FLAGS.has_key("oversample"):
gflags.DEFINE_boolean("oversample", False, "set oversample")
from featurer import Featurer
def cos_sim(v1, v2):
v1 /= np.linalg.norm(v1)
v2 /= np.linalg.norm(v2)
dist = np.linalg.norm(np.array(v1)-np.array(v2))
cos = 1 - dist * dist / 2
return cos
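# Note on cos_sim: after normalisation v1 and v2 are unit vectors, so
# ||v1 - v2||^2 = 2 - 2*cos(theta) and hence cos(theta) = 1 - dist**2 / 2,
# which is the value returned above.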
def test():
image_dir = root_dir + "data/LFW/"
# pairs
image_files = set()
pairs = list()
for k, line in enumerate(open(image_dir + "pairs.txt")):
item = line.strip().split()
item[0] = image_dir + "images/" + item[0]
item[1] = image_dir + "images/" + item[1]
assert len(item) == 3
pairs.append(tuple(item))
image_files.add(item[0])
image_files.add(item[1])
# features
feature_file = image_dir + "feature_%d.pkl" % layer_num
if not os.path.exists(feature_file):
gflags.FLAGS(sys.argv)
model_dir = gflags.FLAGS.model_dir + "/"
featurer = Featurer(deploy_prototxt=model_dir + "deploy.prototxt", \
model_file=model_dir + "train.caffemodel", \
mean_file=model_dir + "mean.binaryproto", \
ratio_file=model_dir + "ratio.txt", \
label_file=model_dir + "label.txt", \
device_id=gflags.FLAGS.device_id, \
ratio=gflags.FLAGS.ratio, \
scale=gflags.FLAGS.scale, \
resize_height=gflags.FLAGS.resize_height, \
resize_width=gflags.FLAGS.resize_width, \
raw_scale=gflags.FLAGS.raw_scale, \
input_scale=gflags.FLAGS.input_scale, \
gray=gflags.FLAGS.gray, \
oversample=gflags.FLAGS.oversample, \
feature_layer_names=eval(gflags.FLAGS.feature_layer_names))
features = dict()
for k, image_file in enumerate(image_files):
if not features.has_key(image_file):
features[image_file.replace(root_dir, "")] = featurer.test(image_file=image_file)
print "processed:", k
sys.stdout.flush()
cPickle.dump(features, open(feature_file, "wb"))
else:
features = cPickle.load(open(feature_file, "rb"))
# sims
sims = list()
for pair in pairs:
image_file1, image_file2, tag = pair[:3]
# person1
feature1 = features[image_file1.replace(root_dir, "")]
# person2
feature2 = features[image_file2.replace(root_dir, "")]
# sim
sim = cos_sim(feature1, feature2)
sims.append((sim, int(tag), image_file1, image_file2))
sims = sorted(sims, key=lambda item: item[0])
# roc
tn = 0
fn = 0
tp = len(filter(lambda item: item[1]==1, sims))
fp = len(filter(lambda item: item[1]==0, sims))
best_accuracy = 0.0
best_thred = 0.0
with open(image_dir + "roc_%d.txt" % layer_num, "wb") as f:
for k, sim in enumerate(sims):
thred, tag, image_file1, image_file2 = sim
if tag == 0:
tn += 1
fp -= 1
else:
fn += 1
tp -= 1
tpr = 1.0 * tp / max(tp + fn, 1)
fnr = 1.0 * fn / max(tp + fn, 1)
tnr = 1.0 * tn / max(tn + fp, 1)
fpr = 1.0 * fp / max(tn + fp, 1)
accuracy = 1.0 * (tp + tn) / (tp + fp + tn + fn)
if accuracy > best_accuracy:
best_accuracy = accuracy
best_thred = thred
f.write("%.6f %.6f\n" % (tpr, fpr))
print "best:", len(pairs), best_thred, best_accuracy
if __name__ == "__main__":
test()
|
backdoors/shell/__pupy/pupy/pupysh.py | mehrdad-shokri/backdoorme | 796 | 12603125 | <reponame>mehrdad-shokri/backdoorme<filename>backdoors/shell/__pupy/pupy/pupysh.py
#!/usr/bin/env python
# -*- coding: UTF8 -*-
# --------------------------------------------------------------
# Copyright (c) 2015, <NAME> (<EMAIL>)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
import pupylib.PupyServer
import pupylib.PupyCmd
import logging
import time
import traceback
import argparse
import os
import os.path
__author__='<NAME>'
__version__='v1.0.1-alpha'
def print_version():
print("Pupy - %s"%(__version__))
if __name__=="__main__":
if os.path.dirname(__file__):
os.chdir(os.path.dirname(__file__))
parser = argparse.ArgumentParser(prog='ptrconsole', description="Pupy console")
parser.add_argument('--log-lvl', help="change log verbosity", dest="loglevel", choices=["DEBUG","INFO","WARNING","ERROR"], default="WARNING")
parser.add_argument('--version', help="print version and exit", action='store_true')
args=parser.parse_args()
if args.version:
print_version()
exit(0)
loglevel=logging.WARNING
if args.loglevel=="ERROR":
loglevel=logging.ERROR
elif args.loglevel=="DEBUG":
loglevel=logging.DEBUG
elif args.loglevel=="INFO":
loglevel=logging.INFO
else:
loglevel=logging.WARNING
logging.basicConfig(format='%(asctime)-15s - %(levelname)-5s - %(message)s')
logging.getLogger().setLevel(loglevel)
pupyServer=pupylib.PupyServer.PupyServer()
try:
import __builtin__ as builtins
except ImportError:
import builtins
builtins.glob_pupyServer=pupyServer # dirty ninja trick for this particular case avoiding to touch rpyc source code
pupyServer.start()
pcmd=pupylib.PupyCmd.PupyCmd(pupyServer)
while True:
try:
pcmd.cmdloop()
except Exception as e:
print(traceback.format_exc())
pcmd.intro=''
|
cd/canary-azure-devops/pipeline_configs/read_config.py | ganesh-k13/citrix-k8s-ingress-controller | 315 | 12603145 | <reponame>ganesh-k13/citrix-k8s-ingress-controller
import argparse
import json
class ConfigReader(object):
@staticmethod
def populate_env(input_params):
print(input_params)
if input_params.action == 'delete':
print('##vso[task.setvariable variable=TEARDOWN_FLAG]True')
with open(input_params.config_file) as json_file:
json_data = json.load(json_file)
print(json_data)
if json_data.keys():
for param, value in json_data.iteritems():
print('##vso[task.setvariable variable={}]{}'.format(param, value))
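# The '##vso[task.setvariable variable=NAME]VALUE' strings printed above are
# Azure DevOps logging commands: when emitted from a pipeline step they set
# pipeline variables (e.g. TEARDOWN_FLAG) for use by subsequent tasks.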
def main(input_params):
ConfigReader.populate_env(input_params)
def parse_arguments():
parser = argparse.ArgumentParser(description='Config Reader')
parser.add_argument('--config_file', nargs='?', default=None,
help='json file path')
parser.add_argument('--action', nargs='?', default=None,
help='apply/delete')
return parser.parse_args()
if __name__ == "__main__":
try:
arguments = parse_arguments()
main(arguments)
except Exception as e:
print('Exception occurred with reason: {}'.format(e)) |
xfel/command_line/cxi_xmerge.py | dperl-sol/cctbx_project | 155 | 12603166 | <reponame>dperl-sol/cctbx_project
# -*- mode: python; coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# LIBTBX_SET_DISPATCHER_NAME cxi.xmerge
#
# $Id$
from __future__ import absolute_import, division, print_function
from six.moves import range
import iotbx.phil
from dials.array_family import flex
from cctbx import uctbx
from iotbx import mtz
from libtbx.utils import Usage, multi_out
from libtbx import easy_pickle
import os
import time
import sys
from xfel.command_line.cxi_merge import master_phil,scaling_manager
from xfel.command_line.cxi_merge import unit_cell_distribution,show_overall_observations
from xfel.command_line.cxi_merge import scaling_result
from xfel.command_line.cxi_merge import consistent_set_and_model
from xfel import column_parser
from cctbx import miller
from six.moves import zip
#-----------------------------------------------------------------------
class xscaling_manager (scaling_manager) :
def __init__ (self, miller_set, i_model, params, log=None) :
scaling_manager.__init__(self,miller_set,i_model,params,log)
def scale_all (self) :
t1 = time.time()
self.read_all_mysql()
self.millers = self.millers_mysql
self.frames = self.frames_mysql
self._frames = self._frames_mysql
self.observations = self.observations_mysql
self._observations = self._observations_mysql
if self.params.model is None:
self.n_accepted = len(self.frames["cc"])
self.n_low_corr = 0
self.those_accepted = flex.bool(self.n_accepted, True)
else:
self.n_accepted = (self.frames["cc"]>self.params.min_corr).count(True)
self.n_low_corr = (self.frames["cc"]>self.params.min_corr).count(False)
self.those_accepted = (self.frames["cc"]>self.params.min_corr)
statsy = flex.mean_and_variance(self.frames["cc"])
print("%5d images, individual image correlation coefficients are %6.3f +/- %5.3f"%(
len(self.frames["cc"]),
statsy.mean(), statsy.unweighted_sample_standard_deviation(),
), file=self.log)
if self.params.scaling.report_ML and "half_mosaicity_deg" in self.frames:
mosaic = self.frames["half_mosaicity_deg"].select(self.those_accepted)
Mstat = flex.mean_and_variance(mosaic)
print("%5d images, half mosaicity is %6.3f +/- %5.3f degrees"%(
len(mosaic), Mstat.mean(), Mstat.unweighted_sample_standard_deviation()), file=self.log)
domain = self.frames["domain_size_ang"].select(self.those_accepted)
Dstat = flex.mean_and_variance(domain)
print("%5d images, domain size is %6.0f +/- %5.0f Angstroms"%(
len(domain), Dstat.mean(), Dstat.unweighted_sample_standard_deviation()), file=self.log)
invdomain = 1./domain
Dstat = flex.mean_and_variance(invdomain)
print("%5d images, inverse domain size is %f +/- %f Angstroms"%(
len(domain), Dstat.mean(), Dstat.unweighted_sample_standard_deviation()), file=self.log)
print("%5d images, domain size is %6.0f +/- %5.0f Angstroms"%(
len(domain), 1./Dstat.mean(), 1./Dstat.unweighted_sample_standard_deviation()), file=self.log)
t2 = time.time()
print("", file=self.log)
print("#" * 80, file=self.log)
print("FINISHED MERGING", file=self.log)
print(" Elapsed time: %.1fs" % (t2 - t1), file=self.log)
print(" %d integration files were accepted" % (
self.n_accepted), file=self.log)
print(" %d rejected due to poor correlation" % \
self.n_low_corr, file=self.log)
def read_all_mysql(self):
print("reading observations from %s database"%(self.params.backend))
if self.params.backend == 'MySQL':
from xfel.merging.database.merging_database import manager
elif self.params.backend == 'SQLite':
from xfel.merging.database.merging_database_sqlite3 import manager
else:
from xfel.merging.database.merging_database_fs import manager
CART = manager(self.params)
self.millers_mysql = CART.read_indices()
self.millers = self.millers_mysql
self.observations_mysql = CART.read_observations()
parser = column_parser()
parser.set_int("hkl_id",self.observations_mysql["hkl_id"])
parser.set_double("i",self.observations_mysql["i"])
parser.set_double("sigi",self.observations_mysql["sigi"])
parser.set_int("frame_id",self.observations_mysql["frame_id"])
parser.set_int("H",self.observations_mysql["original_h"])
parser.set_int("K",self.observations_mysql["original_k"])
parser.set_int("L",self.observations_mysql["original_l"])
self._observations_mysql = parser
self.observations = dict(hkl_id=parser.get_int("hkl_id"),
i=parser.get_double("i"),
sigi=parser.get_double("sigi"),
frame_id=parser.get_int("frame_id"),
H=parser.get_int("H"),
K=parser.get_int("K"),
L=parser.get_int("L"),
)
self.frames_mysql = CART.read_frames()
parser = column_parser()
parser.set_int("frame_id",self.frames_mysql["frame_id"])
parser.set_double("wavelength",self.frames_mysql["wavelength"])
parser.set_double("cc",self.frames_mysql["cc"])
try:
parser.set_double("slope",self.frames_mysql["slope"])
parser.set_double("offset",self.frames_mysql["offset"])
if self.params.scaling.report_ML:
parser.set_double("domain_size_ang",self.frames_mysql["domain_size_ang"])
parser.set_double("half_mosaicity_deg",self.frames_mysql["half_mosaicity_deg"])
except KeyError: pass
self._frames_mysql = parser
CART.join()
#-----------------------------------------------------------------------
def run(args):
phil = iotbx.phil.process_command_line(args=args, master_string=master_phil).show()
work_params = phil.work.extract()
from xfel.merging.phil_validation import application
application(work_params)
if ("--help" in args) :
libtbx.phil.parse(master_phil.show())
return
if ((work_params.d_min is None) or
(work_params.data is None) or
( (work_params.model is None) and work_params.scaling.algorithm != "mark1") ) :
raise Usage("cxi.merge "
"d_min=4.0 "
"data=~/scratch/r0220/006/strong/ "
"model=3bz1_3bz2_core.pdb")
if ((work_params.rescale_with_average_cell) and
(not work_params.set_average_unit_cell)) :
raise Usage("If rescale_with_average_cell=True, you must also specify "+
"set_average_unit_cell=True.")
if work_params.raw_data.sdfac_auto and work_params.raw_data.sdfac_refine:
raise Usage("Cannot specify both sdfac_auto and sdfac_refine")
if not work_params.include_negatives_fix_27May2018:
work_params.include_negatives = False # use old behavior
log = open("%s_%s.log" % (work_params.output.prefix,work_params.scaling.algorithm), "w")
out = multi_out()
out.register("log", log, atexit_send_to=None)
out.register("stdout", sys.stdout)
# Verify that the externally supplied isomorphous reference, if
# present, defines a suitable column of intensities, and exit with
# error if it does not. Then warn if it is necessary to generate
# Bijvoet mates. Failure to catch these issues here would lead to
# possibly obscure problems in cxi/cxi_cc.py later on.
try:
data_SR = mtz.object(work_params.scaling.mtz_file)
except RuntimeError:
pass
else:
array_SR = None
obs_labels = []
for array in data_SR.as_miller_arrays():
this_label = array.info().label_string().lower()
if array.observation_type() is not None:
obs_labels.append(this_label.split(',')[0])
if this_label.find('fobs')>=0:
array_SR = array.as_intensity_array()
break
if this_label.find('imean')>=0:
array_SR = array.as_intensity_array()
break
if this_label.find(work_params.scaling.mtz_column_F)==0:
array_SR = array.as_intensity_array()
break
if array_SR is None:
known_labels = ['fobs', 'imean', work_params.scaling.mtz_column_F]
raise Usage(work_params.scaling.mtz_file +
" does not contain any observations labelled [" +
", ".join(known_labels) +
"]. Please set scaling.mtz_column_F to one of [" +
",".join(obs_labels) + "].")
elif not work_params.merge_anomalous and not array_SR.anomalous_flag():
print("Warning: Preserving anomalous contributors, but %s " \
"has anomalous contributors merged. Generating identical Bijvoet " \
"mates." % work_params.scaling.mtz_file, file=out)
# Read Nat's reference model from an MTZ file. XXX The observation
# type is given as F, not I--should they be squared? Check with Nat!
print("I model", file=out)
if work_params.model is not None:
from xfel.merging.general_fcalc import run
i_model = run(work_params)
work_params.target_unit_cell = i_model.unit_cell()
work_params.target_space_group = i_model.space_group_info()
i_model.show_summary()
else:
i_model = None
print("Target unit cell and space group:", file=out)
print(" ", work_params.target_unit_cell, file=out)
print(" ", work_params.target_space_group, file=out)
miller_set, i_model = consistent_set_and_model(work_params,i_model)
# ---- Augment this code with any special procedures for x scaling
scaler = xscaling_manager(
miller_set=miller_set,
i_model=i_model,
params=work_params,
log=out)
scaler.scale_all()
if scaler.n_accepted == 0:
return None
# --- End of x scaling
scaler.uc_values = unit_cell_distribution()
for icell in range(len(scaler.frames["unit_cell"])):
if scaler.params.model is None:
scaler.uc_values.add_cell(
unit_cell=scaler.frames["unit_cell"][icell])
else:
scaler.uc_values.add_cell(
unit_cell=scaler.frames["unit_cell"][icell],
rejected=(scaler.frames["cc"][icell] < scaler.params.min_corr))
scaler.show_unit_cell_histograms()
if (work_params.rescale_with_average_cell) :
average_cell_abc = scaler.uc_values.get_average_cell_dimensions()
average_cell = uctbx.unit_cell(list(average_cell_abc) +
list(work_params.target_unit_cell.parameters()[3:]))
work_params.target_unit_cell = average_cell
print("", file=out)
print("#" * 80, file=out)
print("RESCALING WITH NEW TARGET CELL", file=out)
print(" average cell: %g %g %g %g %g %g" % \
work_params.target_unit_cell.parameters(), file=out)
print("", file=out)
scaler.reset()
scaler = xscaling_manager(
miller_set=miller_set,
i_model=i_model,
params=work_params,
log=out)
scaler.scale_all()
scaler.uc_values = unit_cell_distribution()
for icell in range(len(scaler.frames["unit_cell"])):
if scaler.params.model is None:
scaler.uc_values.add_cell(
unit_cell=scaler.frames["unit_cell"][icell])
else:
scaler.uc_values.add_cell(
unit_cell=scaler.frames["unit_cell"][icell],
rejected=(scaler.frames["cc"][icell] < scaler.params.min_corr))
scaler.show_unit_cell_histograms()
if False : #(work_params.output.show_plots) :
try :
plot_overall_completeness(completeness)
except Exception as e :
print("ERROR: can't show plots")
print(" %s" % str(e))
print("\n", file=out)
reserve_prefix = work_params.output.prefix
for data_subset in [1,2,0]:
work_params.data_subset = data_subset
work_params.output.prefix = "%s_s%1d_%s"%(reserve_prefix,data_subset,work_params.scaling.algorithm)
if work_params.data_subset == 0:
scaler.frames["data_subset"] = flex.bool(scaler.frames["frame_id"].size(),True)
elif work_params.data_subset == 1:
scaler.frames["data_subset"] = scaler.frames["odd_numbered"]
elif work_params.data_subset == 2:
scaler.frames["data_subset"] = scaler.frames["odd_numbered"]==False
# --------- New code ------------------
#sanity check
for mod,obs in zip(miller_set.indices(), scaler.millers["merged_asu_hkl"]):
if mod!=obs: raise Exception("miller index lists inconsistent--check d_min are equal for merge and xmerge scripts")
assert mod==obs
"""Sum the observations of I and I/sig(I) for each reflection.
sum_I = flex.double(i_model.size(), 0.)
sum_I_SIGI = flex.double(i_model.size(), 0.)
scaler.completeness = flex.int(i_model.size(), 0)
scaler.summed_N = flex.int(i_model.size(), 0)
scaler.summed_wt_I = flex.double(i_model.size(), 0.)
scaler.summed_weight = flex.double(i_model.size(), 0.)
scaler.n_rejected = flex.double(scaler.frames["frame_id"].size(), 0.)
scaler.n_obs = flex.double(scaler.frames["frame_id"].size(), 0.)
scaler.d_min_values = flex.double(scaler.frames["frame_id"].size(), 0.)
scaler.ISIGI = {}"""
from xfel import scaling_results, get_scaling_results, get_isigi_dict
results = scaling_results(scaler._observations, scaler._frames,
scaler.millers["merged_asu_hkl"],scaler.frames["data_subset"],
work_params.include_negatives)
results.__getattribute__(
work_params.scaling.algorithm)(
scaler.params.min_corr, scaler.params.target_unit_cell)
sum_I, sum_I_SIGI, \
scaler.completeness, scaler.summed_N, \
scaler.summed_wt_I, scaler.summed_weight, scaler.n_rejected, scaler.n_obs, \
scaler.d_min_values, hkl_ids, i_sigi_list = get_scaling_results(results)
scaler.ISIGI = get_isigi_dict(results)
if work_params.merging.refine_G_Imodel:
from xfel.cxi.merging.refine import find_scale
my_find_scale = find_scale(scaler, work_params)
sum_I, sum_I_SIGI, \
scaler.completeness, scaler.summed_N, \
scaler.summed_wt_I, scaler.summed_weight, scaler.n_rejected, \
scaler.n_obs, scaler.d_min_values, hkl_ids, i_sigi_list \
= my_find_scale.get_scaling_results(results, scaler)
scaler.ISIGI = get_isigi_dict(results)
scaler.wavelength = scaler.frames["wavelength"]
scaler.corr_values = scaler.frames["cc"]
scaler.rejected_fractions = flex.double(scaler.frames["frame_id"].size(), 0.)
for irej in range(len(scaler.rejected_fractions)):
if scaler.n_obs[irej] > 0:
scaler.rejected_fractions[irej] = scaler.n_rejected[irej]/scaler.n_obs[irej]
# ---------- End of new code ----------------
if work_params.raw_data.sdfac_refine or work_params.raw_data.errors_from_sample_residuals:
if work_params.raw_data.sdfac_refine:
if work_params.raw_data.error_models.sdfac_refine.minimizer == 'simplex':
from xfel.merging.algorithms.error_model.sdfac_refine import sdfac_refine as error_modeler
elif work_params.raw_data.error_models.sdfac_refine.minimizer == 'lbfgs':
from xfel.merging.algorithms.error_model.sdfac_refine_lbfgs import sdfac_refine_refltable_lbfgs as error_modeler
elif work_params.raw_data.error_models.sdfac_refine.minimizer == 'LevMar':
from xfel.merging.algorithms.error_model.sdfac_refine_levmar import sdfac_refine_refltable_levmar as error_modeler
if work_params.raw_data.errors_from_sample_residuals:
from xfel.merging.algorithms.error_model.errors_from_residuals import errors_from_residuals as error_modeler
error_modeler(scaler).adjust_errors()
if work_params.raw_data.reduced_chi_squared_correction:
from xfel.merging.algorithms.error_model.reduced_chi_squared import reduced_chi_squared
reduced_chi_squared(scaler).compute()
miller_set_avg = miller_set.customized_copy(
unit_cell=work_params.target_unit_cell)
table1 = show_overall_observations(
obs=miller_set_avg,
redundancy=scaler.completeness,
redundancy_to_edge=None,
summed_wt_I=scaler.summed_wt_I,
summed_weight=scaler.summed_weight,
ISIGI=scaler.ISIGI,
n_bins=work_params.output.n_bins,
title="Statistics for all reflections",
out=out,
work_params=work_params)
if table1 is None:
raise Exception("table could not be constructed")
print("", file=out)
if work_params.scaling.algorithm == 'mark0':
n_refl, corr = scaler.get_overall_correlation(sum_I)
else:
n_refl, corr = ((scaler.completeness > 0).count(True), 0)
print("\n", file=out)
table2 = show_overall_observations(
obs=miller_set_avg,
redundancy=scaler.summed_N,
redundancy_to_edge=None,
summed_wt_I=scaler.summed_wt_I,
summed_weight=scaler.summed_weight,
ISIGI=scaler.ISIGI,
n_bins=work_params.output.n_bins,
title="Statistics for reflections where I > 0",
out=out,
work_params=work_params)
if table2 is None:
raise Exception("table could not be constructed")
print("", file=out)
mtz_file, miller_array = scaler.finalize_and_save_data()
loggraph_file = os.path.abspath("%s_graphs.log" % work_params.output.prefix)
f = open(loggraph_file, "w")
f.write(table1.format_loggraph())
f.write("\n")
f.write(table2.format_loggraph())
f.close()
result = scaling_result(
miller_array=miller_array,
plots=scaler.get_plot_statistics(),
mtz_file=mtz_file,
loggraph_file=loggraph_file,
obs_table=table1,
all_obs_table=table2,
n_reflections=n_refl,
overall_correlation=corr)
easy_pickle.dump("%s.pkl" % work_params.output.prefix, result)
work_params.output.prefix = reserve_prefix
# Output table with number of images contributing reflections per
# resolution bin.
from libtbx import table_utils
miller_set_avg.setup_binner(
d_max=100000, d_min=work_params.d_min, n_bins=work_params.output.n_bins)
table_data = [["Bin", "Resolution Range", "# images", "%accept"]]
if work_params.model is None:
appropriate_min_corr = -1.1 # lowest possible c.c.
else:
appropriate_min_corr = work_params.min_corr
n_frames = (scaler.frames['cc'] > appropriate_min_corr).count(True)
iselect = 1
while iselect<work_params.output.n_bins:
col_count1 = results.count_frames(appropriate_min_corr, miller_set_avg.binner().selection(iselect))
print("colcount1",col_count1)
if col_count1>0: break
iselect +=1
if col_count1==0: raise Exception("no reflections in any bins")
for i_bin in miller_set_avg.binner().range_used():
col_count = '%8d' % results.count_frames(
appropriate_min_corr, miller_set_avg.binner().selection(i_bin))
col_legend = '%-13s' % miller_set_avg.binner().bin_legend(
i_bin=i_bin, show_bin_number=False, show_bin_range=False,
show_d_range=True, show_counts=False)
xpercent = results.count_frames(appropriate_min_corr, miller_set_avg.binner().selection(i_bin))/float(n_frames)
percent = '%5.2f'% (100.*xpercent)
table_data.append(['%3d' % i_bin, col_legend, col_count,percent])
table_data.append([""] * len(table_data[0]))
table_data.append(["All", "", '%8d' % n_frames])
print(file=out)
print(table_utils.format(
table_data, has_header=1, justify='center', delim=' '), file=out)
reindexing_ops = {"h,k,l":0} # get a list of all reindexing ops for this dataset
if work_params.merging.reverse_lookup is not None:
for key in scaler.reverse_lookup:
if reindexing_ops.get(scaler.reverse_lookup[key], None) is None:
reindexing_ops[scaler.reverse_lookup[key]]=0
reindexing_ops[scaler.reverse_lookup[key]]+=1
from xfel.cxi.cxi_cc import run_cc
for key in reindexing_ops.keys():
run_cc(work_params,reindexing_op=key,output=out)
if isinstance(scaler.ISIGI, dict):
from xfel.merging import isigi_dict_to_reflection_table
refls = isigi_dict_to_reflection_table(scaler.miller_set.indices(), scaler.ISIGI)
else:
refls = scaler.ISIGI
easy_pickle.dump("%s.refl"%work_params.output.prefix, refls)
return result
if (__name__ == "__main__"):
show_plots = False
if ("--plots" in sys.argv) :
sys.argv.remove("--plots")
show_plots = True
result = run(args=sys.argv[1:])
if result is None:
sys.exit(1)
if (show_plots) :
try :
result.plots.show_all_pyplot()
from wxtbx.command_line import loggraph
loggraph.run([result.loggraph_file])
except Exception as e :
print("Can't display plots")
print("You should be able to view them by running this command:")
print(" wxtbx.loggraph %s" % result.loggraph_file)
raise e
|
dp/cloud/python/magma/configuration_controller/request_router/request_router.py | Aitend/magma | 539 | 12603180 | """
Copyright 2021 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Dict, List
import requests
from magma.configuration_controller.request_router.exceptions import (
RequestRouterError,
)
class RequestRouter(object):
"""
This class is responsible for sending requests to SAS and forwarding SAS responses to Radio Controller.
"""
def __init__(
self,
sas_url: str,
rc_ingest_url: str,
cert_path: str,
ssl_key_path: str,
request_mapping: Dict,
ssl_verify: str,
):
self.sas_url = sas_url
self.rc_ingest_url = rc_ingest_url
self.cert_path = cert_path
self.ssl_key_path = ssl_key_path
self.ssl_verify = ssl_verify
self.request_mapping = request_mapping
def post_to_sas(self, request_dict: Dict[str, List[Dict]]) -> requests.Response:
"""
Parse JSON request and send it to the appropriate SAS endpoint.
It will only look at the first key of the parsed JSON dict, so if there are multiple request types chunked in
one dictionary it will send them to a SAS endpoint pertaining to the first key of the dictionary only.
Therefore it is important to pass the requests grouped under one request name.
Parameters:
request_dict: Dictionary with a request name as key and an array of objects as value
Returns:
requests.Response: Response object with SAS response as json payload
Raises:
RequestRouterError: General Request Router error
"""
try:
request_name = next(iter(request_dict))
except StopIteration:
raise RequestRouterError(
"Received an empty requests dictionary",
)
try:
sas_method = self.request_mapping[request_name]
except KeyError:
raise RequestRouterError(
f'Unable to find SAS method matching {request_name}',
)
try:
sas_response = requests.post(
f'{self.sas_url}/{sas_method}',
json=request_dict,
cert=(self.cert_path, self.ssl_key_path),
verify=self.ssl_verify,
)
except Exception as e:
raise RequestRouterError(str(e))
return sas_response
def redirect_sas_response_to_radio_controller(self, sas_response: requests.Response) -> requests.Response:
"""
Send Response object to Radio Controller's ingest endpoint
Parameters:
sas_response: SAS Response object
Returns:
requests.Response: Radio Controller Response object
Raises:
RequestRouterError: General Request Router error
"""
payload = sas_response.json()
try:
return requests.post(self.rc_ingest_url, json=payload)
except Exception as e:
raise RequestRouterError(str(e))
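# A minimal usage sketch of the router. The URLs, certificate paths and the
# request mapping below are hypothetical placeholders, not values taken from
# any real deployment configuration.
#
#     router = RequestRouter(
#         sas_url="https://sas.example.com/v1.2",
#         rc_ingest_url="http://radio-controller:8080/sas/ingest",
#         cert_path="/certs/tls.crt",
#         ssl_key_path="/certs/tls.key",
#         request_mapping={"registrationRequest": "registration"},
#         ssl_verify="/certs/ca.crt",
#     )
#     sas_response = router.post_to_sas(
#         {"registrationRequest": [{"cbsdSerialNumber": "ABC123"}]},
#     )
#     router.redirect_sas_response_to_radio_controller(sas_response)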
|
app/src/server.py | rgarciajim/minimal-docker-python-setup | 165 | 12603195 |
from flask import Flask, request, jsonify
from flask_redis import FlaskRedis
app = Flask(__name__)
app.config.update(
REDIS_URL="redis://redis:6379/0"
)
redis_store = FlaskRedis(app)
@app.route('/')
def index():
addr = request.remote_addr
redis_store.incr(addr)
visits = int(redis_store.get(addr))
return jsonify({
'ip': addr,
'visits': visits,
})
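
# A minimal way to run the development server directly. The host and port are
# assumptions; in a containerized setup the app may be served by a separate
# WSGI server instead, and "redis" must resolve as a hostname.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)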
|
experiments/list5/calculate_label_prediction_scores.py | Elfsong/pygaggle | 166 | 12603267 | import argparse
import glob
import json
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from fever_utils import make_sentence_id
def calculate_scores(args):
evidences = {}
with open(args.dataset_file, 'r', encoding='utf-8') as f:
for line in f:
line_json = json.loads(line.strip())
evidence_sets = []
if line_json['label'] != 'NOT ENOUGH INFO':
for annotator in line_json['evidence']:
evidence_set = [make_sentence_id(evidence[2], evidence[3]) for evidence in annotator]
evidence_sets.append(evidence_set)
evidences[line_json['id']] = evidence_sets
def aggregate(scores):
if args.num_classes == 4:
# filter out samples predicted weak and remove weak scores
scores = scores[np.argmax(scores, axis=1) != 3][:, :3]
if len(scores) == 0:
return 1
if args.strategy == 'first':
return np.argmax(scores[0])
elif args.strategy == 'sum':
return np.argmax(np.sum(np.exp(scores), axis=0))
elif args.strategy == 'nei_default':
maxes = np.argmax(scores, axis=1)
if (0 in maxes and 2 in maxes) or (0 not in maxes and 2 not in maxes):
return 1
elif 0 in maxes:
return 0
elif 2 in maxes:
return 2
return -1
elif args.strategy == 'max':
return np.argmax(np.max(np.exp(scores), axis=0))
return -1
for scores_file in sorted(glob.glob(f'{args.scores_files_prefix}*')):
labels = []
pred_labels = []
fever_scores = []
with open(args.id_file, 'r', encoding='utf-8') as f_id, open(scores_file, 'r', encoding='utf-8') as f_scores:
curr_query = None
curr_label = None # actual label for current query
curr_scores = []
curr_evidences = []
for id_line, scores_line in zip(f_id, f_scores):
query_id, sent_ids, label_str = id_line.strip().split('\t')
query_id = int(query_id)
if query_id != curr_query:
if curr_query is not None:
# aggregate to get predicted label
pred_label = aggregate(np.array(curr_scores))
pred_labels.append(pred_label)
# calculate FEVER score
fever_scores.append(int(pred_label == curr_label and (pred_label == 1 or \
any([set(ev_set).issubset(set(curr_evidences)) for ev_set in evidences[curr_query]]))))
curr_query = query_id
curr_scores.clear()
curr_evidences.clear()
# save actual label
if label_str == 'false':
curr_label = 0
elif label_str == 'weak':
curr_label = 1
elif label_str == 'true':
curr_label = 2
labels.append(curr_label)
# save predicted evidence(s) and scores
if args.num_classes == 3:
_, false_score, nei_score, true_score = scores_line.strip().split('\t')
scores = [float(false_score), float(nei_score), float(true_score)]
elif args.num_classes == 4:
_, false_score, ignore_score, true_score, nei_score = scores_line.strip().split('\t')
scores = [float(false_score), float(nei_score), float(true_score), float(ignore_score)]
curr_scores.append(scores)
curr_evidences.extend(sent_ids.strip().split(' '))
# handle last query
pred_label = aggregate(np.array(curr_scores))
pred_labels.append(pred_label)
fever_scores.append(int(pred_label == curr_label and (pred_label == 1 or \
any([set(ev_set).issubset(set(curr_evidences)) for ev_set in evidences[curr_query]]))))
print(scores_file)
print(f'Label Accuracy: {accuracy_score(labels, pred_labels)}')
print(f'Predicted Label F1 Scores: {f1_score(labels, pred_labels, average=None)}')
print(f'Predicted Label Distribution: {[pred_labels.count(i) for i in range(args.num_classes)]}')
print(f'FEVER Score: {sum(fever_scores) / len(fever_scores)}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculates various metrics of label prediction output files.')
parser.add_argument('--id_file', required=True, help='Input query-doc pair ids file.')
parser.add_argument('--scores_files_prefix', required=True, help='Prefix of all T5 label prediction scores files.')
parser.add_argument('--dataset_file', help='FEVER dataset file.')
parser.add_argument('--num_classes', type=int, default=3, help='Number of label prediction classes.')
parser.add_argument('--strategy', help='Format of scores file and method of aggregation if applicable.')
args = parser.parse_args()
calculate_scores(args)
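
# Example invocation. All file paths and the prefix below are hypothetical
# placeholders; the flags mirror the argparse definitions above.
#
#     python calculate_label_prediction_scores.py \
#         --id_file query_doc_pair_ids.txt \
#         --scores_files_prefix label_prediction_scores \
#         --dataset_file shared_task_dev.jsonl \
#         --num_classes 3 \
#         --strategy sum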
|
Cktgen/cktgen/cktgen/cktgen_from_json.py | mabrains/ALIGN-public | 119 | 12603275 |
from .cktgen import *
import json
if __name__ == "__main__":
args,tech = parse_args()
assert args.source != ''
src = args.source
assert args.placer_json != ''
with open( args.placer_json, "rt") as fp:
placer_results = json.load( fp)
with open( "INPUT/%s_global_router_out.json" % src, "rt") as fp:
global_router_results = json.load( fp)
# print( placer_results)
# print( global_router_results)
def roundUp( x, f=2):
assert x % f == 0
result = f*((x+f-1)//f)
assert x == result
return result
adts = {}
for leaf in placer_results['leaves']:
leaf_bbox = leaf['bbox']
nm = leaf['template_name']
# leaves x bbox is 1:1 with poly pitch
# seems that number for rows is two (FIX this!)
adt = ADT( tech, nm, npp=leaf_bbox[2], nr=(leaf_bbox[3]+3)//4)
adts[nm] = adt
for term in leaf['terminals']:
r = term['rect']
if term['layer'] == "metal1":
assert r[0] == r[2]
adt.addM1Terminal( term['net_name'], rect=r, leaf_bbox=leaf_bbox)
elif term['layer'] == "metal2":
assert r[1] == r[3]
adt.addM2Terminal( term['net_name'], rect=r)
elif term['layer'] == "metal3":
assert r[0] == r[2]
adt.addM3Terminal( term['net_name'], rect=r)
elif term['layer'] == "metal4":
assert r[1] == r[3]
adt.addM4Terminal( term['net_name'], rect=r)
elif term['layer'] == "metal5":
assert r[0] == r[2]
adt.addM5Terminal( term['net_name'], rect=r)
else:
assert False, term['layer']
# using half (the placer grid)
# HACK Dividing by 2
def xg( x):
return tech.pitchPoly//2*tech.halfXGRGrid*x
def yg( y):
return tech.pitchDG //2*tech.halfYGRGrid*y
bbox = placer_results['bbox']
netl = Netlist( nm=args.block_name, bbox=Rect( 0,0, xg(roundUp(bbox[2])), yg(roundUp(bbox[3]))))
adnetl = ADNetlist( args.block_name)
for inst in placer_results['instances']:
tN = inst['template_name']
iN = inst['instance_name']
tr = inst['transformation']
print( tr)
adnetl.addInstance( ADI( adts[tN], iN, ADITransform( xg(tr['oX']), yg(tr['oY']), tr['sX'], tr['sY'])))
for (f,a) in inst['formal_actual_map'].items():
adnetl.connect( iN, f, a)
if 'ports' in placer_results:
ports = placer_results['ports']
for p in ports:
adnetl.addPort( p)
adnetl.genNetlist( netl)
for wire in global_router_results['wires']:
netl.newGR( wire['net_name'], Rect( *wire['rect']), wire['layer'], wire['width'])
pathlib.Path("INPUT").mkdir(parents=True, exist_ok=True)
tech.write_files( "INPUT", netl.nm, netl.bbox.toList())
netl.write_files( tech, "INPUT", args)
|
system/t04_mirror/list.py | Yelp/aptly | 666 | 12603306 |
from lib import BaseTest
import re
class ListMirror1Test(BaseTest):
"""
list mirrors: regular list
"""
fixtureCmds = [
"aptly mirror create --ignore-signatures mirror1 http://cdn-fastly.deb.debian.org/debian/ stretch",
"aptly mirror create -with-sources --ignore-signatures mirror2 http://cdn-fastly.deb.debian.org/debian/ stretch contrib",
"aptly -architectures=i386 mirror create --ignore-signatures mirror3 http://cdn-fastly.deb.debian.org/debian/ stretch non-free",
"aptly mirror create -ignore-signatures mirror4 http://download.opensuse.org/repositories/Apache:/MirrorBrain/Debian_9.0/ ./",
]
runCmd = "aptly mirror list"
class ListMirror2Test(BaseTest):
"""
list mirrors: empty list
"""
runCmd = "aptly mirror list"
class ListMirror3Test(BaseTest):
"""
list mirrors: raw list
"""
fixtureDB = True
runCmd = "aptly -raw mirror list"
class ListMirror4Test(BaseTest):
"""
list mirrors: raw empty list
"""
runCmd = "aptly -raw mirror list"
class ListMirror5Test(BaseTest):
"""
list mirrors: json empty list
"""
runCmd = "aptly mirror list -json"
class ListMirror6Test(BaseTest):
"""
list mirrors: regular list
"""
fixtureCmds = [
"aptly mirror create --ignore-signatures mirror1 http://cdn-fastly.deb.debian.org/debian/ stretch",
"aptly mirror create -with-sources --ignore-signatures mirror2 http://cdn-fastly.deb.debian.org/debian/ stretch contrib",
"aptly -architectures=i386 mirror create --ignore-signatures mirror3 http://cdn-fastly.deb.debian.org/debian/ stretch non-free",
"aptly mirror create -ignore-signatures mirror4 http://download.opensuse.org/repositories/Apache:/MirrorBrain/Debian_9.0/ ./",
]
runCmd = "aptly mirror list -json"
def outputMatchPrepare(_, s):
return re.sub(r'[ ]*"UUID": "[\w-]+",?\n', '', s)
|
maha/parsers/rules/distance/values.py | TRoboto/Maha | 152 | 12603323 |
from maha.expressions import EXPRESSION_SPACE_OR_NONE
from maha.parsers.templates import DistanceUnit, Value
from maha.rexy import non_capturing_group
from ..common import TWO_SUFFIX, ValueUnit
KILO = "كيلو"
CENTI = "سا?نتيم?"
MILLI = "مي?لي"
DECI = "ديسي"
ONE_METER = Value(ValueUnit(1, DistanceUnit.METERS), "مترا?")
ONE_KILOMETER = Value(
ValueUnit(1, DistanceUnit.KILOMETERS),
non_capturing_group(KILO + EXPRESSION_SPACE_OR_NONE + ONE_METER, "كم"),
)
ONE_CENTIMETER = Value(
ValueUnit(1, DistanceUnit.CENTIMETERS),
non_capturing_group(CENTI + EXPRESSION_SPACE_OR_NONE + ONE_METER, "سم"),
)
ONE_MILLIMETER = Value(
ValueUnit(1, DistanceUnit.MILLIMETERS),
non_capturing_group(MILLI + EXPRESSION_SPACE_OR_NONE + ONE_METER, "مم"),
)
ONE_DECIMETER = Value(
ValueUnit(1, DistanceUnit.DECIMETERS),
non_capturing_group(DECI + EXPRESSION_SPACE_OR_NONE + ONE_METER, "دسم"),
)
ONE_MILE = Value(ValueUnit(1, DistanceUnit.MILES), "ميلا?")
ONE_YARD = Value(ValueUnit(1, DistanceUnit.YARDS), "يارد[اةه]?")
ONE_FOOT = Value(ValueUnit(1, DistanceUnit.FEET), "قدما?")
ONE_INCH = Value(
ValueUnit(1, DistanceUnit.INCHES), non_capturing_group("[إا]نشا?", "بوص[ةه]")
)
TWO_METERS = Value(ValueUnit(2, DistanceUnit.METERS), "متر" + TWO_SUFFIX)
TWO_MILES = Value(ValueUnit(2, DistanceUnit.MILES), "ميل" + TWO_SUFFIX)
TWO_FEET = Value(ValueUnit(2, DistanceUnit.FEET), "قدم" + TWO_SUFFIX)
TWO_INCHES = Value(
ValueUnit(2, DistanceUnit.INCHES),
non_capturing_group("[إا]نش" + TWO_SUFFIX, "بوصت" + TWO_SUFFIX),
)
SEVERAL_METERS = Value(
ValueUnit(1, DistanceUnit.METERS), non_capturing_group("مترات", "[أا]متار")
)
SEVERAL_KILOMETERS = Value(
ValueUnit(1, DistanceUnit.KILOMETERS),
non_capturing_group(KILO + EXPRESSION_SPACE_OR_NONE + SEVERAL_METERS),
)
SEVERAL_CENTIMETERS = Value(
ValueUnit(1, DistanceUnit.CENTIMETERS),
non_capturing_group(CENTI + EXPRESSION_SPACE_OR_NONE + SEVERAL_METERS),
)
SEVERAL_MILLIMETERS = Value(
ValueUnit(1, DistanceUnit.MILLIMETERS),
non_capturing_group(MILLI + EXPRESSION_SPACE_OR_NONE + SEVERAL_METERS),
)
SEVERAL_DECIMETERS = Value(
ValueUnit(1, DistanceUnit.DECIMETERS),
non_capturing_group(DECI + EXPRESSION_SPACE_OR_NONE + SEVERAL_METERS),
)
SEVERAL_MILES = Value(
ValueUnit(1, DistanceUnit.MILES), non_capturing_group("[اأ]ميال", "ميول")
)
SEVERAL_YARDS = Value(ValueUnit(1, DistanceUnit.YARDS), "ياردات")
SEVERAL_FEET = Value(ValueUnit(1, DistanceUnit.FEET), "[أا]قدام")
SEVERAL_INCHES = Value(
ValueUnit(1, DistanceUnit.INCHES), non_capturing_group("[إا]نشات", "بوصات")
)
|
terrascript/data/camptocamp/jwt.py | mjuenema/python-terrascript | 507 | 12603374 | # terrascript/data/camptocamp/jwt.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:19:50 UTC)
__all__ = []
|
examples/bayesian-optimization/main-visualize.py | yuki-koyama/mathtoolbox | 195 | 12603399 |
#
# Prerequisites:
# pip3 install matplotlib pandas seaborn
#
# Usage:
# python3 main-visualize.py </path/to/rand_result.csv> </path/to/bo_result.csv> </path/to/output.pdf>
#
from typing import Dict, Tuple
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import sys
def process_args() -> Tuple[str, str, str]:
assert len(sys.argv) >= 4
return sys.argv[1], sys.argv[2], sys.argv[3]
def read_csv_as_data_frame(path: str) -> pd.DataFrame:
return pd.read_csv(path, header=None)
def calculate_stats(data_frame: pd.DataFrame) -> pd.DataFrame:
num_iters = data_frame.shape[0]
mean_array = []
lower_array = []
upper_array = []
for iter in range(num_iters):
mean = data_frame.loc[iter].mean()
stdev = data_frame.loc[iter].std()
mean_array.append(mean)
lower_array.append(mean - stdev)
upper_array.append(mean + stdev)
return pd.DataFrame({
"mean": mean_array,
"lower": lower_array,
"upper": upper_array,
})
def visualize_stats(path: str, data: Dict[str, pd.DataFrame]) -> None:
FIG_SIZE = (4, 4)
CONFIDENT_REGION_ALPHA = 0.2
X_TICKS_SKIP = 2
DPI = 300
num_iters = next(iter(data.values())).shape[0]
sns.set()
sns.set_context()
plt.rcParams['font.sans-serif'] = ["Linux Biolinum"]
fig = plt.figure(figsize=FIG_SIZE, dpi=DPI)
ax = fig.add_subplot(1, 1, 1)
for name, data_frame in data.items():
ax.fill_between(range(1, num_iters + 1),
data_frame["lower"],
data_frame["upper"],
alpha=CONFIDENT_REGION_ALPHA,
label=name)
for name, data_frame in data.items():
ax.plot(range(1, num_iters + 1), data_frame["mean"], label=name)
ax.set_xlim([1, num_iters])
ax.set_xticks(range(1, num_iters + 1, X_TICKS_SKIP))
ax.set_xlabel("#iterations")
ax.set_ylabel("Function value")
ax.legend(data.keys())
fig.tight_layout()
plt.savefig(path)
if __name__ == "__main__":
rand_csv_path, bo_csv_path, out_path = process_args()
rand_data_frame = read_csv_as_data_frame(rand_csv_path)
bo_data_frame = read_csv_as_data_frame(bo_csv_path)
rand_stats = calculate_stats(rand_data_frame)
bo_stats = calculate_stats(bo_data_frame)
visualize_stats(out_path, {
"Random Sampling": rand_stats,
"Bayesian Optimization": bo_stats,
})
|
utils.py | pjbgf/python-paddingoracle | 286 | 12603401 | # -*- coding: utf-8 -*-
from base64 import urlsafe_b64decode, urlsafe_b64encode
def dotnet_b64decode(s):
'''Decode .NET Web-Base64 encoded data.'''
s, pad_bytes = s[:-1], int(s[-1])
s += ('=' * pad_bytes)
return urlsafe_b64decode(s)
def dotnet_b64encode(s):
'''.NET Web-Base64 encode data.'''
s = urlsafe_b64encode(s)
pad_bytes = s.count('=')
return s[:-pad_bytes or len(s)] + str(pad_bytes)
def is_vulnerable(encrypted):
'''
Checks encrypted token from ScriptResource.axd or WebResource.axd
to determine if application is vulnerable to MS10-070.
:returns: True if vulnerable, else False
'''
if len(dotnet_b64decode(encrypted)) % 8 == 0:
return True
return False
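
# A small round-trip sketch. The input bytes are arbitrary and only illustrate
# that is_vulnerable() keys off the decoded length being a multiple of the
# 8-byte cipher block size.
#
#     token = dotnet_b64encode('\x01\x02\x03\x04\x05\x06\x07\x08')
#     assert dotnet_b64decode(token) == '\x01\x02\x03\x04\x05\x06\x07\x08'
#     assert is_vulnerable(token)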
|
openmc/data/resonance_covariance.py | janmalec/openmc | 262 | 12603410 | from collections.abc import MutableSequence
import warnings
import io
import copy
import numpy as np
import pandas as pd
from . import endf
import openmc.checkvalue as cv
from .resonance import Resonances
def _add_file2_contributions(file32params, file2params):
"""Function for aiding in adding resonance parameters from File 2 that are
not always present in File 32. Uses already imported resonance data.
Parameters
----------
file32params : pandas.Dataframe
Incomplete set of resonance parameters contained in File 32.
file2params : pandas.Dataframe
Resonance parameters from File 2. Ordered by energy.
Returns
-------
parameters : pandas.Dataframe
Complete set of parameters ordered by L-values and then energy
"""
# Use l-values and competitiveWidth from File 2 data
# Re-sort File 2 by energy to match File 32
file2params = file2params.sort_values(by=['energy'])
file2params.reset_index(drop=True, inplace=True)
# Sort File 32 parameters by energy as well (maintaining index)
file32params.sort_values(by=['energy'], inplace=True)
# Add in values (.values converts to array first to ignore index)
file32params['L'] = file2params['L'].values
if 'competitiveWidth' in file2params.columns:
file32params['competitiveWidth'] = file2params['competitiveWidth'].values
# Resort to File 32 order (by L then by E) for use with covariance
file32params.sort_index(inplace=True)
return file32params
class ResonanceCovariances(Resonances):
"""Resolved resonance covariance data
Parameters
----------
ranges : list of openmc.data.ResonanceCovarianceRange
Distinct energy ranges for resonance data
Attributes
----------
ranges : list of openmc.data.ResonanceCovarianceRange
Distinct energy ranges for resonance data
"""
@property
def ranges(self):
return self._ranges
@ranges.setter
def ranges(self, ranges):
cv.check_type('resonance ranges', ranges, MutableSequence)
self._ranges = cv.CheckedList(ResonanceCovarianceRange,
'resonance range', ranges)
@classmethod
def from_endf(cls, ev, resonances):
"""Generate resonance covariance data from an ENDF evaluation.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
resonances : openmc.data.Resonances object
openmc.data.Resonances object generated from the same evaluation,
used to import values not contained in File 32
Returns
-------
openmc.data.ResonanceCovariances
Resonance covariance data
"""
file_obj = io.StringIO(ev.section[32, 151])
# Determine whether discrete or continuous representation
items = endf.get_head_record(file_obj)
n_isotope = items[4] # Number of isotopes
ranges = []
for iso in range(n_isotope):
items = endf.get_cont_record(file_obj)
abundance = items[1]
fission_widths = (items[3] == 1) # Flag for fission widths
n_ranges = items[4] # Number of resonance energy ranges
for j in range(n_ranges):
items = endf.get_cont_record(file_obj)
# Unresolved flags - 0: only scattering radius given
# 1: resolved parameters given
# 2: unresolved parameters given
unresolved_flag = items[2]
formalism = items[3] # resonance formalism
# Throw error for unsupported formalisms
if formalism in [0, 7]:
error = 'LRF='+str(formalism)+' covariance not supported '\
'for this formalism'
raise NotImplementedError(error)
if unresolved_flag in (0, 1):
# Resolved resonance region
resonance = resonances.ranges[j]
erange = _FORMALISMS[formalism].from_endf(ev, file_obj,
items, resonance)
ranges.append(erange)
elif unresolved_flag == 2:
warn = 'Unresolved resonance not supported. Covariance '\
'values for the unresolved region not imported.'
warnings.warn(warn)
return cls(ranges)
class ResonanceCovarianceRange:
"""Resonace covariance range. Base class for different formalisms.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
mpar : int
Number of parameters in covariance matrix for each individual resonance
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max):
self.energy_min = energy_min
self.energy_max = energy_max
def subset(self, parameter_str, bounds):
"""Produce a subset of resonance parameters and the corresponding
covariance matrix to an IncidentNeutron object.
Parameters
----------
parameter_str : str
parameter to be discriminated
(i.e. 'energy', 'captureWidth', 'fissionWidthA'...)
bounds : np.array
[low numerical bound, high numerical bound]
Returns
-------
res_cov_range : openmc.data.ResonanceCovarianceRange
ResonanceCovarianceRange object that contains a subset of the
covariance matrix (upper triangular) as well as a subset parameters
within self.file2params
"""
# Copy range and prevent change of original
res_cov_range = copy.deepcopy(self)
parameters = self.file2res.parameters
cov = res_cov_range.covariance
mpar = res_cov_range.mpar
# Create mask
mask1 = parameters[parameter_str] >= bounds[0]
mask2 = parameters[parameter_str] <= bounds[1]
mask = mask1 & mask2
res_cov_range.parameters = parameters[mask]
indices = res_cov_range.parameters.index.values
# Build subset of covariance
sub_cov_dim = len(indices)*mpar
cov_subset_vals = []
for index1 in indices:
for i in range(mpar):
for index2 in indices:
for j in range(mpar):
if index2*mpar+j >= index1*mpar+i:
cov_subset_vals.append(cov[index1*mpar+i,
index2*mpar+j])
cov_subset = np.zeros([sub_cov_dim, sub_cov_dim])
tri_indices = np.triu_indices(sub_cov_dim)
cov_subset[tri_indices] = cov_subset_vals
res_cov_range.file2res.parameters = parameters[mask]
res_cov_range.covariance = cov_subset
return res_cov_range
def sample(self, n_samples):
"""Sample resonance parameters based on the covariances provided
within an ENDF evaluation.
Parameters
----------
n_samples : int
The number of samples to produce
Returns
-------
samples : list of openmc.data.ResonanceCovarianceRange objects
List of samples size `n_samples`
"""
warn_str = 'Sampling routine does not guarantee positive values for '\
'parameters. This can lead to undefined behavior in the '\
'reconstruction routine.'
warnings.warn(warn_str)
parameters = self.parameters
cov = self.covariance
# Symmetrizing covariance matrix
cov = cov + cov.T - np.diag(cov.diagonal())
formalism = self.formalism
mpar = self.mpar
samples = []
# Handling MLBW/SLBW sampling
if formalism == 'mlbw' or formalism == 'slbw':
params = ['energy', 'neutronWidth', 'captureWidth', 'fissionWidth',
'competitiveWidth']
param_list = params[:mpar]
mean_array = parameters[param_list].values
mean = mean_array.flatten()
par_samples = np.random.multivariate_normal(mean, cov,
size=n_samples)
spin = parameters['J'].values
l_value = parameters['L'].values
for sample in par_samples:
energy = sample[0::mpar]
gn = sample[1::mpar]
gg = sample[2::mpar]
gf = sample[3::mpar] if mpar > 3 else parameters['fissionWidth'].values
gx = sample[4::mpar] if mpar > 4 else parameters['competitiveWidth'].values
gt = gn + gg + gf + gx
records = []
for j, E in enumerate(energy):
records.append([energy[j], l_value[j], spin[j], gt[j],
gn[j], gg[j], gf[j], gx[j]])
columns = ['energy', 'L', 'J', 'totalWidth', 'neutronWidth',
'captureWidth', 'fissionWidth', 'competitiveWidth']
sample_params = pd.DataFrame.from_records(records,
columns=columns)
# Copy ResonanceRange object
res_range = copy.copy(self.file2res)
res_range.parameters = sample_params
samples.append(res_range)
# Handling RM sampling
elif formalism == 'rm':
params = ['energy', 'neutronWidth', 'captureWidth',
'fissionWidthA', 'fissionWidthB']
param_list = params[:mpar]
mean_array = parameters[param_list].values
mean = mean_array.flatten()
par_samples = np.random.multivariate_normal(mean, cov,
size=n_samples)
spin = parameters['J'].values
l_value = parameters['L'].values
for sample in par_samples:
energy = sample[0::mpar]
gn = sample[1::mpar]
gg = sample[2::mpar]
gfa = sample[3::mpar] if mpar > 3 else parameters['fissionWidthA'].values
gfb = sample[4::mpar] if mpar > 3 else parameters['fissionWidthB'].values
records = []
for j, E in enumerate(energy):
records.append([energy[j], l_value[j], spin[j], gn[j],
gg[j], gfa[j], gfb[j]])
columns = ['energy', 'L', 'J', 'neutronWidth',
'captureWidth', 'fissionWidthA', 'fissionWidthB']
sample_params = pd.DataFrame.from_records(records,
columns=columns)
# Copy ResonanceRange object
res_range = copy.copy(self.file2res)
res_range.parameters = sample_params
samples.append(res_range)
return samples
class MultiLevelBreitWignerCovariance(ResonanceCovarianceRange):
"""Multi-level Breit-Wigner resolved resonance formalism covariance data.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
mpar : int
Number of parameters in covariance matrix for each individual resonance
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max)
self.parameters = parameters
self.covariance = covariance
self.mpar = mpar
self.lcomp = lcomp
self.file2res = copy.copy(file2res)
self.formalism = 'mlbw'
@classmethod
def from_endf(cls, ev, file_obj, items, resonance):
"""Create MLBW covariance data from an ENDF evaluation.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
file_obj : file-like object
ENDF file positioned at the second record of a resonance range
subsection in MF=32, MT=151
items : list
Items from the CONT record at the start of the resonance range
subsection
resonance : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
Returns
-------
openmc.data.MultiLevelBreitWignerCovariance
Multi-level Breit-Wigner resonance covariance parameters
"""
# Read energy-dependent scattering radius if present
energy_min, energy_max = items[0:2]
nro, naps = items[4:6]
if nro != 0:
params, ape = endf.get_tab1_record(file_obj)
# Other scatter radius parameters
items = endf.get_cont_record(file_obj)
target_spin = items[0]
lcomp = items[3] # Flag for compatibility 0, 1, 2 - 2 is compact form
nls = items[4] # number of l-values
# Build covariance matrix for General Resolved Resonance Formats
if lcomp == 1:
items = endf.get_cont_record(file_obj)
# Number of short range type resonance covariances
num_short_range = items[4]
# Number of long range type resonance covariances
num_long_range = items[5]
# Read resonance widths, J values, etc
records = []
for i in range(num_short_range):
items, values = endf.get_list_record(file_obj)
mpar = items[2]
num_res = items[5]
num_par_vals = num_res*6
res_values = values[:num_par_vals]
cov_values = values[num_par_vals:]
energy = res_values[0::6]
spin = res_values[1::6]
gt = res_values[2::6]
gn = res_values[3::6]
gg = res_values[4::6]
gf = res_values[5::6]
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gt[i], gn[i],
gg[i], gf[i]])
# Build the upper-triangular covariance matrix
cov_dim = mpar*num_res
cov = np.zeros([cov_dim, cov_dim])
indices = np.triu_indices(cov_dim)
cov[indices] = cov_values
# Compact format - Resonances and individual uncertainties followed by
# compact correlations
elif lcomp == 2:
items, values = endf.get_list_record(file_obj)
mean = items
num_res = items[5]
energy = values[0::12]
spin = values[1::12]
gt = values[2::12]
gn = values[3::12]
gg = values[4::12]
gf = values[5::12]
par_unc = []
for i in range(num_res):
res_unc = values[i*12+6 : i*12+12]
# Delete 0 values (not provided, no fission width)
# DAJ/DGT always zero, DGF sometimes nonzero [1, 2, 5]
res_unc_nonzero = []
for j in range(6):
if j in [1, 2, 5] and res_unc[j] != 0.0:
res_unc_nonzero.append(res_unc[j])
elif j in [0, 3, 4]:
res_unc_nonzero.append(res_unc[j])
par_unc.extend(res_unc_nonzero)
records = []
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gt[i], gn[i],
gg[i], gf[i]])
corr = endf.get_intg_record(file_obj)
cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))
# Compatible resolved resonance format
elif lcomp == 0:
cov = np.zeros([4, 4])
records = []
cov_index = 0
for i in range(nls):
items, values = endf.get_list_record(file_obj)
num_res = items[5]
for j in range(num_res):
one_res = values[18*j:18*(j+1)]
res_values = one_res[:6]
cov_values = one_res[6:]
records.append(list(res_values))
# Populate the covariance matrix for this resonance
# There are no covariances between resonances in lcomp=0
cov[cov_index, cov_index] = cov_values[0]
cov[cov_index+1, cov_index+1 : cov_index+2] = cov_values[1:2]
cov[cov_index+1, cov_index+3] = cov_values[4]
cov[cov_index+2, cov_index+2] = cov_values[3]
cov[cov_index+2, cov_index+3] = cov_values[5]
cov[cov_index+3, cov_index+3] = cov_values[6]
cov_index += 4
if j < num_res-1: # Pad matrix for additional values
cov = np.pad(cov, ((0, 4), (0, 4)), 'constant',
constant_values=0)
# Create pandas DataFrame with resonance data, currently
# redundant with data.IncidentNeutron.resonance
columns = ['energy', 'J', 'totalWidth', 'neutronWidth',
'captureWidth', 'fissionWidth']
parameters = pd.DataFrame.from_records(records, columns=columns)
# Determine mpar (number of parameters for each resonance in
# covariance matrix)
nparams, params = parameters.shape
covsize = cov.shape[0]
mpar = int(covsize/nparams)
# Add parameters from File 2
parameters = _add_file2_contributions(parameters,
resonance.parameters)
# Create instance of class
mlbw = cls(energy_min, energy_max, parameters, cov, mpar, lcomp,
resonance)
return mlbw
class SingleLevelBreitWignerCovariance(MultiLevelBreitWignerCovariance):
"""Single-level Breit-Wigner resolved resonance formalism covariance data.
Single-level Breit-Wigner resolved resonance data is is identified by LRF=1
in the ENDF-6 format.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
mpar : int
Number of parameters in covariance matrix for each individual resonance
formalism : str
String descriptor of formalism
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res)
self.formalism = 'slbw'
class ReichMooreCovariance(ResonanceCovarianceRange):
"""Reich-Moore resolved resonance formalism covariance data.
Reich-Moore resolved resonance data is identified by LRF=3 in the ENDF-6
format.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
mpar : int
Number of parameters in covariance matrix for each individual resonance
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max)
self.parameters = parameters
self.covariance = covariance
self.mpar = mpar
self.lcomp = lcomp
self.file2res = copy.copy(file2res)
self.formalism = 'rm'
@classmethod
def from_endf(cls, ev, file_obj, items, resonance):
"""Create Reich-Moore resonance covariance data from an ENDF
evaluation. Includes the resonance parameters contained separately in
File 32.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
file_obj : file-like object
ENDF file positioned at the second record of a resonance range
subsection in MF=2, MT=151
items : list
Items from the CONT record at the start of the resonance range
subsection
resonance : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data, used to import
values not contained in File 32
Returns
-------
openmc.data.ReichMooreCovariance
Reich-Moore resonance covariance parameters
"""
# Read energy-dependent scattering radius if present
energy_min, energy_max = items[0:2]
nro, naps = items[4:6]
if nro != 0:
params, ape = endf.get_tab1_record(file_obj)
# Other scatter radius parameters
items = endf.get_cont_record(file_obj)
target_spin = items[0]
lcomp = items[3] # Flag for compatibility 0, 1, 2 - 2 is compact form
nls = items[4] # Number of l-values
# Build covariance matrix for General Resolved Resonance Formats
if lcomp == 1:
items = endf.get_cont_record(file_obj)
# Number of short range type resonance covariances
num_short_range = items[4]
# Number of long range type resonance covariances
num_long_range = items[5]
# Read resonance widths, J values, etc
channel_radius = {}
scattering_radius = {}
records = []
for i in range(num_short_range):
items, values = endf.get_list_record(file_obj)
mpar = items[2]
num_res = items[5]
num_par_vals = num_res*6
res_values = values[:num_par_vals]
cov_values = values[num_par_vals:]
energy = res_values[0::6]
spin = res_values[1::6]
gn = res_values[2::6]
gg = res_values[3::6]
gfa = res_values[4::6]
gfb = res_values[5::6]
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gn[i], gg[i],
gfa[i], gfb[i]])
# Build the upper-triangular covariance matrix
cov_dim = mpar*num_res
cov = np.zeros([cov_dim, cov_dim])
indices = np.triu_indices(cov_dim)
cov[indices] = cov_values
# Compact format - Resonances and individual uncertainties followed by
# compact correlations
elif lcomp == 2:
items, values = endf.get_list_record(file_obj)
num_res = items[5]
energy = values[0::12]
spin = values[1::12]
gn = values[2::12]
gg = values[3::12]
gfa = values[4::12]
gfb = values[5::12]
par_unc = []
for i in range(num_res):
res_unc = values[i*12+6 : i*12+12]
# Delete 0 values (not provided in evaluation)
res_unc = [x for x in res_unc if x != 0.0]
par_unc.extend(res_unc)
records = []
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gn[i], gg[i],
gfa[i], gfb[i]])
corr = endf.get_intg_record(file_obj)
cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))
# Create pandas DataFrame with resonance data
columns = ['energy', 'J', 'neutronWidth', 'captureWidth',
'fissionWidthA', 'fissionWidthB']
parameters = pd.DataFrame.from_records(records, columns=columns)
# Determine mpar (number of parameters for each resonance in
# covariance matrix)
nparams, params = parameters.shape
covsize = cov.shape[0]
mpar = int(covsize/nparams)
# Add parameters from File 2
parameters = _add_file2_contributions(parameters,
resonance.parameters)
# Create instance of ReichMooreCovariance
rmc = cls(energy_min, energy_max, parameters, cov, mpar, lcomp,
resonance)
return rmc
_FORMALISMS = {
0: ResonanceCovarianceRange,
1: SingleLevelBreitWignerCovariance,
2: MultiLevelBreitWignerCovariance,
3: ReichMooreCovariance
# 7: RMatrixLimitedCovariance
}
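
# A minimal usage sketch. The ENDF file name is a placeholder; the evaluation
# must contain both File 2 and File 32 resolved resonance data.
#
#     from openmc.data.endf import Evaluation
#     from openmc.data.resonance import Resonances
#
#     ev = Evaluation('n-092_U_235.endf')
#     resonances = Resonances.from_endf(ev)
#     res_cov = ResonanceCovariances.from_endf(ev, resonances)
#     samples = res_cov.ranges[0].sample(n_samples=10)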
|
neural_parts/models/siren.py | naynasa/neural_parts_fork | 137 | 12603437 | import torch
from torch import nn
import numpy as np
class SineLayer(nn.Module):
def __init__(self, in_dims, out_dims, bias=True, is_first=False, omega_0=30):
super().__init__()
self.omega_0 = omega_0
self.in_dims = in_dims
# If is_first=True, omega_0 is a frequency factor which simply multiplies
# the activations before the nonlinearity. Different signals may require
# different omega_0 in the first layer - this is a hyperparameter.
# If is_first=False, then the weights will be divided by omega_0 so as to
# keep the magnitude of activations constant, but boost gradients to the
# weight matrix (see supplement Sec. 1.5)
self.is_first = is_first
self.linear = nn.Linear(in_dims, out_dims, bias=bias)
self.init_weights()
def init_weights(self):
with torch.no_grad():
if self.is_first:
self.linear.weight.uniform_(-1 / self.in_dims,
1 / self.in_dims)
else:
self.linear.weight.uniform_(-np.sqrt(6 / self.in_dims) / self.omega_0,
np.sqrt(6 / self.in_dims) / self.omega_0)
def forward(self, x):
return torch.sin(self.omega_0 * self.linear(x))
class Siren(nn.Module):
def __init__(self, in_dims, hidden_dims, hidden_layers, out_dims, outermost_linear=False,
first_omega_0=30, hidden_omega_0=30.):
super().__init__()
self.net = []
self.net.append(SineLayer(in_dims, hidden_dims,
is_first=True, omega_0=first_omega_0))
for i in range(hidden_layers):
self.net.append(SineLayer(hidden_dims, hidden_dims,
is_first=False, omega_0=hidden_omega_0))
if outermost_linear:
final_linear = nn.Linear(hidden_dims, out_dims)
with torch.no_grad():
final_linear.weight.uniform_(-np.sqrt(6 / hidden_dims) / hidden_omega_0,
np.sqrt(6 / hidden_dims) / hidden_omega_0)
self.net.append(final_linear)
else:
self.net.append(SineLayer(hidden_dims, out_dims,
is_first=False, omega_0=hidden_omega_0))
self.net = nn.Sequential(*self.net)
def forward(self, x):
return self.net(x)
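
# A small smoke test; the layer sizes and the 2D-coordinate input are arbitrary
# illustrative choices, not values used by the surrounding project.
if __name__ == "__main__":
    model = Siren(in_dims=2, hidden_dims=64, hidden_layers=3, out_dims=1,
                  outermost_linear=True)
    coords = torch.rand(16, 2) * 2 - 1  # 16 random points in [-1, 1]^2
    print(model(coords).shape)  # expected: torch.Size([16, 1])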
|
functions/KNearestNeighborsClassifier.py | sdash77/raster-functions | 173 | 12603446 |
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
'''
This raster function performs k-nearest neighbors classification
using scikit-learn and generates a map of the classification of
each pixel.
http://scikit-learn.org/stable/documentation.html
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html#sklearn.neighbors.NearestNeighbors
https://docs.scipy.org/doc/numpy/reference/generated/numpy.loadtxt.html#numpy.loadtxt
'''
class KNNClassifier():
def __init__(self):
self.name = 'K-Nearest Neighbor Classifier'
self.description = 'This raster function performs k-nearest neighbors classification' \
' using scikit-learn and generates a map of the classification of' \
' each pixel.'
# The number of neighbors to use
self.n_neighbors = None
# The CSV file containing the training data
# The inputs is a string
self.training_data_from_file = None
def getParameterInfo(self):
return [
{
'name': 'rasters',
'dataType': 'rasters',
'value': None,
'required': True,
'displayName': 'Input Rasters',
'description': 'This should include several individual rasters. The rasters must be in the same order as the columns in the CSV file'
},
{
'name': 'n_neighbors',
'dataType': 'numeric',
'value': 5,
'required': True,
'displayName': 'Number of neighbors (integer)',
'description': 'Number of neighbors to use by default for kneighbors queries (integer).'
},
{
'name': 'training_data_from_file',
'dataType': 'string',
'value': 'C:\\PROJECTS\\ML\\training_data.csv',
'required': True,
'displayName': 'Training data CSV filepath',
'description': 'Full filepath directory to training data CSV. '
'Internally this will load from disk and be converted to a pandas dataframe.'
}
]
def getConfiguration(self, **scalars):
return {
'inheritProperties': 1 | 2 | 4 | 8, # inherit all from the raster
'invalidateProperties': 2 | 4 | 8 # reset stats, histogram, key properties
}
def updateRasterInfo(self, **kwargs):
self.n_neighbors = int(kwargs['n_neighbors'])
self.datafile = str(kwargs['training_data_from_file'])
# Number of output bands:
# There should be one band for each neighbor calculated
#out_band_count = self.n_neighbors
# Output pixel information
kwargs['output_info']['pixelType'] = 'f4'
kwargs['output_info']['statistics'] = ()
kwargs['output_info']['histogram'] = ()
kwargs['output_info']['bandCount'] = 1
return kwargs
def updatePixels(self, tlc, shape, props, **pixelBlocks):
# Read the input CSV file into a dataframe
self.df = pd.read_csv(self.datafile)#(datafile)
# Drop the fields that are not involved in predicting or mapping the values.
# These fields will generally be things like "OBJECTID".
fields_to_drop = ['OBJECTID', 'LOCATION_X', 'LOCATION_Y'] # Fields that aren't used in the analysis
self.df.drop(fields_to_drop, axis=1, inplace=True)
# Separate dataframe into training environmental variables and observed values.
# The environmental values, x_train, are used to train the model
# We are trying to map\predict the y_train value
y_val = 'VarToPredict' # The value that you probably want to predict
x_train = self.df.loc[:, self.df.columns != y_val]
y_train = self.df[y_val]
# The model won't work if there is missing or null data.
# Fill null values with 0 (or some other value)
x_train.fillna(0, inplace=True)
y_train.fillna(0, inplace=True)
# Read pixel blocks
pix_blocks = pixelBlocks['rasters_pixels']
# Convert pixel blocks to numpy array
pix_array = np.asarray(pix_blocks)
# Remove any extra indices
pix_array = np.squeeze(pix_array)
# Reshape the pixels into a 2D array that is number of pixels x number of predictor variables
pixels_reshaped = pix_array.reshape(pix_array.shape[0], -1).transpose()
# Classify each point using the k-neighbors classifier
knn = KNeighborsClassifier(n_neighbors=self.n_neighbors)
pred = knn.fit(x_train, y_train).predict(pixels_reshaped)
# Reshape into a 2D array
res = pred.reshape((pix_array.shape[1], pix_array.shape[2]))
# Write output pixels
pixelBlocks['output_pixels'] = res.astype(
props['pixelType'],
copy=True
)
return pixelBlocks
def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
return keyMetadata
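
# A rough standalone sketch of how the methods chain together; in practice
# ArcGIS drives these calls. The CSV path, field names and pixel-block shape
# below are hypothetical.
#
#     fn = KNNClassifier()
#     fn.updateRasterInfo(n_neighbors=3,
#                         training_data_from_file='C:\\PROJECTS\\ML\\training_data.csv',
#                         output_info={})
#     blocks = {'rasters_pixels': np.random.rand(4, 1, 64, 64)}  # 4 predictor rasters
#     out = fn.updatePixels(None, None, {'pixelType': 'f4'}, **blocks)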
|
sympy/physics/control/__init__.py | bigfooted/sympy | 603 | 12603471 |
from .lti import TransferFunction, Series, Parallel, Feedback
__all__ = ['TransferFunction', 'Series', 'Parallel', 'Feedback']
|
utils_cv/classification/widget.py | muminkoykiran/computervision-recipes | 7,899 | 12603478 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import bqplot
import bqplot.pyplot as bqpyplot
import pandas as pd
from fastai.data_block import LabelList
from ipywidgets import widgets, Layout, IntSlider
import numpy as np
from utils_cv.common.image import im_width, im_height
from utils_cv.common.data import get_files_in_directory
class AnnotationWidget(object):
IM_WIDTH = 500 # pixels
def __init__(
self,
labels: list,
im_dir: str,
anno_path: str,
im_filenames: list = None,
):
"""Widget class to annotate images.
Args:
labels: List of label names, e.g. ["bird", "car", "plane"].
im_dir: Directory containing the images to be annotated.
anno_path: path where to write annotations to, and (if exists) load annotations from.
im_filenames: List of image filenames. If set to None, all images in the provided image directory are auto-detected.
"""
self.labels = labels
self.im_dir = im_dir
self.anno_path = anno_path
self.im_filenames = im_filenames
# Init
self.vis_image_index = 0
self.label_to_id = {s: i for i, s in enumerate(self.labels)}
if not im_filenames:
self.im_filenames = [
os.path.basename(s)
for s in get_files_in_directory(
im_dir,
suffixes=(
".jpg",
".jpeg",
".tif",
".tiff",
".gif",
".giff",
".png",
".bmp",
),
)
]
assert (
len(self.im_filenames) > 0
), f"Not a single image specified or found in directory {im_dir}."
# Initialize empty annotations and load previous annotations if file exist
self.annos = pd.DataFrame()
for im_filename in self.im_filenames:
if im_filename not in self.annos:
self.annos[im_filename] = pd.Series(
{"exclude": False, "labels": []}
)
if os.path.exists(self.anno_path):
print(f"Loading existing annotation from {self.anno_path}.")
with open(self.anno_path, "r") as f:
for line in f.readlines()[1:]:
vec = line.strip().split("\t")
im_filename = vec[0]
self.annos[im_filename].exclude = vec[1] == "True"
if len(vec) > 2:
self.annos[im_filename].labels = vec[2].split(",")
# Create UI and "start" widget
self._create_ui()
def show(self):
return self.ui
def update_ui(self):
im_filename = self.im_filenames[self.vis_image_index]
im_path = os.path.join(self.im_dir, im_filename)
# Update the image and info
self.w_img.value = open(im_path, "rb").read()
self.w_filename.value = im_filename
self.w_path.value = self.im_dir
# Fix the width of the image widget and adjust the height
self.w_img.layout.height = (
f"{int(self.IM_WIDTH * (im_height(im_path)/im_width(im_path)))}px"
)
# Update annotations
self.exclude_widget.value = self.annos[im_filename].exclude
for w in self.label_widgets:
w.value = False
for label in self.annos[im_filename].labels:
label_id = self.label_to_id[label]
self.label_widgets[label_id].value = True
def _create_ui(self):
"""Create and initialize widgets"""
# ------------
# Callbacks + logic
# ------------
def skip_image():
"""Return true if image should be skipped, and false otherwise."""
# See if UI-checkbox to skip images is checked
if not self.w_skip_annotated.value:
return False
# Stop skipping if image index is out of bounds
if (
self.vis_image_index <= 0
or self.vis_image_index >= len(self.im_filenames) - 1
):
return False
# Skip if image has annotation
im_filename = self.im_filenames[self.vis_image_index]
labels = self.annos[im_filename].labels
exclude = self.annos[im_filename].exclude
if exclude or len(labels) > 0:
return True
return False
def button_pressed(obj):
"""Next / previous image button callback."""
# Find next/previous image. Variable step is -1 or +1 depending on which button was pressed.
step = int(obj.value)
self.vis_image_index += step
while skip_image():
self.vis_image_index += step
self.vis_image_index = min(
max(self.vis_image_index, 0), len(self.im_filenames) - 1
)
self.w_image_slider.value = self.vis_image_index
self.update_ui()
def slider_changed(obj):
"""Image slider callback.
Need to wrap in try statement to avoid errors when slider value is not a number.
"""
try:
self.vis_image_index = int(obj["new"]["value"])
self.update_ui()
except Exception:
pass
def anno_changed(obj):
"""Label checkbox callback.
Update annotation file and write to disk
"""
# Test if call is coming from the user having clicked on a checkbox to change its state,
# rather than a change of state when e.g. the checkbox value was updated programmatically. This is a bit
# of a hack, but necessary since widgets.Checkbox() does not support an on_click() callback or similar.
if (
"new" in obj
and isinstance(obj["new"], dict)
and len(obj["new"]) == 0
):
# If single-label annotation then unset all checkboxes except the one which the user just clicked
if not self.w_multi_class.value:
for w in self.label_widgets:
if w.description != obj["owner"].description:
w.value = False
# Update annotation object
im_filename = self.im_filenames[self.vis_image_index]
self.annos[im_filename].labels = [
w.description for w in self.label_widgets if w.value
]
self.annos[im_filename].exclude = self.exclude_widget.value
# Write to disk as tab-separated file.
with open(self.anno_path, "w") as f:
f.write(
"{}\t{}\t{}\n".format(
"IM_FILENAME", "EXCLUDE", "LABELS"
)
)
for k, v in self.annos.items():
if v.labels != [] or v.exclude:
f.write(
"{}\t{}\t{}\n".format(
k, v.exclude, ",".join(v.labels)
)
)
# ------------
# UI - image + controls (left side)
# ------------
w_next_image_button = widgets.Button(description="Next")
w_next_image_button.value = "1"
w_next_image_button.layout = Layout(width="80px")
w_next_image_button.on_click(button_pressed)
w_previous_image_button = widgets.Button(description="Previous")
w_previous_image_button.value = "-1"
w_previous_image_button.layout = Layout(width="80px")
w_previous_image_button.on_click(button_pressed)
self.w_filename = widgets.Text(
value="", description="Name:", layout=Layout(width="200px")
)
self.w_path = widgets.Text(
value="", description="Path:", layout=Layout(width="200px")
)
self.w_image_slider = IntSlider(
min=0,
max=len(self.im_filenames) - 1,
step=1,
value=self.vis_image_index,
continuous_update=False,
)
self.w_image_slider.observe(slider_changed)
self.w_img = widgets.Image()
self.w_img.layout.width = f"{self.IM_WIDTH}px"
w_header = widgets.HBox(
children=[
w_previous_image_button,
w_next_image_button,
self.w_image_slider,
self.w_filename,
self.w_path,
]
)
# ------------
# UI - info (right side)
# ------------
# Options widgets
self.w_skip_annotated = widgets.Checkbox(
value=False, description="Skip annotated images."
)
self.w_multi_class = widgets.Checkbox(
value=False, description="Allow multi-class labeling"
)
# Label checkboxes widgets
self.exclude_widget = widgets.Checkbox(
value=False, description="EXCLUDE IMAGE"
)
self.exclude_widget.observe(anno_changed)
self.label_widgets = [
widgets.Checkbox(value=False, description=label)
for label in self.labels
]
for label_widget in self.label_widgets:
label_widget.observe(anno_changed)
# Combine UIs into tab widget
w_info = widgets.VBox(
children=[
widgets.HTML(value="Options:"),
self.w_skip_annotated,
self.w_multi_class,
widgets.HTML(value="Annotations:"),
self.exclude_widget,
*self.label_widgets,
]
)
w_info.layout.padding = "20px"
self.ui = widgets.Tab(
children=[
widgets.VBox(
children=[
w_header,
widgets.HBox(children=[self.w_img, w_info]),
]
)
]
)
self.ui.set_title(0, "Annotator")
# Fill UI with content
self.update_ui()
class ResultsWidget(object):
IM_WIDTH = 500 # pixels
def __init__(self, dataset: LabelList, y_score: np.ndarray, y_label: iter):
"""Helper class to draw and update Image classification results widgets.
Args:
dataset (LabelList): Data used for prediction, containing ImageList x and CategoryList y.
y_score (np.ndarray): Predicted scores.
            y_label (iterable): Predicted labels (note: these are predictions, not the ground-truth labels).
"""
assert len(y_score) == len(y_label) == len(dataset)
self.dataset = dataset
self.pred_scores = y_score
self.pred_labels = y_label
# Init
self.vis_image_index = 0
self.labels = dataset.classes
self.label_to_id = {s: i for i, s in enumerate(self.labels)}
self._create_ui()
@staticmethod
def _list_sort(list1d, reverse=False, comparison_fn=lambda x: x):
"""Sorts list1f and returns (sorted list, list of indices)"""
indices = list(range(len(list1d)))
tmp = sorted(zip(list1d, indices), key=comparison_fn, reverse=reverse)
return list(map(list, list(zip(*tmp))))
def show(self):
return self.ui
def update(self):
scores = self.pred_scores[self.vis_image_index]
im = self.dataset.x[self.vis_image_index] # fastai Image object
_, sort_order = self._list_sort(scores, reverse=True)
pred_labels_str = ""
for i in sort_order:
pred_labels_str += f"{self.labels[i]} ({scores[i]:3.2f})\n"
self.w_pred_labels.value = str(pred_labels_str)
self.w_image_header.value = f"Image index: {self.vis_image_index}"
self.w_img.value = im._repr_png_()
# Fix the width of the image widget and adjust the height
self.w_img.layout.height = (
f"{int(self.IM_WIDTH * (im.size[0]/im.size[1]))}px"
)
self.w_gt_label.value = str(self.dataset.y[self.vis_image_index])
self.w_filename.value = str(
self.dataset.items[self.vis_image_index].name
)
self.w_path.value = str(
self.dataset.items[self.vis_image_index].parent
)
bqpyplot.clear()
bqpyplot.bar(
self.labels,
scores,
align="center",
alpha=1.0,
color=np.abs(scores),
scales={"color": bqplot.ColorScale(scheme="Blues", min=0)},
)
def _create_ui(self):
"""Create and initialize widgets"""
# ------------
# Callbacks + logic
# ------------
def image_passes_filters(image_index):
"""Return if image should be shown."""
actual_label = str(self.dataset.y[image_index])
bo_pred_correct = actual_label == self.pred_labels[image_index]
if (bo_pred_correct and self.w_filter_correct.value) or (
not bo_pred_correct and self.w_filter_wrong.value
):
return True
return False
def button_pressed(obj):
"""Next / previous image button callback."""
step = int(obj.value)
self.vis_image_index += step
self.vis_image_index = min(
max(0, self.vis_image_index), int(len(self.pred_labels)) - 1
)
while not image_passes_filters(self.vis_image_index):
self.vis_image_index += step
if (
self.vis_image_index <= 0
or self.vis_image_index >= int(len(self.pred_labels)) - 1
):
break
self.vis_image_index = min(
max(0, self.vis_image_index), int(len(self.pred_labels)) - 1
)
self.w_image_slider.value = self.vis_image_index
self.update()
def slider_changed(obj):
"""Image slider callback.
Need to wrap in try statement to avoid errors when slider value is not a number.
"""
try:
self.vis_image_index = int(obj["new"]["value"])
self.update()
except Exception:
pass
# ------------
# UI - image + controls (left side)
# ------------
w_next_image_button = widgets.Button(description="Next")
w_next_image_button.value = "1"
w_next_image_button.layout = Layout(width="80px")
w_next_image_button.on_click(button_pressed)
w_previous_image_button = widgets.Button(description="Previous")
w_previous_image_button.value = "-1"
w_previous_image_button.layout = Layout(width="80px")
w_previous_image_button.on_click(button_pressed)
self.w_filename = widgets.Text(
value="", description="Name:", layout=Layout(width="200px")
)
self.w_path = widgets.Text(
value="", description="Path:", layout=Layout(width="200px")
)
self.w_image_slider = IntSlider(
min=0,
max=len(self.pred_labels) - 1,
step=1,
value=self.vis_image_index,
continuous_update=False,
)
self.w_image_slider.observe(slider_changed)
self.w_image_header = widgets.Text("", layout=Layout(width="130px"))
self.w_img = widgets.Image()
self.w_img.layout.width = f"{self.IM_WIDTH}px"
w_header = widgets.HBox(
children=[
w_previous_image_button,
w_next_image_button,
self.w_image_slider,
self.w_filename,
self.w_path,
]
)
# ------------
# UI - info (right side)
# ------------
w_filter_header = widgets.HTML(
value="Filters (use Image +1/-1 buttons for navigation):"
)
self.w_filter_correct = widgets.Checkbox(
value=True, description="Correct classifications"
)
self.w_filter_wrong = widgets.Checkbox(
value=True, description="Incorrect classifications"
)
w_gt_header = widgets.HTML(value="Ground truth:")
self.w_gt_label = widgets.Text(value="")
self.w_gt_label.layout.width = "360px"
w_pred_header = widgets.HTML(value="Predictions:")
self.w_pred_labels = widgets.Textarea(value="")
self.w_pred_labels.layout.height = "200px"
self.w_pred_labels.layout.width = "360px"
w_scores_header = widgets.HTML(value="Classification scores:")
self.w_scores = bqpyplot.figure()
self.w_scores.layout.height = "250px"
self.w_scores.layout.width = "370px"
self.w_scores.fig_margin = {
"top": 5,
"bottom": 80,
"left": 30,
"right": 5,
}
# Combine UIs into tab widget
w_info = widgets.VBox(
children=[
w_filter_header,
self.w_filter_correct,
self.w_filter_wrong,
w_gt_header,
self.w_gt_label,
w_pred_header,
self.w_pred_labels,
w_scores_header,
self.w_scores,
]
)
w_info.layout.padding = "20px"
self.ui = widgets.Tab(
children=[
widgets.VBox(
children=[
w_header,
widgets.HBox(children=[self.w_img, w_info]),
]
)
]
)
self.ui.set_title(0, "Results viewer")
# Fill UI with content
self.update()
|
fig/replaced_by_d3/tanh.py | Zander-Davidson/Neural-Network-Exercise | 14,090 | 12603498 | """
tanh
~~~~
Plots a graph of the tanh function."""
import numpy as np
import matplotlib.pyplot as plt
z = np.arange(-5, 5, .1)
t = np.tanh(z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, t)
ax.set_ylim([-1.0, 1.0])
ax.set_xlim([-5,5])
ax.grid(True)
ax.set_xlabel('z')
ax.set_title('tanh function')
plt.show()
|
mudpi/extensions/timer/trigger.py | icyspace/mudpi-core | 163 | 12603517 | """
Timer Trigger Interface
Calls actions after a
configurable elapsed time.
"""
import time
from . import NAMESPACE
from mudpi.utils import decode_event_data
from mudpi.extensions import BaseInterface
from mudpi.extensions.trigger import Trigger
from mudpi.logger.Logger import Logger, LOG_LEVEL
from mudpi.exceptions import ConfigError  # used by validate() below; assumed to live in mudpi.exceptions
from mudpi.constants import FONT_RESET, FONT_MAGENTA
class Interface(BaseInterface):
def load(self, config):
""" Load timer trigger component from configs """
trigger = TimerTrigger(self.mudpi, config)
self.add_component(trigger)
return True
def validate(self, config):
""" Validate the trigger config """
if not isinstance(config, list):
config = [config]
for conf in config:
if not conf.get('key'):
raise ConfigError('Missing `key` in Timer Trigger config.')
return config
def register_actions(self):
""" Register any interface actions """
self.register_component_actions('start', action='start')
self.register_component_actions('stop', action='stop')
self.register_component_actions('pause', action='pause')
self.register_component_actions('reset', action='reset')
self.register_component_actions('restart', action='restart')
class TimerTrigger(Trigger):
""" Timer Trigger
Calls actions after
set elapsed time
"""
""" Properties """
@property
def id(self):
""" Return a unique id for the component """
return self.config['key']
@property
def name(self):
""" Return the display name of the component """
return self.config.get('name') or f"{self.id.replace('_', ' ').title()}"
@property
def max_duration(self):
""" Return the max_duration of the timer """
return self.config.get('duration', 10)
@property
def active(self):
""" Return if the timer is active or not """
return self._active
@property
def duration(self):
if self.active:
self.time_elapsed = (time.perf_counter() - self.time_start) + self._pause_offset
return round(self.time_elapsed, 2)
""" Methods """
def init(self):
""" Init the timer component """
self._listening = False
self._active = False
self.time_elapsed = 0
self._last_event = None
self._pause_offset = 0
self.reset_duration()
if self.mudpi.is_prepared:
if not self._listening:
# TODO: Eventually get a handler returned to unsub just this listener
self.mudpi.events.subscribe(f'{NAMESPACE}/{self.id}', self.handle_event)
self._listening = True
def update(self):
""" Get timer data """
if self.duration >= self.max_duration:
if self.active:
self.stop()
self.trigger()
return True
def reset_duration(self):
""" Reset the elapsed duration """
self.time_start = time.perf_counter()
return self
def handle_event(self, event):
""" Process event data for the timer """
_event_data = decode_event_data(event)
if _event_data == self._last_event:
# Event already handled
return
self._last_event = _event_data
if _event_data.get('event'):
try:
if _event_data['event'] == 'TimerStart':
self.start()
Logger.log(
LOG_LEVEL["debug"],
f'Timer Trigger {FONT_MAGENTA}{self.name}{FONT_RESET} Started'
)
elif _event_data['event'] == 'TimerStop':
self.stop()
Logger.log(
LOG_LEVEL["debug"],
f'Timer Trigger {FONT_MAGENTA}{self.name}{FONT_RESET} Stopped'
)
elif _event_data['event'] == 'TimerReset':
self.reset()
Logger.log(
LOG_LEVEL["debug"],
f'Timer Trigger {FONT_MAGENTA}{self.name}{FONT_RESET} Reset'
)
elif _event_data['event'] == 'TimerRestart':
self.restart()
Logger.log(
LOG_LEVEL["debug"],
f'Timer Trigger {FONT_MAGENTA}{self.name}{FONT_RESET} Restarted'
)
elif _event_data['event'] == 'TimerPause':
self.pause()
Logger.log(
LOG_LEVEL["debug"],
                        f'Timer Trigger {FONT_MAGENTA}{self.name}{FONT_RESET} Paused'
)
except:
Logger.log(
LOG_LEVEL["info"],
f"Error Decoding Event for Timer Trigger {self.id}"
)
""" Actions """
def start(self, data=None):
""" Start the timer """
if not self.active:
if self.frequency == 'many':
self.reset()
else:
self.reset_duration()
self._active = True
if self._pause_offset == 0:
Logger.log(
LOG_LEVEL["debug"],
f'Timer Trigger {FONT_MAGENTA}{self.name}{FONT_RESET} Started'
)
else:
Logger.log(
LOG_LEVEL["debug"],
f'Timer Trigger {FONT_MAGENTA}{self.name}{FONT_RESET} Resumed'
)
return self
def pause(self, data=None):
""" Pause the timer """
if self.active:
self._active = False
self._pause_offset = self.duration
self.reset_duration()
return self
def stop(self, data=None):
""" Stop the timer """
if self.active:
self.reset()
self._active = False
Logger.log(
LOG_LEVEL["debug"],
f'Timer Trigger {FONT_MAGENTA}{self.name}{FONT_RESET} Stopped'
)
return self
def reset(self, data=None):
""" Reset the timer """
self.reset_duration()
self._pause_offset = 0
return self
def restart(self, data=None):
""" Restart the timer """
self.reset()
self.start()
return self
|
doorman/users/mixins.py | haim/doorman | 614 | 12603524 | # -*- coding: utf-8 -*-
from flask_login import UserMixin
class NoAuthUserMixin(UserMixin):
def get_id(self):
return u''
@property
def username(self):
return u''
|
src/xobjc.py | MustangYM/xia0LLDB | 464 | 12603579 | #! /usr/bin/env python3
# ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______
# |______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|
# _ ___ _ _ _____ ____
# (_) / _ \| | | | | __ \| _ \
# __ ___ __ _| | | | | | | | | | | |_) |
# \ \/ / |/ _` | | | | | | | | | | | _ <
# > <| | (_| | |_| | |____| |____| |__| | |_) |
# /_/\_\_|\__,_|\___/|______|______|_____/|____/
# ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______
# |______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|
import lldb
import os
import shlex
import optparse
import json
import re
import utils
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand('command script add -f xobjc.ivars ivars -h "ivars made by xia0"')
debugger.HandleCommand('command script add -f xobjc.methods methods -h "methods made by xia0"')
debugger.HandleCommand('command script add -f xobjc.xivars xivars -h "ivars made by xia0 for macOS or ivars not work"')
debugger.HandleCommand('command script add -f xobjc.xmethods xmethods -h "methods made by xia0 for macOS or methods not work"')
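# ---------------------------------------------------------------------------
# Illustrative usage of the commands registered above (the address and class
# name below are placeholders). After `command script import xobjc.py`:
#
#   (lldb) ivars 0x7fb4a2d06a80
#   (lldb) ivars -n "UIViewController"
#   (lldb) methods -n "UIViewController"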
def ivars(debugger, command, exe_ctx, result, internal_dict):
def generate_option_parser():
usage = "usage: xmethods"
parser = optparse.OptionParser(usage=usage, prog="lookup")
parser.add_option("-n", "--name",
action="store",
default=None,
dest="name",
help="set the class name for methods")
return parser
command_args = shlex.split(command, posix=False)
parser = generate_option_parser()
try:
(options, args) = parser.parse_args(command_args)
except:
result.SetError(parser.usage)
return
_ = exe_ctx.target
_ = exe_ctx.thread
if options.name:
clzname = options.name
clzname = re.search("^\"(.*)\"$", clzname).group(1)
utils.ILOG("will get methods for class:\"{}\"".format(clzname))
code = '''
Class clz = objc_getClass(\"{}\");
id ret = [clz _ivarDescription];
ret
'''.format(clzname)
ret = utils.exe_script(debugger, code)
result.AppendMessage(ret)
return result
clz = args[0]
code = '''
id ret = [{} _ivarDescription];
ret
'''.format(clz)
ret = utils.exe_script(debugger, code)
result.AppendMessage(ret)
return result
def methods(debugger, command, exe_ctx, result, internal_dict):
def generate_option_parser():
usage = "usage: xmethods"
parser = optparse.OptionParser(usage=usage, prog="lookup")
parser.add_option("-n", "--name",
action="store",
default=None,
dest="name",
help="set the class name for methods")
return parser
command_args = shlex.split(command, posix=False)
parser = generate_option_parser()
try:
(options, args) = parser.parse_args(command_args)
except:
result.SetError(parser.usage)
return
_ = exe_ctx.target
_ = exe_ctx.thread
if options.name:
clzname = options.name
try:
clzname = re.search("^\"(.*)\"$", clzname).group(1)
except:
utils.ELOG("input format error! need \"class name\"")
return
utils.ILOG("will get methods for class:\"{}\"".format(clzname))
code = '''
Class clz = objc_getClass(\"{}\");
id ret = [clz _shortMethodDescription];
ret
'''.format(clzname)
ret = utils.exe_script(debugger, code)
result.AppendMessage(ret)
return result
clz = args[0]
code = '''
id ret = [{} _shortMethodDescription];
ret
'''.format(clz)
ret = utils.exe_script(debugger, code)
result.AppendMessage(ret)
return result
def xivars(debugger, command, exe_ctx, result, internal_dict):
def generate_option_parser():
usage = "usage: xivars"
parser = optparse.OptionParser(usage=usage, prog="lookup")
parser.add_option("-a", "--address",
action="store",
default=None,
dest="address",
help="set a breakpoint at absolute address")
return parser
command_args = shlex.split(command, posix=False)
parser = generate_option_parser()
try:
(options, args) = parser.parse_args(command_args)
except:
result.SetError(parser.usage)
return
_ = exe_ctx.target
_ = exe_ctx.thread
result.AppendMessage("command is still developing. please wait...\n")
return parser
def xmethods(debugger, command, exe_ctx, result, internal_dict):
def generate_option_parser():
usage = "usage: xmethods"
parser = optparse.OptionParser(usage=usage, prog="lookup")
parser.add_option("-a", "--address",
action="store",
default=None,
dest="address",
help="set a breakpoint at absolute address")
return parser
command_args = shlex.split(command, posix=False)
parser = generate_option_parser()
try:
(options, args) = parser.parse_args(command_args)
except:
result.SetError(parser.usage)
return
_ = exe_ctx.target
_ = exe_ctx.thread
result.AppendMessage("command is still developing. please wait...\n")
return parser |
rules/actions_write/file.bzl | CyberFlameGO/examples | 572 | 12603585 | """Generate a file.
In this example, the content is passed via an attribute. If you generate
large files with a lot of static content, consider using
`ctx.actions.expand_template` instead.
"""
def file(**kwargs):
_file(out = "{name}.txt".format(**kwargs), **kwargs)
def _impl(ctx):
output = ctx.outputs.out
ctx.actions.write(output = output, content = ctx.attr.content)
_file = rule(
implementation = _impl,
attrs = {"content": attr.string(), "out": attr.output()},
)
|
indy_common/serialization.py | Rob-S/indy-node | 627 | 12603597 | from common.serializers.json_serializer import JsonSerializer
attrib_raw_data_serializer = JsonSerializer()
|
wagtail/tests/modeladmintest/migrations/0009_relatedlink.py | brownaa/wagtail | 8,851 | 12603623 | # Generated by Django 3.1.1 on 2020-10-01 18:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0057_page_locale_fields_notnull'),
('modeladmintest', '0008_solobook'),
]
operations = [
migrations.CreateModel(
name='RelatedLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('link', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.page')),
],
),
]
|
hs_swat_modelinstance/migrations/0003_auto_20151013_1955.py | hydroshare/hydroshare | 178 | 12603649 | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hs_core', '0006_auto_20150917_1515'),
('hs_model_program', '0003_auto_20150813_1730'),
('hs_swat_modelinstance', '0002_auto_20150813_1726'),
]
operations = [
migrations.CreateModel(
name='SWATModelInstanceResource',
fields=[
],
options={
'ordering': ('_order',),
'verbose_name': 'SWAT Model Instance Resource',
'proxy': True,
},
bases=('hs_core.baseresource',),
),
migrations.RemoveField(
model_name='executedby',
name='name',
),
migrations.RemoveField(
model_name='executedby',
name='url',
),
migrations.AddField(
model_name='executedby',
name='model_name',
field=models.CharField(default=None, max_length=500, choices=[('-', ' ')]),
preserve_default=True,
),
migrations.AddField(
model_name='executedby',
name='model_program_fk',
field=models.ForeignKey(related_name='swatmodelinstance', blank=True, to='hs_model_program.ModelProgramResource', null=True),
preserve_default=True,
),
]
|
dockerscan/actions/image/console.py | michalkoczwara/dockerscan | 1,286 | 12603666 | import os
import logging
from dockerscan import get_log_level, run_in_console
from .api import *
from .model import *
from ..helpers import display_results_console
log = logging.getLogger('dockerscan')
def launch_dockerscan_image_info_in_console(config: DockerImageInfoModel):
"""Launch in console mode"""
log.setLevel(get_log_level(config.verbosity))
with run_in_console(config.debug):
log.console("Starting analyzing docker image...")
log.console("Selected image: '{}'".format(
os.path.basename(config.image_path)))
results = run_image_info_dockerscan(config)
# Display image summary
log.console("Analysis finished. Results:")
display_results_console(results, log)
def launch_dockerscan_image_extract_in_console(config: DockerImageInfoModel):
"""Launch in console mode"""
log.setLevel(get_log_level(config.verbosity))
with run_in_console(config.debug):
log.console("Starting the extraction of docker image...")
log.console("Selected image: '{}'".format(
os.path.basename(config.image_path)))
run_image_extract_dockerscan(config)
# Display image summary
log.console("Image content extracted")
def launch_dockerscan_image_analyze_in_console(config: DockerImageAnalyzeModel):
"""Launch in console mode"""
log.setLevel(get_log_level(config.verbosity))
with run_in_console(config.debug):
log.console("Starting the analysis of docker image...")
log.console("Selected image: '{}'".format(
os.path.basename(config.image_path)))
results = run_image_analyze_dockerscan(config)
# Display image summary
log.console("Analysis finished. Results:")
display_results_console(results, log)
__all__ = ("launch_dockerscan_image_info_in_console",
"launch_dockerscan_image_extract_in_console",
"launch_dockerscan_image_analyze_in_console")
|
tests/components/weathering/test_exponential_weatherer.py | amanaster2/landlab | 257 | 12603670 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 14:02:18 2020
@author: gtucker
"""
from landlab import RasterModelGrid
from landlab.components import ExponentialWeatherer
def test_create_weatherer_and_change_rate():
grid = RasterModelGrid((3, 3), 1.0)
grid.add_zeros("soil__depth", at="node")
ew = ExponentialWeatherer(grid, soil_production__maximum_rate=0.0001)
ew.maximum_weathering_rate = 0.0004
assert ew.maximum_weathering_rate == 0.0004
|
Site-blocker/web-blocker.py | A-kriti/Amazing-Python-Scripts | 930 | 12603716 | import time
from datetime import datetime as dt
# Windows host file path
hostsPath = r"C:\Windows\System32\drivers\etc\hosts"
redirect = "127.0.0.1"
# Add the website you want to block, in this list
websites = [
"www.amazon.in", "www.youtube.com", "youtube.com", "www.facebook.com",
"facebook.com"
]
while True:
    # Duration during which the website blocker will work
if dt(dt.now().year,
dt.now().month,
dt.now().day, 9) < dt.now() < dt(dt.now().year,
dt.now().month,
dt.now().day, 18):
print("Access denied to Website")
with open(hostsPath, 'r+') as file:
content = file.read()
for site in websites:
if site in content:
pass
else:
file.write(redirect + " " + site + "\n")
else:
with open(hostsPath, 'r+') as file:
content = file.readlines()
file.seek(0)
for line in content:
if not any(site in line for site in websites):
file.write(line)
file.truncate()
print("Allowed access!")
time.sleep(5)
|
numba/tests/test_llvm_pass_timings.py | auderson/numba | 6,620 | 12603718 | import unittest
from numba import njit
from numba.tests.support import TestCase, override_config
from numba.misc import llvm_pass_timings as lpt
timings_raw1 = """
===-------------------------------------------------------------------------===
... Pass execution timing report ...
===-------------------------------------------------------------------------===
Total Execution Time: 0.0001 seconds (0.0001 wall clock)
---User Time--- --System Time-- --User+System-- ---Wall Time--- --- Name ---
0.0001 ( 90.1%) 0.0001 ( 90.1%) 0.0001 ( 90.1%) 0.0001 ( 90.1%) A1
0.0000 ( 9.9%) 0.0000 ( 9.9%) 0.0000 ( 9.9%) 0.0000 ( 9.9%) A2
0.0001 (100.0%) 0.0001 (100.0%) 0.0001 (100.0%) 0.0001 (100.0%) Total
""" # noqa: E501
timings_raw2 = """
===-------------------------------------------------------------------------===
... Pass execution timing report ...
===-------------------------------------------------------------------------===
Total Execution Time: 0.0001 seconds (0.0001 wall clock)
---User Time--- --System Time-- --User+System-- ---Wall Time--- --- Name ---
0.0001 ( 90.1%) ----- 0.0001 ( 90.1%) 0.0001 ( 90.1%) A1
0.0000 ( 9.9%) ----- 0.0000 ( 9.9%) 0.0000 ( 9.9%) A2
0.0001 (100.0%) ----- 0.0001 (100.0%) 0.0001 (100.0%) Total
""" # noqa: E501
class TestLLVMPassTimings(TestCase):
def test_usage(self):
@njit
def foo(n):
c = 0
for i in range(n):
c += i
return c
with override_config('LLVM_PASS_TIMINGS', True):
foo(10)
md = foo.get_metadata(foo.signatures[0])
timings = md['llvm_pass_timings']
# Check: timing is of correct type
self.assertIsInstance(timings, lpt.PassTimingsCollection)
# Check: basic for __str__
text = str(timings)
self.assertIn("Module passes (full optimization)", text)
# Check: there must be more than one record
self.assertGreater(len(timings), 0)
# Check: __getitem__
last = timings[-1]
self.assertIsInstance(last, lpt.NamedTimings)
# Check: NamedTimings
self.assertIsInstance(last.name, str)
self.assertIsInstance(last.timings, lpt.ProcessedPassTimings)
def test_analyze(self):
@njit
def foo(n):
c = 0
for i in range(n):
for j in range(i):
c += j
return c
with override_config('LLVM_PASS_TIMINGS', True):
foo(10)
md = foo.get_metadata(foo.signatures[0])
timings_collection = md['llvm_pass_timings']
# Check: get_total_time()
self.assertIsInstance(timings_collection.get_total_time(), float)
# Check: summary()
self.assertIsInstance(timings_collection.summary(), str)
# Check: list_longest_first() ordering
longest_first = timings_collection.list_longest_first()
self.assertEqual(len(longest_first), len(timings_collection))
last = longest_first[0].timings.get_total_time()
for rec in longest_first[1:]:
cur = rec.timings.get_total_time()
self.assertGreaterEqual(last, cur)
cur = last
def test_parse_raw(self):
timings1 = lpt.ProcessedPassTimings(timings_raw1)
self.assertAlmostEqual(timings1.get_total_time(), 0.0001)
self.assertIsInstance(timings1.summary(), str)
timings2 = lpt.ProcessedPassTimings(timings_raw2)
self.assertAlmostEqual(timings2.get_total_time(), 0.0001)
self.assertIsInstance(timings2.summary(), str)
class TestLLVMPassTimingsDisabled(TestCase):
def test_disabled_behavior(self):
@njit
def foo(n):
c = 0
for i in range(n):
c += i
return c
with override_config('LLVM_PASS_TIMINGS', False):
foo(10)
md = foo.get_metadata(foo.signatures[0])
timings = md['llvm_pass_timings']
# Check that the right message is returned
self.assertEqual(timings.summary(), "No pass timings were recorded")
# Check that None is returned
self.assertIsNone(timings.get_total_time())
# Check that empty list is returned
self.assertEqual(timings.list_longest_first(), [])
if __name__ == "__main__":
unittest.main()
|
modules/dbnd-airflow-monitor/src/airflow_monitor/tracking_service/error_aggregator.py | busunkim96/dbnd | 224 | 12603734 | from typing import Optional
from weakref import WeakKeyDictionary
import attr
from dbnd._core.utils.timezone import utcnow
@attr.s
class ErrorAggregatorResult:
message = attr.ib() # type: Optional[str]
should_update = attr.ib() # type: bool
class ErrorAggregator:
def __init__(self):
# we use WeakKeyDictionary and not regular dict() since keys used for
# reporting can be objects, and we want to make sure to clean up those
        # keys/messages when they no longer exist.
        # the main use case is a key that is a function object: in theory it can be
        # destroyed, in which case its entry would be stuck here forever (which means
        # reporting it to the webserver forever). Practically it's currently not
        # happening since decorators are on class functions and not on specific
        # objects, but this is just to be safe. A slightly more real-world example
        # would be:
# class A:
# def something(self):
# try:
# raise NotImplementedError()
# except Exception as e:
# report_error(self.something, str(e))
#
# for i in range(10):
# A().something()
# in this case it will be all 10 different "keys"
self.active_errors = WeakKeyDictionary()
self.last_reported_errors = 0
def report(self, key, message: Optional[str]) -> ErrorAggregatorResult:
if message is not None:
self.active_errors[key] = (utcnow(), message)
elif key in self.active_errors:
del self.active_errors[key]
elif self.last_reported_errors == len(self.active_errors):
# we get here if
# * message is None (current invocation was ok)
# * key not in self.active_errors (previous invocation was ok)
# * # of errors didn't change (no weakref evictions)
# => so nothing to update
return ErrorAggregatorResult(None, should_update=False)
self.last_reported_errors = len(self.active_errors)
sorted_message = [
msg for _, msg in sorted(self.active_errors.values(), reverse=True)
]
aggregated_error = "\n\n---------\n\n".join(sorted_message)
# return None if empty string
return ErrorAggregatorResult(aggregated_error or None, should_update=True)
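# ---------------------------------------------------------------------------
# Minimal usage sketch (assumes only the classes above; the component class and
# the error message are placeholders). A report with a message marks the key as
# failing, a report with message=None clears it.
if __name__ == "__main__":

    class _DemoComponent(object):
        # plain object() instances cannot be weak-referenced, so use a tiny class
        pass

    component = _DemoComponent()
    aggregator = ErrorAggregator()

    failing = aggregator.report(component, "connection refused")
    print(failing.should_update, failing.message)  # True connection refused

    recovered = aggregator.report(component, None)
    print(recovered.should_update, recovered.message)  # True None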
|
deepchem/molnet/load_function/clintox_datasets.py | cjgalvin/deepchem | 3,782 | 12603811 | """
Clinical Toxicity (clintox) dataset loader.
@author <NAME>
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
CLINTOX_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/clintox.csv.gz"
CLINTOX_TASKS = ['FDA_APPROVED', 'CT_TOX']
class _ClintoxLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "clintox.csv.gz")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=CLINTOX_URL, dest_dir=self.data_dir)
loader = dc.data.CSVLoader(
tasks=self.tasks, feature_field="smiles", featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_clintox(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load ClinTox dataset
The ClinTox dataset compares drugs approved by the FDA and
drugs that have failed clinical trials for toxicity reasons.
The dataset includes two classification tasks for 1491 drug
compounds with known chemical structures:
#. clinical trial toxicity (or absence of toxicity)
#. FDA approval status.
  The list of FDA-approved drugs is compiled from the SWEETLEAD
  database, and the list of drugs that failed clinical trials for
  toxicity reasons is compiled from the Aggregate Analysis of
  ClinicalTrials.gov (AACT) database.
Random splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "smiles" - SMILES representation of the molecular structure
- "FDA_APPROVED" - FDA approval status
- "CT_TOX" - Clinical trial results
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
References
----------
.. [1] Gayvert, <NAME>., <NAME>, and <NAME>.
"A data-driven approach to predicting successes and failures of clinical
trials."
Cell chemical biology 23.10 (2016): 1294-1301.
.. [2] <NAME>., et al. "Integrated deep learned transcriptomic and
structure-based predictor of clinical trials outcomes." bioRxiv (2016):
095653.
.. [3] <NAME>., et al. "SWEETLEAD: an in silico database of approved
drugs, regulated chemicals, and herbal isolates for computer-aided drug
discovery." PloS one 8.11 (2013): e79568.
.. [4] Aggregate Analysis of ClincalTrials.gov (AACT) Database.
https://www.ctti-clinicaltrials.org/aact-database
"""
loader = _ClintoxLoader(featurizer, splitter, transformers, CLINTOX_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('clintox', reload)
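# ---------------------------------------------------------------------------
# Minimal usage sketch (assumes the MoleculeNet loader contract documented
# above; the first call downloads the raw CSV into data_dir):
if __name__ == "__main__":
  tasks, (train, valid, test), transformers = load_clintox(
      featurizer='ECFP', splitter='scaffold')
  print(tasks)  # ['FDA_APPROVED', 'CT_TOX']
  print(train.X.shape, valid.X.shape, test.X.shape)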
|
src/tfi/doc/template.py | ajbouh/tfi | 160 | 12603842 | import base64
from collections import OrderedDict
from jinja2 import Template as JinjaTemplate
from tfi.format.html.bibtex import citation_bibliography_html
from tfi.format.html.python import html_repr
from tfi.format.html.rst import parse_rst as _parse_rst
from tfi.parse.html import visible_text_for as _visible_text_for
from tfi.doc.example_code_generators import example_code_generator as _resolve_language
_page_template_path = __file__[:-2] + "html"
if _page_template_path.endswith("__init__.html"):
_page_template_path = _page_template_path.replace("__init__.html", __name__.split('.')[-1] + '.html')
class HtmlRenderer(object):
def __init__(self, documentation, include_snapshot, extra_scripts=""):
d = documentation
self._citation_id = 0
self._citation_label_by_refname = {}
self._references = d.references()
self._include_snapshot = include_snapshot
# TODO(adamb) What about arxiv ids already within []_ ??
        parsed = self._rst_to_html(d.overview() or "", id_prefix='overview-', initial_header_level=2)
body_sections = []
if parsed['body']:
forged_body_title = 'Overview'
forged_body_id = 'overview'
body_sections.append({
'title': forged_body_title,
'id': forged_body_id,
'body': """<section id="%s"><h2>%s</h2>%s</section>""" % (
forged_body_id,
forged_body_title,
parsed['body']
),
})
appendix_section_ids = ['overview-dataset']
appendix_sections = []
for section in parsed['sections']:
if section['id'] in appendix_section_ids:
appendix_sections.append(section)
else:
body_sections.append(section)
# Sort and render method overviews here so that references are properly ordered
methods = sorted(d.methods(), key=lambda method: method.name())
method_overviews = {
method.name(): self._rst_to_html(
method.overview(),
initial_header_level=3,
id_prefix='method-%s-' % method.name(),
)['body']
for method in methods
}
language_names = ['json', 'tensorflow-grpc-python']
# if include_snapshot:
# language_names.append('python')
self._template_fields = {
'facets_overview_proto_base64': base64.b64encode(d.facets_overview_proto()).decode('utf-8') if d.facets_overview_proto() else None,
'title': parsed['title'] or d.name(),
'rst_to_html': self._rst_to_html,
'html_repr': html_repr,
'include_snapshot': include_snapshot,
'extra_scripts': extra_scripts,
'subhead': parsed['subtitle'],
'description': _visible_text_for(parsed['subtitle']),
'bibliography_cite': citation_bibliography_html,
'overview': parsed['body'],
'body_sections': body_sections,
'appendix_sections': appendix_sections,
'authors': d.authors(),
'hyperparameters': [
(name, " ".join(["value", "was", repr(value)]), doc)
for name, _, value, doc in d.hyperparameters()
],
'implementation_notes': d.implementation_notes(),
'references': OrderedDict([
(refname, self._references[refname])
for refname in self._citation_label_by_refname
]),
'method_overviews': method_overviews,
'languages': [
_resolve_language(language_name)
for language_name in language_names
],
'methods': methods,
}
def _citation_label_by_refname_fn(self, citation_refname):
if citation_refname not in self._citation_label_by_refname:
self._citation_id += 1
self._citation_label_by_refname[citation_refname] = str(self._citation_id)
return self._citation_label_by_refname[citation_refname]
def _rst_to_html(self, rst, id_prefix, initial_header_level):
if not rst:
return {
'body': '',
'sections': {},
'title': '',
'subtitle': '',
}
return _parse_rst(
rst,
source_path="<string>",
initial_header_level=initial_header_level,
id_prefix=id_prefix,
citation_label_by_refname=self._citation_label_by_refname_fn,
bibtex_entries_by_refname=lambda citation_refname: self._references[citation_refname],
)
def render(self, proto, host):
with open(_page_template_path, encoding='utf-8') as f:
t = JinjaTemplate(f.read())
return t.render(
proto=proto,
host=host,
**self._template_fields,
)
|
sandbox/__init__.py | kevin-brown/django-cacheback | 160 | 12603844 | from .celeryconfig import app as celery_app
__all__ = ('celery_app',)
|
symoroutils/parfile.py | songhongxiang/symoro | 109 | 12603865 | # -*- coding: utf-8 -*-
# This file is part of the OpenSYMORO project. Please see
# https://github.com/symoro/symoro/blob/master/LICENCE for the licence.
"""
This module performs writing and reading data into PAR file. PAR is a
plain text file used to represent the different parameters of the robot.
"""
import os
import re
from symoroutils import filemgr
from symoroutils import tools
from pysymoro import robot
_keywords = [
'ant', 'sigma', 'b', 'd', 'r', 'gamma', 'alpha', 'mu', 'theta',
'XX', 'XY', 'XZ', 'YY', 'YZ', 'ZZ', 'MX', 'MY', 'MZ', 'M',
'IA', 'FV', 'FS', 'FX', 'FY', 'FZ', 'CX', 'CY', 'CZ',
'eta', 'k', 'QP', 'QDP', 'GAM', 'W0', 'WP0', 'V0', 'VP0', 'Z', 'G'
]
_NF = ['ant', 'sigma', 'b', 'd', 'r', 'gamma', 'alpha', 'mu', 'theta']
_NJ = ['eta', 'k', 'QP', 'QDP', 'GAM']
_NL = [
'XX', 'XY', 'XZ', 'YY', 'YZ', 'ZZ', 'MX', 'MY', 'MZ', 'M',
'IA', 'FV', 'FS', 'FX', 'FY', 'FZ', 'CX', 'CY', 'CZ'
]
_VEC = ['W0', 'WP0', 'V0', 'VP0']
_ZERO_BASED = {'W0', 'WP0', 'V0', 'VP0', 'Z', 'G'}
_bool_dict = {
'True': True,
'False': False,
'true': True,
'false': False,
'1': True,
'0': False
}
_keyword_repl = {
'Ant': 'ant',
'Mu': 'mu',
'Sigma': 'sigma',
'B': 'b',
'Alpha': 'alpha',
'Theta': 'theta',
'R': 'r'
}
def _extract_vals(robo, key, line):
line = line.replace('{', '')
line = line.replace('}', '')
if key in _ZERO_BASED:
k = 0
elif (robo.is_floating or robo.is_mobile) and key in _NL:
k = 0
else:
k = 1
items = line.split(',')
items_proc = []
prev_item = False
for i, v in enumerate(items):
if v.find('atan2') == -1 and not prev_item:
items_proc.append(v)
elif prev_item:
items_proc.append('%s,%s' % (items[i-1], v))
prev_item = False
else:
prev_item = True
for i, v in enumerate(items_proc):
if robo.put_val(i+k, key, v.strip()) == tools.FAIL:
return tools.FAIL
def _write_par_list(robo, f, key, N0, N):
f.write('{0} = {{{1}'.format(key, robo.get_val(N0, key)))
for i in xrange(N0 + 1, N):
f.write(',{0}'.format(robo.get_val(i, key)))
f.write('}\n')
def writepar(robo):
fname = robo.par_file_path
with open(fname, 'w') as f:
# robot description
f.write('(* Robotname = \'{0}\' *)\n'.format(robo.name))
f.write('NL = {0}\n'.format(robo.nl))
f.write('NJ = {0}\n'.format(robo.nj))
f.write('NF = {0}\n'.format(robo.nf))
f.write('Type = {0}\n'.format(tools.TYPES.index(robo.structure)))
f.write('is_floating = {0}\n'.format(robo.is_floating))
f.write('is_mobile = {0}\n'.format(robo.is_mobile))
# geometric parameters
f.write('\n(* Geometric parameters *)\n')
for key in _NF:
_write_par_list(robo, f, key, 1, robo.NF)
# dynamic parameters
f.write('\n(* Dynamic parameters and external forces *)\n')
N0 = 0 if robo.is_floating or robo.is_mobile else 1
for key in _NL:
_write_par_list(robo, f, key, N0, robo.NL)
# joint parameters
f.write('\n(* Joint parameters *)\n')
for key in _NJ:
_write_par_list(robo, f, key, 1, robo.NJ)
# base parameters - velocity and acceleration
f.write('\n(* Velocity and acceleration of the base *)\n')
for key in _VEC:
_write_par_list(robo, f, key, 0, 3)
# gravity vector
f.write('\n(* Acceleration of gravity *)\n')
_write_par_list(robo, f, 'G', 0, 3)
# base parameters - Z matrix
f.write('\n(* Transformation of 0 frame position fT0 *)\n')
_write_par_list(robo, f, 'Z', 0, 16)
f.write('\n(* End of definition *)\n')
def readpar(robo_name, file_path):
"""Return:
robo: an instance of Robot, read from file
flag: indicates if any errors accured. (tools.FAIL)
"""
with open(file_path, 'r') as f:
f.seek(0)
d = {}
is_floating = False
is_mobile = False
for line in f.readlines():
# check for robot name
name_pattern = r"\(\*.*Robotname.*=.*\'([\s\w-]*)\'.*\*\)"
match = re.match(name_pattern, line)
if match:
robo_name = match.group(1).strip()
# check for joint numbers, link numbers, type
for s in ('NJ', 'NL', 'Type'):
match = re.match(r'^%s.*=([\d\s]*)(\(\*.*)?' % s, line)
if match:
d[s] = int(match.group(1))
continue
# check for is_floating
match = re.match(r'^is_floating.*=([\w\s]*)(\(\*.*)?', line)
if match:
is_floating = _bool_dict[(match.group(1).strip())]
# check for is_mobile
match = re.match(r'^is_mobile.*=([\w\s]*)(\(\*.*)?', line)
if match:
is_mobile = _bool_dict[(match.group(1).strip())]
if len(d) < 2:
return None, tools.FAIL
NF = d['NJ']*2 - d['NL']
#initialize the Robot instance
robo = robot.Robot(
name=robo_name,
NL=d['NL'], NJ=d['NJ'], NF=NF,
structure=tools.TYPES[d['Type']],
is_floating=is_floating,
is_mobile=is_mobile,
directory=os.path.dirname(file_path),
par_file_path=file_path
)
# fitting the data
acc_line = ''
key = ''
f.seek(0)
flag = tools.OK
for line in f.readlines():
if line.find('(*') != -1:
continue
line = line.replace('Pi', 'pi')
match = re.match(r'^(.*)=.*\{(.*)', line)
if match:
                acc_line = ''
key = match.group(1).strip()
acc_line = match.group(2).strip()
else:
acc_line += line
if acc_line.find('}') != -1:
if key in _keyword_repl:
key = _keyword_repl[key]
if key in _keywords:
if _extract_vals(robo, key, acc_line) == tools.FAIL:
flag = tools.FAIL
acc_line = ''
key = ''
return robo, flag
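# ---------------------------------------------------------------------------
# Illustrative fragment of the PAR layout that writepar() emits and readpar()
# parses (values below are placeholders; the section order follows writepar):
#
#   (* Robotname = 'example' *)
#   NL = 2
#   NJ = 2
#   NF = 2
#   Type = 0
#
#   (* Geometric parameters *)
#   ant = {0,1}
#   sigma = {0,0}
#   ...
#
#   (* Acceleration of gravity *)
#   G = {0,0,-9.81}
#
#   (* End of definition *)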
|
tests/integration_tests_plugins/dockercompute/setup.py | ilan-WS/cloudify-manager | 124 | 12603949 | from setuptools import setup
setup(name='dockercompute', packages=['dockercompute'])
|
virtual_env/lib/python3.5/site-packages/google_compute_engine/accounts/oslogin_utils.py | straydag/To_Due_Backend | 322 | 12603950 | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for provisioning or deprovisioning a Linux user account."""
import errno
import os
import subprocess
import time
from google_compute_engine import constants
NSS_CACHE_DURATION_SEC = 21600 # 6 hours in seconds.
class OsLoginUtils(object):
"""Utilities for OS Login activation."""
def __init__(self, logger):
"""Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
"""
self.logger = logger
self.oslogin_installed = True
self.update_time = 0
def _RunOsLoginControl(self, params):
"""Run the OS Login control script.
Args:
params: list, the params to pass to the script
Returns:
int, the return code from the call, or None if the script is not found.
"""
try:
return subprocess.call([constants.OSLOGIN_CONTROL_SCRIPT] + params)
except OSError as e:
if e.errno == errno.ENOENT:
return None
else:
raise
def _GetStatus(self, two_factor=False):
"""Check whether OS Login is installed.
Args:
two_factor: bool, True if two factor should be enabled.
Returns:
bool, True if OS Login is installed.
"""
params = ['status']
if two_factor:
params += ['--twofactor']
retcode = self._RunOsLoginControl(params)
if retcode is None:
if self.oslogin_installed:
self.logger.warning('OS Login not installed.')
self.oslogin_installed = False
return None
# Prevent log spam when OS Login is not installed.
self.oslogin_installed = True
if not os.path.exists(constants.OSLOGIN_NSS_CACHE):
return False
return not retcode
def _RunOsLoginNssCache(self):
"""Run the OS Login NSS cache binary.
Returns:
int, the return code from the call, or None if the script is not found.
"""
try:
return subprocess.call([constants.OSLOGIN_NSS_CACHE_SCRIPT])
except OSError as e:
if e.errno == errno.ENOENT:
return None
else:
raise
def _RemoveOsLoginNssCache(self):
"""Remove the OS Login NSS cache file."""
if os.path.exists(constants.OSLOGIN_NSS_CACHE):
try:
os.remove(constants.OSLOGIN_NSS_CACHE)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def UpdateOsLogin(self, oslogin_desired, two_factor_desired=False):
"""Update whether OS Login is enabled and update NSS cache if necessary.
Args:
oslogin_desired: bool, enable OS Login if True, disable if False.
two_factor_desired: bool, enable two factor if True, disable if False.
Returns:
int, the return code from updating OS Login, or None if not present.
"""
oslogin_configured = self._GetStatus(two_factor=False)
if oslogin_configured is None:
return None
two_factor_configured = self._GetStatus(two_factor=True)
# Two factor can only be enabled when OS Login is enabled.
two_factor_desired = two_factor_desired and oslogin_desired
if oslogin_desired:
params = ['activate']
if two_factor_desired:
params += ['--twofactor']
# OS Login is desired and not enabled.
if not oslogin_configured:
self.logger.info('Activating OS Login.')
return self._RunOsLoginControl(params) or self._RunOsLoginNssCache()
# Enable two factor authentication.
if two_factor_desired and not two_factor_configured:
self.logger.info('Activating OS Login two factor authentication.')
return self._RunOsLoginControl(params) or self._RunOsLoginNssCache()
# Deactivate two factor authentication.
if two_factor_configured and not two_factor_desired:
self.logger.info('Reactivating OS Login with two factor disabled.')
return (self._RunOsLoginControl(['deactivate'])
or self._RunOsLoginControl(params))
# OS Login features are already enabled. Update the cache if appropriate.
current_time = time.time()
if current_time - self.update_time > NSS_CACHE_DURATION_SEC:
self.update_time = current_time
return self._RunOsLoginNssCache()
elif oslogin_configured:
self.logger.info('Deactivating OS Login.')
return (self._RunOsLoginControl(['deactivate'])
or self._RemoveOsLoginNssCache())
# No action was needed.
return 0
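# ---------------------------------------------------------------------------
# Minimal usage sketch (assumes a standard `logging` logger; on a machine
# without the OS Login guest scripts installed, _RunOsLoginControl() returns
# None and UpdateOsLogin() is a no-op that also returns None):
if __name__ == '__main__':
  import logging
  logging.basicConfig(level=logging.INFO)
  oslogin_utils = OsLoginUtils(logging.getLogger('oslogin-demo'))
  print(oslogin_utils.UpdateOsLogin(oslogin_desired=False))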
|
notebook/pandas_normalization.py | vhn0912/python-snippets | 174 | 12603982 | import pandas as pd
import scipy.stats
from sklearn import preprocessing
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]],
columns=['col1', 'col2', 'col3'],
index=['a', 'b', 'c'])
print(df)
# col1 col2 col3
# a 0 1 2
# b 3 4 5
# c 6 7 8
print((df - df.min()) / (df.max() - df.min()))
# col1 col2 col3
# a 0.0 0.0 0.0
# b 0.5 0.5 0.5
# c 1.0 1.0 1.0
print(((df.T - df.T.min()) / (df.T.max() - df.T.min())).T)
# col1 col2 col3
# a 0.0 0.5 1.0
# b 0.0 0.5 1.0
# c 0.0 0.5 1.0
print((df - df.values.min()) / (df.values.max() - df.values.min()))
# col1 col2 col3
# a 0.000 0.125 0.250
# b 0.375 0.500 0.625
# c 0.750 0.875 1.000
print((df - df.mean()) / df.std())
# col1 col2 col3
# a -1.0 -1.0 -1.0
# b 0.0 0.0 0.0
# c 1.0 1.0 1.0
print((df - df.mean()) / df.std(ddof=0))
# col1 col2 col3
# a -1.224745 -1.224745 -1.224745
# b 0.000000 0.000000 0.000000
# c 1.224745 1.224745 1.224745
print(((df.T - df.T.mean()) / df.T.std()).T)
# col1 col2 col3
# a -1.0 0.0 1.0
# b -1.0 0.0 1.0
# c -1.0 0.0 1.0
print(((df.T - df.T.mean()) / df.T.std(ddof=0)).T)
# col1 col2 col3
# a -1.224745 0.0 1.224745
# b -1.224745 0.0 1.224745
# c -1.224745 0.0 1.224745
print((df - df.values.mean()) / df.values.std())
# col1 col2 col3
# a -1.549193 -1.161895 -0.774597
# b -0.387298 0.000000 0.387298
# c 0.774597 1.161895 1.549193
print((df - df.values.mean()) / df.values.std(ddof=1))
# col1 col2 col3
# a -1.460593 -1.095445 -0.730297
# b -0.365148 0.000000 0.365148
# c 0.730297 1.095445 1.460593
df_ = df.copy()
s = df_['col1']
df_['col1_min_max'] = (s - s.min()) / (s.max() - s.min())
df_['col1_standardization'] = (s - s.mean()) / s.std()
print(df_)
# col1 col2 col3 col1_min_max col1_standardization
# a 0 1 2 0.0 -1.0
# b 3 4 5 0.5 0.0
# c 6 7 8 1.0 1.0
print(scipy.stats.zscore(df))
# [[-1.22474487 -1.22474487 -1.22474487]
# [ 0. 0. 0. ]
# [ 1.22474487 1.22474487 1.22474487]]
print(type(scipy.stats.zscore(df)))
# <class 'numpy.ndarray'>
print(scipy.stats.zscore(df, axis=None, ddof=1))
# [[-1.46059349 -1.09544512 -0.73029674]
# [-0.36514837 0. 0.36514837]
# [ 0.73029674 1.09544512 1.46059349]]
df_standardization = pd.DataFrame(scipy.stats.zscore(df),
index=df.index, columns=df.columns)
print(df_standardization)
# col1 col2 col3
# a -1.224745 -1.224745 -1.224745
# b 0.000000 0.000000 0.000000
# c 1.224745 1.224745 1.224745
df_ = df.copy()
df_['col1_standardization'] = scipy.stats.zscore(df_['col1'])
print(df_)
# col1 col2 col3 col1_standardization
# a 0 1 2 -1.224745
# b 3 4 5 0.000000
# c 6 7 8 1.224745
mm = preprocessing.MinMaxScaler()
print(mm.fit_transform(df))
# [[0. 0. 0. ]
# [0.5 0.5 0.5]
# [1. 1. 1. ]]
print(type(mm.fit_transform(df)))
# <class 'numpy.ndarray'>
print(preprocessing.minmax_scale(df))
# [[0. 0. 0. ]
# [0.5 0.5 0.5]
# [1. 1. 1. ]]
print(type(preprocessing.minmax_scale(df)))
# <class 'numpy.ndarray'>
df_min_max = pd.DataFrame(mm.fit_transform(df),
index=df.index, columns=df.columns)
print(df_min_max)
# col1 col2 col3
# a 0.0 0.0 0.0
# b 0.5 0.5 0.5
# c 1.0 1.0 1.0
df_ = df.copy()
s = df_['col1'].astype(float)
df_['col1_min_max'] = preprocessing.minmax_scale(s)
df_['col1_standardization'] = preprocessing.scale(s)
print(df_)
# col1 col2 col3 col1_min_max col1_standardization
# a 0 1 2 0.0 -1.224745
# b 3 4 5 0.5 0.000000
# c 6 7 8 1.0 1.224745
|
tests/dopamine/continuous_domains/run_experiment_test.py | kuldeepbrd1/dopamine | 9,825 | 12604002 | # coding=utf-8
# Copyright 2021 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for run_experiment."""
from typing import Type
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from dopamine.continuous_domains import run_experiment
from dopamine.discrete_domains import gym_lib
from dopamine.discrete_domains import run_experiment as base_run_experiment
from dopamine.jax.agents.sac import sac_agent
import gin
from gym import spaces
class RunExperimentTest(parameterized.TestCase):
def setUp(self):
super(RunExperimentTest, self).setUp()
self.env = self.enter_context(
mock.patch.object(gym_lib, 'GymPreprocessing', autospec=True))
self.env.observation_space = spaces.Box(0.0, 1.0, (5,))
self.env.action_space = spaces.Box(0.0, 1.0, (4,))
# Required for creating a SAC agent in create_agent tests.
gin.bind_parameter(
'circular_replay_buffer.OutOfGraphReplayBuffer.replay_capacity', 10)
gin.bind_parameter(
'circular_replay_buffer.OutOfGraphReplayBuffer.batch_size', 2)
# Required for creating continuous runners.
gin.bind_parameter('ContinuousRunner.create_environment_fn',
lambda: self.env)
gin.bind_parameter('ContinuousTrainRunner.create_environment_fn',
lambda: self.env)
def testCreateContinuousAgentReturnsAgent(self):
agent = run_experiment.create_continuous_agent(self.env, 'sac')
self.assertIsInstance(agent, sac_agent.SACAgent)
def testCreateContinuousAgentWithInvalidNameRaisesException(self):
with self.assertRaises(ValueError):
run_experiment.create_continuous_agent(self.env, 'invalid_name')
@parameterized.named_parameters(
dict(
testcase_name='TrainAndEval',
schedule='continuous_train_and_eval',
expected=run_experiment.ContinuousRunner),
dict(
testcase_name='Train',
schedule='continuous_train',
expected=run_experiment.ContinuousTrainRunner))
def testCreateContinuousRunnerCreatesCorrectRunner(
self, schedule: str, expected: Type[base_run_experiment.Runner]):
gin.bind_parameter('create_continuous_agent.agent_name', 'sac')
runner = run_experiment.create_continuous_runner(
self.create_tempdir(), schedule)
self.assertIsInstance(runner, expected)
def testCreateContinuousRunnerFailsWithInvalidName(self):
with self.assertRaises(ValueError):
run_experiment.create_continuous_runner('unused_dir', 'invalid_name')
if __name__ == '__main__':
absltest.main()
|
code/chapter-15/benchmark.py | dnaveenr/Practical-Deep-Learning-Book | 564 | 12604018 | # <NAME>
# 2019 Edgise
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import tensorflow as tf
import keras
from keras.applications.mobilenetv2 import MobileNetV2, preprocess_input, decode_predictions
import os
#import cv2
import PIL
import time
execution_path = os.getcwd()
print("tf version : " + tf.__version__)
print("keras version : " + keras.__version__)
# In[2]:
# load in the neural network
net = MobileNetV2(weights = 'imagenet', include_top = True)
# In[3]:
input_image_path = os.path.join(execution_path, 'images/cat.jpg')
print("input image read from : " + str(input_image_path))
# In[4]:
# Installing OpenCV on raspberry pi can be a burden, so let's switch to PIL
# However, if OpenCV is installed, it does tend to be a little faster
#input_image = cv2.imread(input_image_path)
#input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
input_image = PIL.Image.open(input_image_path)
input_image = np.asarray(input_image)
# In[11]:
# Use the MobileNet preprocessing function,
# and expand dimensions to create a batch of 1 image
preprocessed = preprocess_input(input_image)
preprocessed = np.expand_dims(preprocessed, axis=0)
print('input tensor shape : ' + str(preprocessed.shape))
# In[12]:
# Do 1 warmup prediction, this way we make sure everything is loaded as it should
print("warmup prediction")
prediction = net.predict(preprocessed)
print(decode_predictions(prediction, top=1)[0])
time.sleep(1)
# In[13]:
print("starting now...")
s = time.time()
for i in range(0,250,1):
prediction = net.predict(preprocessed)
e = time.time()
print('Time[s] : ' + str(e-s))
print('FPS : ' + str(1.0/((e-s)/250.0)))
# In[ ]:
# Save the model to an h5 file
net.save('MobileNetV2_ImageNet.h5')
print("Model saved.")
|
malaya_speech/train/model/glowtts/common.py | ishine/malaya-speech | 111 | 12604021 | import numpy as np
import tensorflow as tf
from ..utils import shape_list
def maximum_path(value, mask, max_neg_val=-np.inf):
""" Numpy-friendly version. It's about 4 times faster than torch version.
value: [b, t_x, t_y]
mask: [b, t_x, t_y]
"""
value = value * mask
dtype = value.dtype
mask = mask.astype(np.bool)
b, t_x, t_y = value.shape
direction = np.zeros(value.shape, dtype=np.int64)
v = np.zeros((b, t_x), dtype=np.float32)
x_range = np.arange(t_x, dtype=np.float32).reshape(1, -1)
for j in range(t_y):
v0 = np.pad(v, [[0, 0], [1, 0]], mode="constant", constant_values=max_neg_val)[:, :-1]
v1 = v
max_mask = (v1 >= v0)
v_max = np.where(max_mask, v1, v0)
direction[:, :, j] = max_mask
index_mask = (x_range <= j)
v = np.where(index_mask, v_max + value[:, :, j], max_neg_val)
direction = np.where(mask, direction, 1)
path = np.zeros(value.shape, dtype=np.float32)
index = mask[:, :, 0].sum(1).astype(np.int64) - 1
index_range = np.arange(b)
for j in reversed(range(t_y)):
path[index_range, index, j] = 1
index = index + direction[index_range, index, j] - 1
path = path * mask.astype(np.float32)
return path
def sequence_mask(length, max_length=None):
if max_length is None:
max_length = tf.reduce_max(length)
x = tf.range(max_length, dtype=length.dtype)
x = tf.expand_dims(x, 0)
x = tf.tile(x, (tf.shape(length)[0], 1))
ones = tf.ones_like(x)
zeros = tf.zeros_like(x)
return tf.where(x < tf.expand_dims(length, -1), ones, zeros)
def generate_path(duration, mask):
"""
duration: [b, t_x]
mask: [b, t_x, t_y]
"""
b, t_x, t_y = shape_list(mask)
cum_duration = tf.math.cumsum(duration, 1)
path = tf.zeros((b, t_x, t_y), dtype=mask.dtype)
cum_duration_flat = tf.reshape(cum_duration, (b * t_x,))
path = tf.sequence_mask(cum_duration_flat, t_y)
path = tf.cast(path, mask.dtype)
path = tf.reshape(path, (b, t_x, t_y))
path = path - tf.pad(path, [[0, 0], [1, 0], [0, 0]])[:, :-1]
path = path * mask
return path
def squeeze(x, x_mask=None, n_sqz=2):
b, t, c = shape_list(x)
t = (t // n_sqz) * n_sqz
x = x[:, :t]
x_sqz = tf.reshape(x, (b, t//n_sqz, n_sqz, c))
x_sqz = tf.reshape(x_sqz, (b, t//n_sqz, c * n_sqz))
if x_mask is not None:
x_mask = x_mask[:, n_sqz-1::n_sqz]
else:
x_mask = tf.ones((b, t // n_sqz, 1), dtype=x.dtype)
return x_sqz * x_mask, x_mask
def unsqueeze(x, x_mask=None, n_sqz=2):
b, t, c = shape_list(x)
x_unsqz = tf.reshape(x, (b, t, n_sqz, c//n_sqz))
x_unsqz = tf.reshape(x_unsqz, (b, t*n_sqz, c//n_sqz))
if x_mask is not None:
x_mask = tf.expand_dims(x_mask, 2)
x_mask = tf.tile(x_mask, (1, 1, n_sqz, 1))
x_mask = tf.reshape(x_mask, (b, t*n_sqz, 1))
else:
x_mask = tf.ones((b, t*n_sqz, 1), dtype=x.dtype)
return x_unsqz * x_mask, x_mask
|
testdata/iris_lightgbm_rf.py | sysutf/leaves | 334 | 12604086 | import numpy as np
import pickle
from sklearn import datasets
import lightgbm as lgb
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = data['data']
y = data['target']
y[y > 0] = 1
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
n_estimators = 30
d_train = lgb.Dataset(X_train, label=y_train)
params = {
'boosting_type': 'rf',
'objective': 'binary',
'bagging_fraction': 0.8,
'feature_fraction': 0.8,
'bagging_freq': 1,
}
clf = lgb.train(params, d_train, n_estimators)
y_pred = clf.predict(X_test)
model_filename = 'lg_rf_iris.model'
pred_filename = 'lg_rf_iris_true_predictions.txt'
# test_filename = 'iris_test.libsvm'
clf.save_model(model_filename)
np.savetxt(pred_filename, y_pred)
# datasets.dump_svmlight_file(X_test, y_test, test_filename) |
deploy/scripts/aphros/stat.py | ChristopherKotthoff/Aphros-with-GraphContraction | 252 | 12604093 | <gh_stars>100-1000
import os
def GetVal(path, colname, t0):
    """Return the value of column `colname` from <path>/stat.dat
    at the time closest to t0."""
    s = os.path.join(path, "stat.dat")
    # best match so far: time tb and its value vb
    tb = None
    vb = None
with open(s) as f:
head = f.readline().split()
it = head.index('t')
iv = head.index(colname)
for l in f:
t = float(l.split()[it])
v = l.split()[iv]
if tb is None or abs(t - t0) < abs(tb - t0):
tb = t
vb = v
return float(vb)
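
# A minimal usage sketch (not part of the original helper); the directory and
# column name below are assumptions, not values used by Aphros itself.
if __name__ == "__main__":
    demo_dir = "."  # assumed to contain a stat.dat with 't' and 'ekin' columns
    if os.path.isfile(os.path.join(demo_dir, "stat.dat")):
        print(GetVal(demo_dir, "ekin", t0=1.0))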
|
Stock/Data/Viewer/JaccardIndex/DyStockDataJaccardIndexWidgets.py | Leonardo-YXH/DevilYuan | 135 | 12604176 | from PyQt5.QtWidgets import QTabWidget
from ....Common.DyStockCommon import *
from .DyStockDataJaccardIndexWidget import *
class DyStockDataJaccardIndexWidgets(QTabWidget):
def __init__(self, jaccardDfs):
super().__init__()
self._jaccardDfs = jaccardDfs
self._initUi()
self.currentChanged.connect(self._onChange)
def _initUi(self):
for index in sorted(self._jaccardDfs):
widget = DyStockDataJaccardIndexWidget(index, self._jaccardDfs[index])
self.addTab(widget, DyStockCommon.indexes[index])
def getActiveIndexJaccardDf(self):
indexName = self.tabText(self.currentIndex())
index = DyStockCommon.getIndexByName(indexName)
return index, self._jaccardDfs[index]
def setCodeSetWidgets(self, codeSetWidgets):
self._codeSetWidgets = codeSetWidgets
def _onChange(self):
self._codeSetWidgets.blockSignals(True)
self._codeSetWidgets.setCurrentIndex(self.currentIndex())
self._codeSetWidgets.blockSignals(False)
|
src/lib/mailcap.py | DTenore/skulpt | 2,671 | 12604190 | <reponame>DTenore/skulpt<gh_stars>1000+
import _sk_fail; _sk_fail._("mailcap")
|
src/compas/numerical/lma/lma_numpy.py | XingxinHE/compas | 235 | 12604196 | # from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
# __all__ = ['lma_numpy']
# def objective(qind, E, Ci, Cf, Ct, Cit, pzi, out=2):
# q = E.dot(qind)
# Q = diags([q.flatten()], [0])
# Di = Cit.dot(Q).dot(Ci)
# Df = Cit.dot(Q).dot(Cf)
# zi = spsolve(Di, pzi - Df.dot(zf))
# f = zi - zT
# if out == 1:
# return f
# W = diags([zi.flatten()], [0])
# J = spsolve(-D, Cit).dot(W).dot(E)
# return f, J
# def lma_numpy(cost, x0, args=None, kmax=1000, tau=1e-6, eps1=1e-8, eps2=1e-8):
# """Levenberg-Marquardt algorithm for solving least-squares problems.
# Parameters
# ----------
# cost : callable
# The objective or "cost" function.
# x0 : array-like
# Initial guess for the variables.
# args : list, optional
# Additional arguments to be passed to the objective function.
# kmax : int, optional
# The maximum number of iterations.
# Default is `1000`.
# tau : float, optional
# Parameter for finding the step size in the steepest descent direction.
# Default is `1e-6`.
# eps1 : float, optional
# Stopage criterion related to the maximum absolute value of the gradient.
# Default is `1e-8`.
# eps2 : float, optional
# Stopage criterion related to the improvement between iterations.
# Default is `1e-8`.
# Returns
# -------
# list
# * The optimal values for the optimization variables.
# * The reason for stopping.
# * The final iteration.
# * The final gradient.
# Notes
# -----
# References
# ----------
# Examples
# --------
# >>>
# """
# x = asarray(x0).reshape((-1, 1))
# n = len(x)
# f, J = cost(x, *args)
# A = dot(J.transpose(), J)
# a = diagonal(A)
# g = dot(J.transpose(), f)
# k = 0
# v = 2
# mu = tau * max(a)
# stop = 'max(abs(g)): %s > %s' % (max(abs(g)), eps1)
# while max(abs(g)) > eps1 :
# dx = solve(-(A + mu * eye(n)), g)
# xn = x + dx
# # should the gain be calculated based on dq rather than dqind?
# # differences between iterations become too small
# # this means that we are at a local minimum, i think
# if norm(dx) <= eps2 * norm(xn) :
# stop = 'differences: %s <= %s' % (norm(dx), eps2 * norm(xn))
# break
# fn = cost(xn, *args, out=1)
# # gain ratio
# # assess whether to switch between ...
# gain = (0.5 * (sum(power(f, 2)) - sum(power(fn, 2)))) / (0.5 * dx.transpose() * (mu * dx - g))
# if gain > 0 :
# x = xn
# f, J = cost(x, *args, out=2)
# A = dot(J.transpose(), J)
# a = diagonal(A)
# g = dot(J.transpose(), f)
# mu = mu * max([1/3, 1 - (2 * gain - 1)**3])
# v = 2
# else :
# mu = mu * v
# v = 2 * v
# k = k + 1
# if k >= kmax :
# stop = 'kmax'
# break
# return [x, stop, k, g]
# # ==============================================================================
# # Main
# # ==============================================================================
# if __name__ == "__main__":
# pass
|
tools/metrics/utils/buildsystem.py | ivafanas/sltbench | 146 | 12604356 | from collections import namedtuple
MAKE = 'make'
NINJA = 'ninja'
ALL = [MAKE, NINJA]
_BuildSystem = namedtuple('BuildSystem', 'cmake_generator,build_command')
def create(args):
bs = args.build_system
if bs == MAKE:
return _BuildSystem(cmake_generator='Unix Makefiles',
build_command='make -j 1')
if bs == NINJA:
return _BuildSystem(cmake_generator='Ninja',
build_command='ninja -j 1')
raise RuntimeError('Unsupported build system: {}'.format(bs))
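
# A minimal usage sketch (not part of the original module): `create` only needs an
# object exposing a `build_system` attribute, e.g. the argparse namespace it is
# normally called with.
if __name__ == '__main__':
    from argparse import Namespace
    bs = create(Namespace(build_system=NINJA))
    print(bs.cmake_generator, '->', bs.build_command)  # Ninja -> ninja -j 1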
|
rpython/annotator/policy.py | nanjekyejoannah/pypy | 381 | 12604383 | # base annotation policy for specialization
from rpython.annotator.specialize import default_specialize as default
from rpython.annotator.specialize import (
specialize_argvalue, specialize_argtype, specialize_arglistitemtype,
specialize_arg_or_var, memo, specialize_call_location)
from rpython.flowspace.operation import op
from rpython.flowspace.model import Constant
from rpython.annotator.model import SomeTuple
class AnnotatorPolicy(object):
"""
Possibly subclass and pass an instance to the annotator to control
special-casing during annotation
"""
def event(pol, bookkeeper, what, *args):
pass
def get_specializer(pol, directive):
if directive is None:
return pol.default_specialize
# specialize[(args)]
directive_parts = directive.split('(', 1)
if len(directive_parts) == 1:
[name] = directive_parts
parms = ()
else:
name, parms = directive_parts
try:
parms = eval("(lambda *parms: parms)(%s" % parms)
except (KeyboardInterrupt, SystemExit):
raise
except:
raise Exception("broken specialize directive parms: %s" % directive)
name = name.replace(':', '__')
try:
specializer = getattr(pol, name)
except AttributeError:
raise AttributeError("%r specialize tag not defined in annotation"
"policy %s" % (name, pol))
else:
if not parms:
return specializer
else:
def specialize_with_parms(funcdesc, args_s):
return specializer(funcdesc, args_s, *parms)
return specialize_with_parms
# common specializations
default_specialize = staticmethod(default)
specialize__memo = staticmethod(memo)
specialize__arg = staticmethod(specialize_argvalue) # specialize:arg(N)
specialize__arg_or_var = staticmethod(specialize_arg_or_var)
specialize__argtype = staticmethod(specialize_argtype) # specialize:argtype(N)
specialize__arglistitemtype = staticmethod(specialize_arglistitemtype)
specialize__call_location = staticmethod(specialize_call_location)
def specialize__ll(pol, *args):
from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy
return LowLevelAnnotatorPolicy.default_specialize(*args)
def specialize__ll_and_arg(pol, *args):
from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy
return LowLevelAnnotatorPolicy.specialize__ll_and_arg(*args)
def no_more_blocks_to_annotate(pol, annotator):
bk = annotator.bookkeeper
# hint to all pending specializers that we are done
for callback in bk.pending_specializations:
callback()
del bk.pending_specializations[:]
if annotator.added_blocks is not None:
all_blocks = annotator.added_blocks
else:
all_blocks = annotator.annotated
for block in list(all_blocks):
for i, instr in enumerate(block.operations):
if not isinstance(instr, (op.simple_call, op.call_args)):
continue
v_func = instr.args[0]
s_func = annotator.annotation(v_func)
if not hasattr(s_func, 'needs_sandboxing'):
continue
key = ('sandboxing', s_func.const)
if key not in bk.emulated_pbc_calls:
params_s = s_func.args_s
s_result = s_func.s_result
from rpython.translator.sandbox.rsandbox import make_sandbox_trampoline
sandbox_trampoline = make_sandbox_trampoline(
s_func.name, params_s, s_result)
sandbox_trampoline._signature_ = [SomeTuple(items=params_s)], s_result
bk.emulate_pbc_call(key, bk.immutablevalue(sandbox_trampoline), params_s)
else:
s_trampoline = bk.emulated_pbc_calls[key][0]
sandbox_trampoline = s_trampoline.const
new = instr.replace({instr.args[0]: Constant(sandbox_trampoline)})
block.operations[i] = new
|
armi/bookkeeping/tests/__init__.py | keckler/armi | 162 | 12604391 | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Bookkeeping test package.
This may seem a little bit over-engineered, but the jupyter notebooks that get run by
the test_historyTracker are also used in the documentation system, so providing a list
of related files from this package is useful. Also, these are organized like this to
prevent having to import the world just to get something like a list of strings.
"""
from ._constants import *
|
Val_model_subpixel.py | Dai-z/pytorch-superpoint | 390 | 12604418 | """script for subpixel experiment (not tested)
"""
import numpy as np
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.optim
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from tqdm import tqdm
from utils.loader import dataLoader, modelLoader, pretrainedLoader
import logging
from utils.tools import dict_update
from utils.utils import labels2Dto3D, flattenDetection, labels2Dto3D_flattened
from utils.utils import pltImshow, saveImg
from utils.utils import precisionRecall_torch
from utils.utils import save_checkpoint
from pathlib import Path
@torch.no_grad()
class Val_model_subpixel(object):
def __init__(self, config, device='cpu', verbose=False):
self.config = config
self.model = self.config['name']
self.params = self.config['params']
self.weights_path = self.config['pretrained']
self.device=device
pass
def loadModel(self):
# model = 'SuperPointNet'
# params = self.config['model']['subpixel']['params']
from utils.loader import modelLoader
self.net = modelLoader(model=self.model, **self.params)
checkpoint = torch.load(self.weights_path,
map_location=lambda storage, loc: storage)
self.net.load_state_dict(checkpoint['model_state_dict'])
self.net = self.net.to(self.device)
        logging.info('successfully loaded pretrained model from: %s', self.weights_path)
pass
def extract_patches(self, label_idx, img):
"""
input:
label_idx: tensor [N, 4]: (batch, 0, y, x)
img: tensor [batch, channel(1), H, W]
"""
from utils.losses import extract_patches
patch_size = self.config['params']['patch_size']
patches = extract_patches(label_idx.to(self.device), img.to(self.device),
patch_size=patch_size)
return patches
pass
def run(self, patches):
"""
"""
with torch.no_grad():
pred_res = self.net(patches)
return pred_res
pass
if __name__ == '__main__':
# filename = 'configs/magicpoint_shapes_subpix.yaml'
filename = 'configs/magicpoint_repeatability.yaml'
import yaml
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.set_default_tensor_type(torch.FloatTensor)
with open(filename, 'r') as f:
config = yaml.load(f)
task = config['data']['dataset']
# data loading
from utils.loader import dataLoader_test as dataLoader
data = dataLoader(config, dataset='hpatches')
test_set, test_loader = data['test_set'], data['test_loader']
# take one sample
for i, sample in tqdm(enumerate(test_loader)):
if i>1: break
val_agent = Val_model_subpixel(config['subpixel'], device=device)
val_agent.loadModel()
# points from heatmap
img = sample['image']
print("image: ", img.shape)
points = torch.tensor([[1,2], [3,4]])
def points_to_4d(points):
num_of_points = points.shape[0]
cols = torch.zeros(num_of_points, 1).float()
points = torch.cat((cols, cols, points.float()), dim=1)
return points
label_idx = points_to_4d(points)
# concat points to be (batch, 0, y, x)
patches = val_agent.extract_patches(label_idx, img)
points_res = val_agent.run(patches)
|
freepie-samples/sample1.py | d2002b/alvr1 | 1,946 | 12604469 | <gh_stars>1000+
# Click trackpad of first controller by "C" key
alvr.buttons[0][alvr.Id("trackpad_click")] = keyboard.getKeyDown(Key.C)
alvr.buttons[0][alvr.Id("trackpad_touch")] = keyboard.getKeyDown(Key.C)
# Move trackpad position by arrow keys
if keyboard.getKeyDown(Key.LeftArrow):
alvr.trackpad[0][0] = -1.0
alvr.trackpad[0][1] = 0.0
elif keyboard.getKeyDown(Key.UpArrow):
alvr.trackpad[0][0] = 0.0
alvr.trackpad[0][1] = 1.0
elif keyboard.getKeyDown(Key.RightArrow):
alvr.trackpad[0][0] = 1.0
alvr.trackpad[0][1] = 0.0
elif keyboard.getKeyDown(Key.DownArrow):
alvr.trackpad[0][0] = 0.0
alvr.trackpad[0][1] = -1.0
|
data_pipeline/helpers/priority_refresh_queue.py | poros/data_pipeline | 110 | 12604482 | <reponame>poros/data_pipeline
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from data_pipeline.schematizer_clientlib.models.refresh import RefreshStatus
class EmptyQueueError(Exception):
def __init__(self, source_name):
Exception.__init__(
self, "Trying to pop from empty queue ({})".format(source_name)
)
class PriorityRefreshQueue(object):
"""
PriorityRefreshQueue orders paused/non-started jobs in the queue by:
- higher priority > lower priority
- paused status > not_started status
- older > newer
in that order of preference
(i.e an older paused job will be beaten by any job with a higher priority)
The only public ways to add/remove jobs from this queue are add_refreshes_to_queue and pop.
We could implement this faster, but this is unnecessary as we have ample time between
schematizer polls.
Works for multiple sources within a single namespace but not across namespaces
(since source_names are only unique within a namespace).
Should only manage jobs that are either paused or non-started. This means when starting a job
retrieved from peek, it should be popped from the queue.
"""
def __init__(self):
self.source_to_refresh_queue = {}
self.refresh_ref = {}
def _add_refresh_to_queue(self, refresh):
if refresh.refresh_id not in self.refresh_ref:
if refresh.source_name not in self.source_to_refresh_queue:
self.source_to_refresh_queue[refresh.source_name] = []
self.source_to_refresh_queue[refresh.source_name].append(
refresh.refresh_id
)
self.refresh_ref[refresh.refresh_id] = refresh
def _top_refresh(self, source_name):
return self.refresh_ref[
self.source_to_refresh_queue[source_name][0]
]
def _sort_by_ascending_age(self, queue):
return sorted(
queue,
key=lambda refresh_id: self.refresh_ref[refresh_id].created_at
)
def _sort_by_paused_first(self, queue):
return sorted(
queue,
key=lambda refresh_id:
(0 if self.refresh_ref[refresh_id].status == RefreshStatus.PAUSED else 1)
)
def _sort_by_descending_priority(self, queue):
return sorted(
queue,
key=lambda refresh_id: self.refresh_ref[refresh_id].priority,
reverse=True
)
def _sort_refresh_queue(self, queue):
queue = self._sort_by_ascending_age(queue)
queue = self._sort_by_paused_first(queue)
return self._sort_by_descending_priority(queue)
def add_refreshes_to_queue(self, refreshes):
for refresh in refreshes:
self._add_refresh_to_queue(refresh)
for source, queue in self.source_to_refresh_queue.iteritems():
self.source_to_refresh_queue[source] = self._sort_refresh_queue(queue)
def peek(self):
"""Returns a dict of the top refresh for each source in the queue"""
return {
source_name: self._top_refresh(source_name)
for source_name in self.source_to_refresh_queue
}
def pop(self, source_name):
"""Removes and returns the top refresh for the given source using its name
(Note: source_name does not include its namespace)"""
if source_name not in self.source_to_refresh_queue:
raise EmptyQueueError(source_name)
refresh_id = self.source_to_refresh_queue[source_name].pop(0)
item = self.refresh_ref.pop(refresh_id)
if not self.source_to_refresh_queue[source_name]:
del self.source_to_refresh_queue[source_name]
return item
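
# A minimal usage sketch (not part of the original module). Any object exposing
# refresh_id, source_name, created_at, priority and status can be queued; the
# namedtuple below is a stand-in for the schematizer Refresh model.
if __name__ == '__main__':
    from collections import namedtuple
    FakeRefresh = namedtuple(
        'FakeRefresh', 'refresh_id source_name created_at priority status'
    )
    queue = PriorityRefreshQueue()
    queue.add_refreshes_to_queue([
        FakeRefresh(1, 'biz', 10, 50, 'not_started'),
        FakeRefresh(2, 'biz', 20, 50, RefreshStatus.PAUSED),
    ])
    print(queue.peek()['biz'].refresh_id)  # 2: equal priority, but paused beats not-started
    print(queue.pop('biz').refresh_id)     # 2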
|
problems/14/solution_14.py | r1cc4rdo/daily_coding_problem | 158 | 12604493 | <gh_stars>100-1000
import math
import random
def coding_problem_14():
"""
The area of a circle is defined as $\pi r^2$. Estimate $\pi$ to 3 decimal places using a Monte Carlo method.
Example:
>>> import math
>>> import random
>>> random.seed(0xBEEF)
>>> pi_approx = coding_problem_14()
>>> abs(math.pi - pi_approx) < 1e-2
True
    Note: the unit test above is not testing for 3 decimal places, but only 2. Getting to 3 significant digits would
    take too long each time, since Monte Carlo convergence is slow (the error shrinks only as 1/sqrt(N)). The random
    number generator is also seeded to avoid spurious failures from unlucky sample distributions.
"""
inside, total, prev_pi, pi_approx = 0, 0, 0, 1
while abs(pi_approx - prev_pi) > 1e-5:
total += 10000
for _ in range(10000):
x, y = random.random(), random.random()
inside += (x**2 + y**2) < 1 # 1**2 == 1
prev_pi, pi_approx = pi_approx, (4. * inside) / total
return pi_approx
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
|
tests/test_init.py | google/evojax | 365 | 12604569 | <reponame>google/evojax<filename>tests/test_init.py
# Copyright 2022 The EvoJAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TestTask:
def test_cartpole(self):
from evojax.task.cartpole import CartPoleSwingUp
_ = CartPoleSwingUp()
assert True
def test_seq2seq(self):
from evojax.task.seq2seq import Seq2seqTask
_ = Seq2seqTask()
assert True
def test_waterworld(self):
from evojax.task.waterworld import WaterWorld
_ = WaterWorld()
assert True
def test_waterworld_ma(self):
from evojax.task.ma_waterworld import MultiAgentWaterWorld
_ = MultiAgentWaterWorld()
assert True
def test_flocing(self):
from evojax.task.flocking import FlockingTask
_ = FlockingTask()
assert True
class TestPolicy:
def test_seq2seq(self):
from evojax.policy import Seq2seqPolicy
_ = Seq2seqPolicy()
assert True
def test_mlp(self):
from evojax.policy import MLPPolicy
_ = MLPPolicy(input_dim=16, hidden_dims=(16, 16), output_dim=16)
assert True
def test_mlp_pi(self):
from evojax.policy import PermutationInvariantPolicy
_ = PermutationInvariantPolicy(act_dim=16, hidden_dim=16)
assert True
def test_convnet(self):
from evojax.policy import ConvNetPolicy
_ = ConvNetPolicy()
assert True
class TestAlgo:
def test_pgpe(self):
from evojax.algo import PGPE
_ = PGPE(pop_size=16, param_size=16)
assert True
|
pseudo/api_translators/cpp_api_handlers.py | mifieldxu/pseudo-lang | 661 | 12604608 | <reponame>mifieldxu/pseudo-lang<gh_stars>100-1000
from pseudo.pseudo_tree import Node, call, method_call, local, assignment, to_node
from pseudo.api_handlers import BizarreLeakingNode, NormalLeakingNode
class Read(BizarreLeakingNode):
'''
transforms `io:read`
`a = io:read()`
    `cin >> a`
'''
def temp_name(self, target):
return '_read_result'
def as_expression(self):
return [
Node('_cpp_declaration',
name='_dummy',
args=[],
decl_type='String',
pseudo_type='Void'),
Node('_cpp_cin',
args=[local('_dummy', 'String')])
], None
def as_assignment(self, target):
return [Node('_cpp_cin', args=[target])]
class Slice(BizarreLeakingNode):
'''
transforms `List:slice..`
'''
def temp_name(self, target):
return '_sliced'
def as_expression(self):
# pseudo_type=self.args[0].pseudo_type
begin = method_call(self.args[0], 'begin', [], 'CppIterator')
end = method_call(self.args[0], 'end', [], 'CppIterator')
if self.name == 'slice_to':
from_, to = to_node(0), self.args[1]
else:
from_, to = self.args[1], self.args[2]
if from_.type == 'int' and from_.value == 0:
start = begin
else:
start = Node('binary_op', op='+', left=begin, right=from_, pseudo_type='CppIterator')
if self.name == 'slice_from':
finish = end
elif to.type == 'int' and to.value < 0:
finish = Node('binary_op', op='-', left=end, right=to_node(-to.value))
else:
finish = Node('binary_op', op='+', left=begin, right=to, pseudo_type='CppIterator')
return [
Node('_cpp_declaration',
name='_sliced',
args=[start, finish],
decl_type=self.args[0].pseudo_type,
pseudo_type='Void')], None
def as_assignment(self, target):
expression = self.as_expression()[0][0]
expression.name = target.name
return [expression]
class ReadFile(BizarreLeakingNode):
'''
transforms `io:read_file`
'''
def temp_name(self, target):
return '_file_contents'
def as_expression(self):
return [Node('_cpp_declaration',
name='ifs',
args=[to_node('f.py')],
decl_type='ifstream',
pseudo_type='Void'),
Node('_cpp_declaration',
name=self.temp_name(None),
args=[Node('_cpp_group',
value=Node('_cpp_anon_declaration',
args=[local('ifs', 'ifstream')],
decl_type='istreambuf_iterator<char>',
pseudo_type='Void')),
Node('_cpp_group',
value=Node('_cpp_anon_declaration',
args=[],
decl_type='istreambuf_iterator<char>',
pseudo_type='Void'))],
decl_type='String',
pseudo_type='Void')], None
def as_assignment(self, target):
e = self.as_expression()[0]
e[1].name = target.name
return e |
dnachisel/Specification/SpecEvaluation/ProblemConstraintsEvaluations.py | simone-pignotti/DnaChisel | 124 | 12604611 | from .SpecEvaluation import SpecEvaluation
from .SpecEvaluations import SpecEvaluations
class ProblemConstraintsEvaluations(SpecEvaluations):
"""Special multi-evaluation class for all constraints of a same problem.
See submethod ``.from_problem``
"""
specifications_role = "constraint"
@staticmethod
def from_problem(problem, autopass_constraints=True):
"""Create an instance by evaluating all constraints in the problem.
The ``problem`` is a DnaChisel DnaOptimizationProblem.
"""
def evaluate(constraint):
if (
autopass_constraints
and constraint.enforced_by_nucleotide_restrictions
):
return SpecEvaluation(
constraint,
problem,
score=1,
locations=[],
message="Enforced by nucleotides restrictions",
)
else:
return constraint.evaluate(problem)
return ProblemConstraintsEvaluations(
[evaluate(constraint) for constraint in problem.constraints],
problem=problem,
)
def success_failure_color(self, evaluation):
"""Return color #60f979 if evaluation.passes else #f96c60."""
return "#60f979" if evaluation.passes else "#f96c60"
def text_summary_message(self):
"""Return a global SUCCESS or FAILURE message for all evaluations."""
failed = [e for e in self.evaluations if not e.passes]
if failed == []:
return "SUCCESS - all constraints evaluations pass"
else:
return "FAILURE: %d constraints evaluations failed" % len(failed)
|
python/traffic_manager.py | APinkLemon/Learn-Carla | 148 | 12604636 | <reponame>APinkLemon/Learn-Carla<gh_stars>100-1000
import argparse
import carla
import cv2
import logging
import time
import numpy as np
from numpy import random
from queue import Queue
from queue import Empty
def parser():
argparser = argparse.ArgumentParser(
description=__doc__)
argparser.add_argument(
'--host',
metavar='H',
default='127.0.0.1',
help='IP of the host server (default: 127.0.0.1)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to listen to (default: 2000)')
argparser.add_argument(
'-n', '--number-of-vehicles',
metavar='N',
default=20,
type=int,
        help='number of vehicles (default: 20)')
argparser.add_argument(
'-d', '--number-of-dangerous-vehicles',
metavar='N',
default=1,
type=int,
        help='number of dangerous vehicles (default: 1)')
argparser.add_argument(
'--tm-port',
metavar='P',
default=8000,
type=int,
help='port to communicate with TM (default: 8000)')
argparser.add_argument(
'--sync',
action='store_true',
default=True,
help='Synchronous mode execution')
return argparser.parse_args()
def sensor_callback(sensor_data, sensor_queue):
array = np.frombuffer(sensor_data.raw_data, dtype=np.dtype("uint8"))
# image is rgba format
array = np.reshape(array, (sensor_data.height, sensor_data.width, 4))
array = array[:, :, :3]
sensor_queue.put((sensor_data.frame, array))
def main():
args = parser()
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
vehicles_id_list = []
client = carla.Client(args.host, args.port)
client.set_timeout(10.0)
synchronous_master = False
try:
world = client.get_world()
origin_settings = world.get_settings()
traffic_manager = client.get_trafficmanager(args.tm_port)
# every vehicle keeps a distance of 3.0 meter
traffic_manager.set_global_distance_to_leading_vehicle(3.0)
# Set physical mode only for cars around ego vehicle to save computation
traffic_manager.set_hybrid_physics_mode(True)
# default speed is 30
traffic_manager.global_percentage_speed_difference(80)
# Suggest using syncmode
if args.sync:
settings = world.get_settings()
traffic_manager.set_synchronous_mode(True)
if not settings.synchronous_mode:
synchronous_master = True
settings.synchronous_mode = True
# 20fps
settings.fixed_delta_seconds = 0.05
world.apply_settings(settings)
blueprints_vehicle = world.get_blueprint_library().filter("vehicle.*")
# sort the vehicle list by id
blueprints_vehicle = sorted(blueprints_vehicle, key=lambda bp: bp.id)
spawn_points = world.get_map().get_spawn_points()
number_of_spawn_points = len(spawn_points)
if args.number_of_vehicles < number_of_spawn_points:
random.shuffle(spawn_points)
elif args.number_of_vehicles >= number_of_spawn_points:
msg = 'requested %d vehicles, but could only find %d spawn points'
logging.warning(msg, args.number_of_vehicles, number_of_spawn_points)
args.number_of_vehicles = number_of_spawn_points - 1
# Use command to apply actions on batch of data
SpawnActor = carla.command.SpawnActor
SetAutopilot = carla.command.SetAutopilot
# this is equal to int 0
FutureActor = carla.command.FutureActor
batch = []
for n, transform in enumerate(spawn_points):
if n >= args.number_of_vehicles:
break
blueprint = random.choice(blueprints_vehicle)
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
if blueprint.has_attribute('driver_id'):
driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
blueprint.set_attribute('driver_id', driver_id)
# set autopilot
blueprint.set_attribute('role_name', 'autopilot')
# spawn the cars and set their autopilot all together
batch.append(SpawnActor(blueprint, transform)
.then(SetAutopilot(FutureActor, True, traffic_manager.get_port())))
# excute the command
for (i, response) in enumerate(client.apply_batch_sync(batch, synchronous_master)):
if response.error:
logging.error(response.error)
else:
print("Fucture Actor", response.actor_id)
vehicles_id_list.append(response.actor_id)
vehicles_list = world.get_actors().filter('vehicle.*')
# wait for a tick to ensure client receives the last transform of the vehicles we have just created
if not args.sync or not synchronous_master:
world.wait_for_tick()
else:
world.tick()
# set several of the cars as dangerous car
for i in range(args.number_of_dangerous_vehicles):
danger_car = vehicles_list[i]
            # crazy cars ignore traffic lights, keep no safe distance, and drive very fast
traffic_manager.ignore_lights_percentage(danger_car, 100)
traffic_manager.distance_to_leading_vehicle(danger_car, 0)
traffic_manager.vehicle_percentage_speed_difference(danger_car, -50)
print('spawned %d vehicles , press Ctrl+C to exit.' % (len(vehicles_list)))
# create ego vehicle
ego_vehicle_bp = world.get_blueprint_library().find('vehicle.mercedes-benz.coupe')
# green color
ego_vehicle_bp.set_attribute('color', '0, 255, 0')
# set this one as ego
ego_vehicle_bp.set_attribute('role_name', 'hero')
# get a valid transform that has not been assigned yet
transform = spawn_points[len(vehicles_id_list)]
ego_vehicle = world.spawn_actor(ego_vehicle_bp, transform)
ego_vehicle.set_autopilot(True, args.tm_port)
vehicles_id_list.append(ego_vehicle.id)
# create sensor queue
sensor_queue = Queue(maxsize=10)
# create camera
camera_bp = world.get_blueprint_library().find('sensor.camera.rgb')
# camera relative position related to the vehicle
camera_transform = carla.Transform(carla.Location(x=1.5, z=2.4))
camera = world.spawn_actor(camera_bp, camera_transform, attach_to=ego_vehicle)
# set the callback function
camera.listen(lambda image: sensor_callback(image, sensor_queue))
while True:
if args.sync and synchronous_master:
world.tick()
try:
s_frame = sensor_queue.get(True, 1.0)
print("Camera Frame: %d" % (s_frame[0]))
# show image in a poping window
cv2.imshow('camera', s_frame[1])
if cv2.waitKey(1) & 0xFF == ord('q'):
break
except Empty:
print("Some of the sensor information is missed")
else:
world.wait_for_tick()
finally:
world.apply_settings(origin_settings)
print('\ndestroying %d vehicles' % len(vehicles_id_list))
client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_id_list])
camera.destroy()
cv2.destroyAllWindows()
time.sleep(0.5)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
finally:
print('\ndone.')
|
examples/from-wiki/demo_meta_matrixmul_cheetah.py | hesom/pycuda | 1,264 | 12604647 | <filename>examples/from-wiki/demo_meta_matrixmul_cheetah.py
#!python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PyCuda Optimized Matrix Multiplication
Template Meta-programming Example using Cheetah
(modified from SciPy09 Advanced Tutorial)
"""
# ------------------------------------------------------------------------------
import numpy as np
from pycuda import driver, compiler, gpuarray, tools
from Cheetah.Template import Template
import pycuda.autoinit
# -- default parameters
DEFAULT_BLOCK_SIZE = 16
DEFAULT_WORK_SIZE = 1
DEFAULT_UNROLL = 0
DEFAULT_SPILL = False
DEFAULT_PREFETCH = False
from os import path
MYPATH = path.dirname(path.abspath(__file__))
TEMPLATE_FILENAME = path.join(MYPATH, "demo_meta_matrixmul_cheetah.template.cu")
# ------------------------------------------------------------------------------
def matrixmul_opt(mat_a, mat_b,
block_size = DEFAULT_BLOCK_SIZE,
work_size = DEFAULT_WORK_SIZE,
unroll = DEFAULT_UNROLL,
spill = DEFAULT_SPILL,
prefetch = DEFAULT_PREFETCH):
ah, aw = mat_a.shape
bh, bw = mat_b.shape
assert aw == bh
# -- pad input matrices appropriately
ah_padded = int(np.ceil(ah/block_size)) * block_size
aw_padded = int(np.ceil(aw/block_size)) * (block_size*work_size)
mat_a_padded = np.zeros((ah_padded, aw_padded), np.float32)
mat_a_padded[:ah,:aw] = mat_a
bh_padded = aw_padded
bw_padded = int(np.ceil(bw/(block_size*work_size))) * (block_size*work_size)
mat_b_padded = np.zeros((bh_padded, bw_padded), np.float32)
mat_b_padded[:bh, :bw] = mat_b
ch_padded = ah_padded
cw_padded = bw_padded
# -- upload padded input matrices to the GPU
mat_a_gpu = gpuarray.to_gpu(mat_a_padded)
mat_b_gpu = gpuarray.to_gpu(mat_b_padded)
# -- create empty container matrix for the result (C = A * B)
mat_c_gpu = gpuarray.zeros((ch_padded, cw_padded), np.float32)
# -- generate and compile the code
# prepare the template parameters
template_params = {
'BLOCK_SIZE': block_size,
'WORK_SIZE': work_size,
'UNROLL': unroll,
'SPILL': spill,
'PREFETCH': prefetch,
'A_WIDTH': aw_padded,
'A_HEIGHT': ah_padded,
'B_WIDTH': bw_padded,
}
# run the template engine to get the code
kernel_code = Template(
file = TEMPLATE_FILENAME,
searchList = [template_params],
)
# compile the code
module = compiler.SourceModule(kernel_code)
# get the kernel from the module
matrixmul_func = module.get_function("matrixMul")
# some info about the module
print("number of registers used:", matrixmul_func.num_regs)
# block of threads
# ATTENTION: block is (threadDim.x, threadDim.y, threadDim.z)
# and not (threadDim.z, threadDim.y, threadDim.x)
block = block_size, block_size, 1
# grid of blocks
# ATTENTION: it's (blockDim.x, blockDim.y)
# and not (blockDim.y, blockDim.x)
grid = int(cw_padded/block_size/work_size), int(ch_padded/block_size)
# -- call the kernel on the GPU
# Note that when we use time_kernel=True pycuda will automatically synchronize the kernel
# to make sure that the timing is correct. If you time the code yourself, you'll have to
# synchronize the current Context.
gpu_time = matrixmul_func(
# -- output
mat_c_gpu,
# -- inputs
mat_a_gpu, mat_b_gpu,
# -- grid of blocks
grid = grid,
# -- block of threads
block = block,
# -- time the kernel (approx.)
time_kernel = True,
)
# get the GPU matrix back to CPU memory
mat_c_padded = mat_c_gpu.get()
mat_c = mat_c_padded[:ah, :bw]
return mat_c, gpu_time
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# matrix sizes
a_height = 1024
a_width = 1024
b_height = a_width
b_width = 1024
# create random square matrices
np.random.seed(0)
mat_a = np.random.randn(a_height, a_width).astype(np.float32)
mat_b = np.random.randn(b_height, b_width).astype(np.float32)
# compute reference on the cpu to verify GPU computation
mat_ref = np.dot(mat_a, mat_b)
# -- this is a good place to auto-tune the code (using the optimization kwargs)
# (note that you may need more that one iteration to get accurate timing estimates)
mat_c, gpu_time = matrixmul_opt(mat_a, mat_b)
# check for correctness
diff = mat_c - mat_ref
error = np.absolute(diff).max()
assert error <= 1e-2
l2norm = np.linalg.norm(diff)
print("l2norm: ", l2norm)
# print some stats
print("gpu time:", gpu_time)
gflop = mat_c.size * (a_width * 2.) / (1000**3.)
gflops = gflop / gpu_time
print("gflops:", gflops)
|
xmodaler/lr_scheduler/noam_lr.py | cclauss/xmodaler | 830 | 12604677 | <reponame>cclauss/xmodaler<gh_stars>100-1000
import torch
from xmodaler.config import configurable
from .build import LR_SCHEDULER_REGISTRY
@LR_SCHEDULER_REGISTRY.register()
class NoamLR(torch.optim.lr_scheduler._LRScheduler):
@configurable
def __init__(
self,
*,
optimizer,
model_size,
factor,
warmup,
last_epoch=-1,
):
self.warmup = warmup
self.factor = factor
self.model_size = model_size
super(NoamLR, self).__init__(optimizer, last_epoch)
@classmethod
def from_config(cls, cfg, optimizer, data_size):
return {
"optimizer": optimizer,
"model_size": cfg.LR_SCHEDULER.MODEL_SIZE,
"factor": cfg.LR_SCHEDULER.FACTOR,
"warmup": cfg.LR_SCHEDULER.WARMUP, # iterations
"last_epoch": -1
}
def get_lr(self):
return [
self.factor * \
(self.model_size ** (-0.5) *
min((self.last_epoch + 1) ** (-0.5), (self.last_epoch + 1) * self.warmup ** (-1.5)))
for base_lr in self.base_lrs
] |
tests/test_utils.py | fluxility/drf-haystack | 201 | 12604713 | <reponame>fluxility/drf-haystack
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from drf_haystack.utils import merge_dict
class MergeDictTestCase(TestCase):
def setUp(self):
self.dict_a = {
"person": {
"lastname": "Holmes",
"combat_proficiency": [
"Pistol",
"boxing"
]
},
}
self.dict_b = {
"person": {
"gender": "male",
"firstname": "Sherlock",
"location": {
"address": "221B Baker Street"
},
"combat_proficiency": [
"sword",
"Martial arts",
]
}
}
def test_utils_merge_dict(self):
self.assertEqual(merge_dict(self.dict_a, self.dict_b), {
"person": {
"gender": "male",
"firstname": "Sherlock",
"lastname": "Holmes",
"location": {
"address": "221B Baker Street"
},
"combat_proficiency": [
"Martial arts",
"Pistol",
"boxing",
"sword",
]
}
})
def test_utils_merge_dict_invalid_input(self):
self.assertEqual(merge_dict(self.dict_a, "I'm not a dict!"), "I'm not a dict!")
|
tests/test_cswrapper.py | CSchulzeTLK/FMPy | 225 | 12604740 | <reponame>CSchulzeTLK/FMPy<filename>tests/test_cswrapper.py
import unittest
from fmpy import read_model_description, simulate_fmu
from fmpy.util import download_test_file
from fmpy.cswrapper import add_cswrapper
class CSWrapperTest(unittest.TestCase):
def test_cswrapper(self):
filename = 'CoupledClutches.fmu'
download_test_file('2.0', 'ModelExchange', 'MapleSim', '2016.2', 'CoupledClutches', filename)
model_description = read_model_description(filename)
self.assertIsNone(model_description.coSimulation)
add_cswrapper(filename)
simulate_fmu(filename, fmi_type='CoSimulation')
|
st2common/benchmarks/micro/test_mongo_transport_compression.py | momokuri-3/st2 | 4,920 | 12604774 | <filename>st2common/benchmarks/micro/test_mongo_transport_compression.py<gh_stars>1000+
# Copyright 2021 The StackStorm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Benchmarks which measure how much overhead enabling transport / network level MongoDB compression
adds.
"""
from st2common.util.monkey_patch import monkey_patch
monkey_patch()
import os
import pytest
from oslo_config import cfg
from mongoengine.connection import disconnect
from st2common.service_setup import db_setup
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.liveaction import LiveAction
from common import FIXTURES_DIR
from common import PYTEST_FIXTURE_FILE_PARAM_DECORATOR
@PYTEST_FIXTURE_FILE_PARAM_DECORATOR
@pytest.mark.parametrize(
"compression",
[
None,
"zstd",
],
ids=[
"none",
"zstd",
],
)
@pytest.mark.benchmark(group="test_model_save")
def test_save_execution(benchmark, fixture_file: str, compression):
with open(os.path.join(FIXTURES_DIR, fixture_file), "rb") as fp:
content = fp.read()
cfg.CONF.set_override(name="compressors", group="database", override=compression)
# NOTE: It's important we correctly reestablish connection before each setting change
disconnect()
connection = db_setup()
if compression is None:
assert "compressors" not in str(connection)
elif compression == "zstd":
assert "compressors=['zstd']" in str(connection)
def run_benchmark():
live_action_db = LiveActionDB()
live_action_db.status = "succeeded"
live_action_db.action = "core.local"
live_action_db.result = content
inserted_live_action_db = LiveAction.add_or_update(live_action_db)
return inserted_live_action_db
inserted_live_action_db = benchmark(run_benchmark)
assert inserted_live_action_db.result == content
@PYTEST_FIXTURE_FILE_PARAM_DECORATOR
@pytest.mark.parametrize(
"compression",
[
None,
"zstd",
],
ids=[
"none",
"zstd",
],
)
@pytest.mark.benchmark(group="test_model_read")
def test_read_execution(benchmark, fixture_file: str, compression):
with open(os.path.join(FIXTURES_DIR, fixture_file), "rb") as fp:
content = fp.read()
cfg.CONF.set_override(name="compressors", group="database", override=compression)
# NOTE: It's important we correctly reestablish connection before each setting change
disconnect()
connection = db_setup()
if compression is None:
assert "compressors" not in str(connection)
elif compression == "zstd":
assert "compressors=['zstd']" in str(connection)
live_action_db = LiveActionDB()
live_action_db.status = "succeeded"
live_action_db.action = "core.local"
live_action_db.result = content
inserted_live_action_db = LiveAction.add_or_update(live_action_db)
def run_benchmark():
retrieved_live_action_db = LiveAction.get_by_id(inserted_live_action_db.id)
return retrieved_live_action_db
retrieved_live_action_db = benchmark(run_benchmark)
# Assert that result is correctly converted back to dict on retrieval
assert retrieved_live_action_db == inserted_live_action_db
|
libtmux/_compat.py | jamosta/libtmux | 398 | 12604779 | # -*- coding: utf8 -*-
# flake8: NOQA
import sys
from collections.abc import MutableMapping
console_encoding = sys.__stdout__.encoding
def console_to_str(s):
"""From pypa/pip project, pip.backwardwardcompat. License MIT."""
try:
return s.decode(console_encoding, 'ignore')
except UnicodeDecodeError:
return s.decode('utf_8', 'ignore')
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise (value.with_traceback(tb))
raise value
def str_from_console(s):
try:
return str(s)
except UnicodeDecodeError:
return str(s, encoding='utf_8')
|
docs/papers/sc2013/bench/pythran/pi_buffon.py | davidbrochart/pythran | 1,647 | 12604790 | #skip.runas pi_estimate(40000000)
#pythran export pi_estimate(int)
from math import sqrt, pow
from random import random
def pi_estimate(DARTS):
hits = 0
"omp parallel for private(i,x,y,dist), reduction(+:hits)"
for i in xrange (0, DARTS):
x = random()
y = random()
dist = sqrt(pow(x, 2) + pow(y, 2))
if dist <= 1.0:
hits += 1.0
# hits / throws = 1/4 Pi
pi = 4 * (hits / DARTS)
return pi
|
nlt/util/math.py | isabella232/neural-light-transport | 176 | 12604794 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
def sample_pdf(val, weights, n_samples, det=False, eps=1e-5):
weights += eps # prevent NaN's
pdf = weights / tf.reduce_sum(weights, -1, keepdims=True)
cdf = tf.cumsum(pdf, -1)
cdf = tf.concat((tf.zeros_like(cdf[:, :1]), cdf), -1)
if det:
u = tf.linspace(0., 1., n_samples)
u = tf.broadcast_to(u, cdf.shape[:-1] + (n_samples,))
else:
u = tf.random.uniform(cdf.shape[:-1] + (n_samples,))
# Invert CDF
ind = tf.searchsorted(cdf, u, side='right') # (n_rays, n_samples)
below = tf.maximum(0, ind - 1)
above = tf.minimum(ind, cdf.shape[-1] - 1)
ind_g = tf.stack((below, above), -1) # (n_rays, n_samples, 2)
cdf_g = tf.gather(cdf, ind_g, axis=-1, batch_dims=len(ind_g.shape) - 2)
val_g = tf.gather(val, ind_g, axis=-1, batch_dims=len(ind_g.shape) - 2)
denom = cdf_g[:, :, 1] - cdf_g[:, :, 0] # (n_rays, n_samples)
denom = tf.where(denom < eps, tf.ones_like(denom), denom)
t = (u - cdf_g[:, :, 0]) / denom
samples = val_g[:, :, 0] + t * (val_g[:, :, 1] - val_g[:, :, 0])
return samples # (n_rays, n_samples)
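
# A minimal usage sketch (not part of the original module): draw 4 deterministic
# samples per ray from a piecewise-constant PDF over 3 bins. The bin edges and
# weights below are illustrative assumptions.
if __name__ == "__main__":
    val = tf.constant([[0.0, 0.5, 1.0, 1.5]])   # bin edges, shape (n_rays=1, n_bins + 1)
    weights = tf.constant([[0.1, 0.8, 0.1]])    # unnormalized bin weights, shape (1, n_bins)
    samples = sample_pdf(val, weights, n_samples=4, det=True)
    print(samples.numpy())                      # shape (1, 4), concentrated in the middle bin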
|
goatools/grouper/sorter_nts.py | flying-sheep/goatools | 477 | 12604797 | """Sorts GO IDs or user-provided sections containing GO IDs."""
__copyright__ = "Copyright (C) 2016-2019, <NAME>, <NAME>, All rights reserved."
__author__ = "<NAME>"
class SorterNts(object):
"""Handles GO IDs in user-created sections.
* Get a 2-D list of sections:
sections = [
['Immune', [
"GO:HHHHHH0", "GO:UUUUU00", ... "GO:UUUUU0N", "GO:HHHHHH1", ...]],
['Neuro', [
"GO:HHHHHH2", "GO:UUUUU20", ... "GO:UUUUU2N", "GO:HHHHHH3", ...]],
]
Also contains function for various tasks on grouped GO IDs:
* Sort in various ways (sort by: p=value, depth, proximity to leaf-level, etc.):
* Header GO ID groups
* User GO IDs within a group
"""
def __init__(self, sortgos, section_sortby=None):
# User GO IDs grouped under header GO IDs are not sorted by the Grouper class.
# Sort both user GO IDs in a group and header GO IDs across groups with these:
# S: section_sortby (T=True, F=False, S=lambda sort function)
# H: hdrgo_sortby Sorts hdr GO IDs
# U: sortby Sorts user GO IDs
# P: hdrgo_prt If True, Removes GO IDs used as GO group headers; Leaves list in
# sorted order, but removes header GO IDs which are not user GO IDs.
#
# rm_h hdr_sort usr_sort S H U P
# --- ------------ ------------ _ _ _ -
# NO hdrgo_sortby usrgo_sortby T H U T
# YES hdrgo_sortby usrgo_sortby T H U F
# NO section_order usrgo_sortby F - U T
# YES section_order usrgo_sortby F - U F
# YES |<----section_sortby---->| S - - -
# print("SSSS SorterNts(sortgos, section_sortby={})".format(section_sortby))
self.sortgos = sortgos # SorterGoIds
# section_sortby: True, False or None, or a sort_fnc
self.section_sortby = section_sortby
self.sections = self.sortgos.grprobj.hdrobj.sections
# print('IIIIIIIIIIII SorterNts section_sortby', section_sortby)
def get_sorted_nts_keep_section(self, hdrgo_prt):
"""Get 2-D list: 1st level is sections and 2nd level is grouped and sorted namedtuples."""
section_nts = []
# print("SSSS SorterNts:get_sorted_nts_keep_section(hdrgo_prt={})".format(hdrgo_prt))
hdrgos_actual = self.sortgos.grprobj.get_hdrgos()
hdrgos_secs = set()
hdrgo_sort = False if self.section_sortby is False else True
secname_dflt = self.sortgos.grprobj.hdrobj.secdflt
for section_name, section_hdrgos_all in self.sections:
#section_hdrgos_act = set(section_hdrgos_all).intersection(hdrgos_actual)
section_hdrgos_act = [h for h in section_hdrgos_all if h in hdrgos_actual]
hdrgos_secs |= set(section_hdrgos_act)
nts_section = self.sortgos.get_nts_sorted(hdrgo_prt, section_hdrgos_act, hdrgo_sort)
if nts_section:
nts_section = self._get_sorted_section(nts_section)
section_nts.append((section_name, nts_section))
remaining_hdrgos = hdrgos_actual.difference(hdrgos_secs)
# Add GO group headers not yet used under new section, Misc.
if remaining_hdrgos:
nts_section = self.sortgos.get_nts_sorted(hdrgo_prt, remaining_hdrgos, hdrgo_sort)
if nts_section:
nts_section = self._get_sorted_section(nts_section)
section_nts.append((secname_dflt, nts_section))
return section_nts
def get_sorted_nts_omit_section(self, hdrgo_prt, hdrgo_sort):
"""Return a flat list of sections (wo/section names) with GO terms grouped and sorted."""
nts_flat = []
# print("SSSS SorterNts:get_sorted_nts_omit_section(hdrgo_prt={}, hdrgo_sort={})".format(
# hdrgo_prt, hdrgo_sort))
hdrgos_seen = set()
hdrgos_actual = self.sortgos.grprobj.get_hdrgos()
for _, section_hdrgos_all in self.sections:
#section_hdrgos_act = set(section_hdrgos_all).intersection(hdrgos_actual)
section_hdrgos_act = [h for h in section_hdrgos_all if h in hdrgos_actual]
hdrgos_seen |= set(section_hdrgos_act)
self.sortgos.get_sorted_hdrgo2usrgos(
section_hdrgos_act, nts_flat, hdrgo_prt, hdrgo_sort)
remaining_hdrgos = set(self.sortgos.grprobj.get_hdrgos()).difference(hdrgos_seen)
self.sortgos.get_sorted_hdrgo2usrgos(remaining_hdrgos, nts_flat, hdrgo_prt, hdrgo_sort)
return nts_flat
def _get_sorted_section(self, nts_section):
"""Sort GO IDs in each section, if requested by user."""
#pylint: disable=unnecessary-lambda
if self.section_sortby is True:
return sorted(nts_section, key=lambda nt: self.sortgos.usrgo_sortby(nt))
if self.section_sortby is False or self.section_sortby is None:
return nts_section
# print('SORT GO IDS IN A SECTION')
return sorted(nts_section, key=lambda nt: self.section_sortby(nt))
# Copyright (C) 2016-2019, <NAME>, <NAME>, All rights reserved.
|
atlas/foundations_local_docker_scheduler_plugin/src/test/test_cron_job_scheduler.py | DeepLearnI/atlas | 296 | 12604802 |
from foundations_spec import *
import unittest.mock
from foundations_local_docker_scheduler_plugin.cron_job_scheduler import CronJobScheduler, CronJobSchedulerError
class TestCronJobScheduler(Spec):
mock_get = let_patch_mock('requests.get')
mock_delete = let_patch_mock('requests.delete')
mock_patch = let_patch_mock('requests.patch')
mock_put = let_patch_mock('requests.put')
mock_user_token = let_patch_mock('foundations_authentication.user_token.user_token')
mock_successful_response_body = let_mock()
mock_cron_schedule = let_mock()
@let
def scheduler_host(self):
return self.faker.hostname()
@let
def scheduler_port(self):
return self.faker.random.randint(80, 30000)
@let
def scheduler_uri(self):
return f'http://{self.scheduler_host}:{self.scheduler_port}'
@let
def default_scheduler_uri(self):
return f'http://localhost:5000'
@let
def scheduler(self):
return CronJobScheduler(scheduler_url=self.scheduler_uri)
@let
def scheduler_default_args(self):
return CronJobScheduler()
@let
def job_id(self):
return self.faker.word()
@let
def error_message(self):
return self.faker.sentence()
@let
def error_response(self):
response = Mock()
response.text = self.error_message
return response
@let
def success_response_204(self):
response = Mock()
response.status_code = 204
return response
@let
def success_response_200(self):
response = Mock()
response.status_code = 200
response.json.return_value = self.mock_successful_response_body
return response
@let
def project_name(self):
return self.faker.word()
@set_up
def set_up(self):
self.mock_delete.return_value = self.success_response_204
self.mock_patch.return_value = self.success_response_204
self.mock_put.return_value = self.success_response_204
self.mock_get.return_value = self.success_response_200
self.mock_user_token.return_value = 'Token'
def test_pause_scheduled_job_calls_correct_endpoint(self):
self.scheduler.pause_job(self.job_id)
request_payload = {'status': 'paused'}
self.mock_put.assert_called_once_with(f'{self.scheduler_uri}/scheduled_jobs/{self.job_id}', headers={'Authorization': 'Bearer Token'}, json=request_payload)
def test_pause_scheduled_job_calls_correct_endpoint_when_constructed_with_defaults(self):
self.scheduler_default_args.pause_job(self.job_id)
request_payload = {'status': 'paused'}
self.mock_put.assert_called_once_with(f'{self.default_scheduler_uri}/scheduled_jobs/{self.job_id}', headers={'Authorization': 'Bearer Token'}, json=request_payload)
def test_pause_scheduled_job_raises_cron_job_scheduler_error_if_job_does_not_exist(self):
self.error_response.status_code = 404
self.mock_put.return_value = self.error_response
with self.assertRaises(CronJobSchedulerError) as ex:
self.scheduler.pause_job(self.job_id)
self.assertIn(self.error_message, ex.exception.args)
def test_pause_scheduled_job_raises_cron_job_scheduler_error_if_bad_request(self):
self.error_response.status_code = 400
self.mock_put.return_value = self.error_response
with self.assertRaises(CronJobSchedulerError) as ex:
self.scheduler.pause_job(self.job_id)
self.assertIn(self.error_message, ex.exception.args)
def test_resume_scheduled_job_calls_correct_endpoint(self):
self.scheduler.resume_job(self.job_id)
request_payload = {'status': 'active'}
self.mock_put.assert_called_once_with(f'{self.scheduler_uri}/scheduled_jobs/{self.job_id}', headers={'Authorization': 'Bearer Token'}, json=request_payload)
def test_resume_scheduled_job_raises_cron_job_scheduler_error_if_job_does_not_exist(self):
self.error_response.status_code = 404
self.mock_put.return_value = self.error_response
with self.assertRaises(CronJobSchedulerError) as ex:
self.scheduler.resume_job(self.job_id)
self.assertIn(self.error_message, ex.exception.args)
def test_resume_scheduled_job_raises_cron_job_scheduler_error_if_bad_request(self):
self.error_response.status_code = 400
self.mock_put.return_value = self.error_response
with self.assertRaises(CronJobSchedulerError) as ex:
self.scheduler.resume_job(self.job_id)
self.assertIn(self.error_message, ex.exception.args)
def test_delete_scheduled_job_calls_correct_endpoint(self):
self.scheduler.delete_job(self.job_id)
self.mock_delete.assert_called_once_with(f'{self.scheduler_uri}/scheduled_jobs/{self.job_id}', headers={'Authorization': 'Bearer Token'})
def test_delete_scheduled_job_raises_cron_job_scheduler_error_if_job_does_not_exist(self):
self.error_response.status_code = 404
self.mock_delete.return_value = self.error_response
with self.assertRaises(CronJobSchedulerError) as ex:
self.scheduler.delete_job(self.job_id)
self.assertIn(self.error_message, ex.exception.args)
def test_get_scheduled_job_calls_correct_endpoint(self):
self.scheduler.get_job(self.job_id)
self.mock_get.assert_called_once_with(f'{self.scheduler_uri}/scheduled_jobs/{self.job_id}', headers={'Authorization': 'Bearer Token'})
def test_get_scheduled_job_returns_job_data_from_scheduler(self):
response = self.scheduler.get_job(self.job_id)
self.assertEqual(self.mock_successful_response_body, response)
def test_get_scheduled_job_raises_cron_job_scheduler_error_if_job_does_not_exist(self):
self.error_response.status_code = 404
self.mock_get.return_value = self.error_response
with self.assertRaises(CronJobSchedulerError) as ex:
self.scheduler.get_job(self.job_id)
self.assertIn(self.error_message, ex.exception.args)
def test_get_all_scheduled_jobs_calls_correct_endpoint(self):
self.scheduler.get_jobs()
self.mock_get.assert_called_once_with(f'{self.scheduler_uri}/scheduled_jobs', headers={'Authorization': 'Bearer Token'})
def test_get_all_scheduled_jobs_returns_payload_from_request(self):
response = self.scheduler.get_jobs()
self.assertEqual(self.mock_successful_response_body, response)
def test_get_all_scheduled_jobs_raises_cron_job_scheduler_error_if_500(self):
self.error_response.status_code = 500
self.mock_get.return_value = self.error_response
with self.assertRaises(CronJobSchedulerError) as ex:
self.scheduler.get_jobs()
self.assertIn(self.error_message, ex.exception.args)
def test_update_job_schedule_calls_correct_endpoint(self):
self.scheduler.update_job_schedule(self.job_id, self.mock_cron_schedule)
patch_payload = {'schedule': self.mock_cron_schedule}
self.mock_patch.assert_called_once_with(f'{self.scheduler_uri}/scheduled_jobs/{self.job_id}', headers={'Authorization': 'Bearer Token'}, json=patch_payload)
def test_update_job_schedule_raises_cron_job_scheduler_error_if_job_does_not_exist(self):
self.error_response.status_code = 404
self.mock_patch.return_value = self.error_response
with self.assertRaises(CronJobSchedulerError) as ex:
self.scheduler.update_job_schedule(self.job_id, self.mock_cron_schedule)
self.assertIn(self.error_message, ex.exception.args)
def test_update_job_schedule_raises_cron_job_scheduler_error_if_bad_request(self):
self.error_response.status_code = 400
self.mock_patch.return_value = self.error_response
with self.assertRaises(CronJobSchedulerError) as ex:
self.scheduler.update_job_schedule(self.job_id, self.mock_cron_schedule)
self.assertIn(self.error_message, ex.exception.args)
def test_get_subset_of_jobs_by_specifying_parameters(self):
params = {'project': self.project_name}
self.scheduler.get_job_with_params(params)
self.mock_get.assert_called_once_with(f'{self.scheduler_uri}/scheduled_jobs', headers={'Authorization': 'Bearer Token'}, params=params)
|
app/config/urls/rendering_subdomain.py | njmhendrix/grand-challenge.org | 101 | 12604804 | from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import path
from django.views.generic import TemplateView
from grandchallenge.workstations.views import SessionDetail, session_proxy
def handler404(request, exception):
domain = request.site.domain.lower()
return HttpResponseRedirect(
f"{request.scheme}://{domain}{request.get_full_path()}"
)
def handler500(request):
context = {"request": request}
template_name = "500.html"
return TemplateResponse(request, template_name, context, status=500)
urlpatterns = [
path(
"robots.txt",
TemplateView.as_view(
template_name="robots.txt", content_type="text/plain"
),
),
path(
"workstations/<slug>/sessions/<uuid:pk>/",
SessionDetail.as_view(),
name="session-detail",
),
path(
"workstations/<slug>/sessions/<uuid:pk>/<path:path>",
session_proxy,
name="session-proxy",
),
]
|
seq2seq/dataset/__init__.py | mtran14/pytorch-seq2seq | 1,491 | 12604807 | from .fields import SourceField, TargetField
|
SRT/lib/datasets/VideoDatasetV2.py | yerang823/landmark-detection | 612 | 12604877 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from os import path as osp
from copy import deepcopy as copy
from tqdm import tqdm
import warnings, random, numpy as np
from pts_utils import generate_label_map
from xvision import denormalize_points
from xvision import identity2affine, solve2theta, affine2image
from .dataset_utils import pil_loader
from .point_meta_v2 import PointMeta2V
from .point_meta_v2 import apply_affine2point
from .point_meta_v2 import apply_boundary
from .optflow_utils import get_optflow_retval
import torch
import torch.utils.data as data
def check_is_image(frames):
assert len(frames) > 0, 'this is an empty frame list'
is_image = True
for frame in frames:
if frame != frames[0]:
is_image = False
return is_image
class VideoDatasetV2(data.Dataset):
def __init__(self, transform, sigma, downsample, heatmap_type, \
shape, use_gray, mean_file, data_indicator, config, tensor2img):
self.transform = transform
self.sigma = sigma
self.downsample = downsample
self.heatmap_type = heatmap_type
self.dataset_name = data_indicator
self.shape = shape # [H,W]
self.use_gray = use_gray
self.video_config = copy( config )
self.video_L = config.video_L
self.video_R = config.video_R
self.opt_backend = config.optflow_backend
self.optflow = get_optflow_retval( config.optflow_backend )
self.tensor2img = tensor2img
assert self.video_L >= 0 and self.video_R >=0, 'invalid video L and video R : {:} and {:}'.format(self.video_L, self.video_R)
assert transform is not None, 'transform : {:}'.format(transform)
if mean_file is None:
self.mean_data = None
      warnings.warn('VideoDatasetV2 initialized with mean_data = None')
else:
assert osp.isfile(mean_file), '{:} is not a file.'.format(mean_file)
self.mean_data = torch.load(mean_file)
self.reset()
print ('The video dataset initialization done : {:}'.format(self))
def __repr__(self):
return ('{name}(point-num={NUM_PTS}, shape={shape}, length={length}, sigma={sigma}, heatmap_type={heatmap_type}, range=L.{video_L}~R.{video_R}, backend={opt_backend}, dataset={dataset_name})'.format(name=self.__class__.__name__, **self.__dict__))
def reset(self, num_pts=-1, boxid='default', only_pts=False):
self.NUM_PTS = num_pts
if only_pts: return
self.length = 0
self.datas = []
self.labels = []
self.NormDistances = []
self.BOXID = boxid
if self.mean_data is None:
self.mean_face = None
else:
self.mean_face = torch.Tensor(self.mean_data[boxid].copy().T)
assert (self.mean_face >= -1).all() and (self.mean_face <= 1).all(), 'mean-{:}-face : {:}'.format(boxid, self.mean_face)
self.cache_file2index_DXY = {}
#assert self.dataset_name is not None, 'The dataset name is None'
def __len__(self):
assert len(self.datas) == self.length, 'The length is not correct : {}'.format(self.length)
return self.length
def append(self, frames, label, distance):
for frame in frames: assert osp.isfile(frame), 'can not find the frame path : {:}'.format(frame)
self.datas.append( frames ) ; self.labels.append( label )
self.NormDistances.append( distance )
self.length = self.length + 1
self.cache_file2index_DXY[ frames[self.video_L] ] = len(self.datas) - 1
def load_list(self, file_lists, num_pts, boxindicator, normalizeL, reset):
if reset: self.reset(num_pts, boxindicator)
    else : assert self.NUM_PTS == num_pts and self.BOXID == boxindicator, 'The number of points is inconsistent : {:} vs {:}'.format(self.NUM_PTS, num_pts)
if isinstance(file_lists, str): file_lists = [file_lists]
samples = []
for idx, file_path in enumerate(file_lists):
#print (':::: load list {:}/{:} : {:}'.format(idx, len(file_lists), file_path))
xdata = torch.load(file_path)
if isinstance(xdata, list) : data = xdata # image or video dataset list
elif isinstance(xdata, dict): data = xdata['datas'] # multi-view dataset list
else: raise ValueError('Invalid Type Error : {:}'.format( type(xdata) ))
samples = samples + data
print (':::: load list {:}/{:} : {:70s} || with {:} samples'.format(idx, len(file_lists), file_path, len(data)))
# samples is a list, where each element is the annotation. Each annotation is a dict, contains 'points' (3,num_pts), and various box
print ('Starting load {:} samples for VideoDataset-V2'.format(len(samples)))
# get the forward-backward frames
Fprevious, Fnext = {}, {}
for index, annotation in tqdm( enumerate(samples) ):
ppath, xpath, npath = annotation['previous_frame'], annotation['current_frame'], annotation['next_frame']
if xpath in Fprevious and Fprevious[xpath] is not None and ppath is not None:
assert Fprevious[xpath] == ppath, '{:} :: {:} vs. {:}'.format(index, Fprevious[xpath], ppath)
else: Fprevious[xpath] = ppath
if xpath in Fnext and Fnext[xpath] is not None and npath is not None:
assert Fnext[xpath] == npath, '{:} :: {:} vs. {:}'.format(index, Fnext[xpath], npath)
else: Fnext[xpath] = npath
#for index, annotation in tqdm( enumerate(samples) ):
for index in tqdm( range(len(samples)) ):
annotation = samples[index]
image_path = annotation['current_frame']
points, box = annotation['points'], annotation['box-{:}'.format(boxindicator)]
label = PointMeta2V(self.NUM_PTS, points, box, image_path, self.dataset_name)
if normalizeL is None: normDistance = None
else : normDistance = annotation['normalizeL-{:}'.format(normalizeL)]
if annotation['previous_frame'] is None and annotation['next_frame'] is None and annotation['points'] is None:
continue # useless data in our framework
frames = [None] * self.video_L + [image_path] + [None] * self.video_R
temp = Fprevious[image_path]
for i in range(self.video_L):
if temp is None: frames[self.video_L-i-1] = frames[self.video_L-i]
else:
frames[self.video_L-i-1] = temp
if temp in Fprevious: temp = Fprevious[temp]
else : temp = None
temp = Fnext[image_path]
for i in range(self.video_R):
if temp is None: frames[self.video_L+i+1] = frames[self.video_L+i]
else:
frames[self.video_L+i+1] = temp
if temp in Fnext: temp = Fnext[temp]
else : temp = None
self.append(frames, label, normDistance)
assert len(self.datas) == self.length, 'The length and the data is not right {:} vs {:}'.format(self.length, len(self.datas))
assert len(self.labels) == self.length, 'The length and the labels is not right {:} vs {:}'.format(self.length, len(self.labels))
    assert len(self.NormDistances) == self.length, 'The length and the NormDistances is not right {:} vs {:}'.format(self.length, len(self.NormDistances))
print ('Load data done for VideoDatasetV2, which has {:} images.'.format(self.length))
def check_is_image(self, index):
if index < 0: index = self.length + index
assert index >= 0 and index < self.length, 'Invalid index : {:}'.format(index)
return check_is_image( self.datas[index] )
def __getitem__(self, index):
assert index >= 0 and index < self.length, 'Invalid index : {:}'.format(index)
frame_paths = self.datas[index]
frames = [pil_loader(f_path, self.use_gray) for f_path in frame_paths]
target = self.labels[index].copy()
torch_is_image = torch.ByteTensor( [check_is_image(frame_paths)] )
affineFrames, forward_flow, backward_flow, heatmaps, mask, norm_trans_points, THETA, transpose_theta, torch_index, torch_nopoints, torch_shape = self._process_(frames, target, index, check_is_image(frame_paths))
return affineFrames, forward_flow, backward_flow, heatmaps, mask, norm_trans_points, THETA, transpose_theta, torch_index, torch_nopoints, torch_shape, torch_is_image
def find_index(self, xpath):
assert xpath in self.cache_file2index_DXY, 'Can not find this path : {:}'.format(xpath)
index = self.cache_file2index_DXY[ xpath ]
points = self.labels[ index ].get_points()
return points
def _process_(self, pil_frames, target, index, skip_opt):
# transform the image and points
frames, target, theta = self.transform(pil_frames, target)
(C, H, W), (height, width) = frames[0].size(), self.shape
# obtain the visiable indicator vector
if target.is_none(): nopoints = True
else : nopoints = False
if isinstance(theta, list) or isinstance(theta, tuple):
      affineFrames, forward_flow, backward_flow, heatmaps, mask, norm_trans_points, THETA, transpose_theta = [], [], [], [], [], [], [], []
for _theta in theta:
_affineFrames, _forward_flow, _backward_flow, _heatmaps, _mask, _norm_trans_points, _theta, _transpose_theta \
= self.__process_affine(frames, target, _theta, nopoints, skip_opt)
affineFrames.append(_affineFrames)
forward_flow.append(_forward_flow)
backward_flow.append(_backward_flow)
heatmaps.append(_heatmaps)
mask.append(_mask)
norm_trans_points.append(_norm_trans_points)
THETA.append(_theta)
transpose_theta.append(_transpose_theta)
affineFrames, forward_flow, backward_flow, heatmaps, mask, norm_trans_points, THETA, transpose_theta = \
torch.stack(affineFrames), torch.stack(forward_flow), torch.stack(backward_flow), torch.stack(heatmaps), torch.stack(mask), torch.stack(norm_trans_points), torch.stack(THETA), torch.stack(transpose_theta)
else:
affineFrames, forward_flow, backward_flow, heatmaps, mask, norm_trans_points, THETA, transpose_theta = self.__process_affine(frames, target, theta, nopoints, skip_opt)
torch_index = torch.IntTensor([index])
torch_nopoints = torch.ByteTensor( [ nopoints ] )
torch_shape = torch.IntTensor([H,W])
return affineFrames, forward_flow, backward_flow, heatmaps, mask, norm_trans_points, THETA, transpose_theta, torch_index, torch_nopoints, torch_shape
def __process_affine(self, frames, target, theta, nopoints, skip_opt, aux_info=None):
frames, target, theta = [frame.clone() for frame in frames], target.copy(), theta.clone()
(C, H, W), (height, width) = frames[0].size(), self.shape
if nopoints: # do not have label
norm_trans_points = torch.zeros((3, self.NUM_PTS))
heatmaps = torch.zeros((self.NUM_PTS+1, height//self.downsample, width//self.downsample))
mask = torch.ones((self.NUM_PTS+1, 1, 1), dtype=torch.uint8)
transpose_theta = identity2affine(False)
else:
norm_trans_points = apply_affine2point(target.get_points(), theta, (H,W))
norm_trans_points = apply_boundary(norm_trans_points)
real_trans_points = norm_trans_points.clone()
real_trans_points[:2, :] = denormalize_points(self.shape, real_trans_points[:2,:])
heatmaps, mask = generate_label_map(real_trans_points.numpy(), height//self.downsample, width//self.downsample, self.sigma, self.downsample, nopoints, self.heatmap_type) # H*W*C
heatmaps = torch.from_numpy(heatmaps.transpose((2, 0, 1))).type(torch.FloatTensor)
mask = torch.from_numpy(mask.transpose((2, 0, 1))).type(torch.ByteTensor)
if torch.sum(norm_trans_points[2,:] == 1) < 3 or self.mean_face is None:
        warnings.warn('In VideoDatasetV2 after transformation, no visible point, using identity instead. Aux: {:}'.format(aux_info))
transpose_theta = identity2affine(False)
else:
transpose_theta = solve2theta(norm_trans_points, self.mean_face.clone())
affineFrames = [affine2image(frame, theta, self.shape) for frame in frames]
if not skip_opt:
Gframes = [self.tensor2img(frame) for frame in affineFrames]
forward_flow, backward_flow = [], []
for idx in range( len(Gframes) ):
if idx > 0:
forward_flow.append( self.optflow.calc(Gframes[idx-1], Gframes[idx], None) )
if idx+1 < len(Gframes):
#backward_flow.append( self.optflow.calc(Gframes[idx], Gframes[idx+1], None) )
backward_flow.append( self.optflow.calc(Gframes[idx+1], Gframes[idx], None) )
forward_flow = torch.stack( [torch.from_numpy(x) for x in forward_flow] )
backward_flow = torch.stack( [torch.from_numpy(x) for x in backward_flow] )
else:
forward_flow, backward_flow = torch.zeros((len(affineFrames)-1, height, width, 2)), torch.zeros((len(affineFrames)-1, height, width, 2))
# affineFrames #frames x #channel x #height x #width
# forward_flow (#frames-1) x #height x #width x 2
# backward_flow (#frames-1) x #height x #width x 2
return torch.stack(affineFrames), forward_flow, backward_flow, heatmaps, mask, norm_trans_points, theta, transpose_theta
class SbrBatchSampler(object):
def __init__(self, dataset, ibatch, vbatch, sbr_sampler_use_vid):
'''
Args:
- dataset: an instance of the VideoDatasetV2 class
- ibatch: the batch size of images for one iteration
- vbatch: the batch size of videos for one iteration
'''
super(SbrBatchSampler, self).__init__()
self.length = len(dataset)
self.IMG_indexes = []
self.VID_indexes = []
for i in range(len(dataset)):
if dataset.labels[i].is_none() == False and (sbr_sampler_use_vid or dataset.check_is_image( i )):
self.IMG_indexes.append( i )
if dataset.check_is_image( i ) == False:
self.VID_indexes.append( i )
self.IMG_batch = ibatch
self.VID_batch = vbatch
if self.IMG_batch == 0: self.iters = len(self.VID_indexes) // self.VID_batch + 1
else : self.iters = len(self.IMG_indexes) // self.IMG_batch + 1
#assert self.IMG_batch > 0, 'image batch size must be greater than 0'
assert len(self.IMG_indexes) >= self.IMG_batch, '{:} vs {:}'.format(len(self.IMG_indexes), self.IMG_batch)
assert len(self.VID_indexes) >= self.VID_batch, '{:} vs {:}'.format(len(self.VID_indexes), self.VID_batch)
print ('In SbrBatchSampler, sample {:} images and {:} videos from {:} datas'.format(len(self.IMG_indexes), len(self.VID_indexes), len(dataset)))
def __iter__(self):
# yield a batch of indexes
for index in range(self.iters):
if self.IMG_batch == 0: images = []
else : images = random.sample(self.IMG_indexes, self.IMG_batch)
if self.VID_batch == 0: videos = []
else : videos = random.sample(self.VID_indexes, self.VID_batch)
batchlist = images + videos
assert len(batchlist) > 0, 'invalid batchlist : {:}'.format(batchlist)
batch = torch.LongTensor(batchlist)
yield batch
def __len__(self):
# returns the number of iterations (episodes) per epoch
return self.iters
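# --- Hedged usage note (comments added for illustration; not part of the
# original file). SbrBatchSampler is normally consumed through the
# batch_sampler argument of torch.utils.data.DataLoader; the argument values
# below are placeholders rather than settings taken from this repository:
#
#   sampler = SbrBatchSampler(video_dataset, ibatch=8, vbatch=2,
#                             sbr_sampler_use_vid=True)
#   loader  = torch.utils.data.DataLoader(video_dataset,
#                                         batch_sampler=sampler,
#                                         num_workers=4)
#
# Each yielded batch then mixes `ibatch` labelled still-image indexes with
# `vbatch` video indexes, matching how __iter__ above samples its batchlist.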
|
pex/sorted_tuple.py | alexey-tereshenkov-oxb/pex | 2,160 | 12604946 | <reponame>alexey-tereshenkov-oxb/pex<gh_stars>1000+
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from pex.typing import TYPE_CHECKING, Generic, cast, overload
if TYPE_CHECKING:
from typing import Any, Iterable, Iterator, Optional, Protocol, TypeVar, Union
class Comparable(Protocol):
def __lt__(self, other):
# type: (Any) -> bool
pass
_CT = TypeVar("_CT", bound=Comparable)
_T = TypeVar("_T", bound=Comparable)
class Comparator(Protocol):
def __call__(self, item):
# type: (Any) -> Comparable
pass
class SortedTuple(Generic["_CT"], tuple):
@overload
def __new__(cls):
# type: () -> SortedTuple[Any]
pass
@overload
def __new__(
cls,
iterable, # type: Iterable[_CT]
key=None, # type: None
reverse=False, # type: bool
):
# type: (...) -> SortedTuple[_CT]
pass
@overload
def __new__(
cls,
iterable, # type: Iterable[Any]
key, # type: Comparator
reverse=False, # type: bool
):
# type: (...) -> SortedTuple[_CT]
pass
def __new__(
cls,
iterable=None, # type: Union[None, Iterable[_CT], Iterable[Any]]
key=None, # type: Optional[Comparator]
reverse=False, # type: bool
):
# type: (...) -> SortedTuple[_CT]
return super(SortedTuple, cls).__new__(
cls, sorted(iterable, key=key, reverse=reverse) if iterable else ()
)
@overload
def __getitem__(self, index):
# type: (int) -> _CT
pass
@overload
def __getitem__(self, slice_spec):
# type: (slice) -> SortedTuple[_CT]
pass
def __getitem__(self, item):
# type: (Union[int, slice]) -> Union[_CT, SortedTuple[_CT]]
return cast("Union[_CT, SortedTuple[_CT]]", tuple.__getitem__(self, item))
def __iter__(self):
# type: () -> Iterator[_CT]
return tuple.__iter__(self)
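# --- Hedged usage sketch (added for illustration; not part of the upstream
# pex module, and it assumes pex is importable on sys.path). SortedTuple
# sorts its input once at construction time and otherwise behaves like an
# ordinary immutable tuple.
if __name__ == "__main__":
    versions = SortedTuple([3, 1, 2])
    print(versions)     # (1, 2, 3)
    print(versions[0])  # 1 -- smallest element first
    # Custom comparator and descending order, mirroring the key/reverse
    # parameters accepted by __new__ above.
    print(SortedTuple([("b", 2), ("a", 1)], key=lambda kv: kv[1], reverse=True))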
|
venv/Lib/site-packages/altair/vegalite/schema.py | ajayiagbebaku/NFL-Model | 6,831 | 12604977 | """Altair schema wrappers"""
# flake8: noqa
from .v4.schema import *
|
common/dataset/ShapeNetV2.py | hyunynim/DIST-Renderer | 176 | 12604992 | <gh_stars>100-1000
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from geometry import Shape
import json
from tqdm import tqdm
def read_split_file(split_file):
with open(split_file, 'r') as f:
split = json.load(f)
key_list = list(split.keys())
assert(len(key_list) == 1)
dataset = key_list[0]
data = split[dataset]
key_list = list(data.keys())
assert(len(key_list) == 1)
class_name = key_list[0]
instance_list = split[dataset][class_name]
return instance_list
def filter_shape(shape_list, split_file):
instance_list = read_split_file(split_file)
new_list = []
for shape in tqdm(shape_list):
if shape.shape_md5 in instance_list:
new_list.append(shape)
return new_list
class ShapeNetV2(object):
def __init__(self, data_dir):
self.data_dir = data_dir
self.shape_name_pairs = [('02691156', 'plane'),
('02834778', 'bicycle'),
('02858304', 'boat'),
('02876657', 'bottle'),
('02924116', 'bus'),
('02958343', 'car'),
('03001627', 'chair'),
('04379243', 'table'),
('03790512', 'motorbike'),
('04256520', 'sofa'),
('04468005', 'train'),
('03211117', 'tvmonitor')]
self.shape_class_ids = [x[0] for x in self.shape_name_pairs]
self.shape_names = [x[1] for x in self.shape_name_pairs]
def get_class_id_from_name(self, name):
if not (name in self.shape_names):
raise ValueError('class name {0} not found.'.format(name))
idx = self.shape_names.index(name)
return self.shape_class_ids[idx]
def get_name_from_class_id(self, class_id):
if not (class_id in self.shape_class_ids):
raise ValueError('class id {0} not found.'.format(class_id))
idx = self.shape_class_ids.index(class_id)
return self.shape_names[idx]
def get_split_file_name(self, name, mode='train'):
nowpath = os.path.dirname(os.path.abspath(__file__))
basepath = os.path.join(nowpath, '..', '..')
split_file = os.path.join(basepath, 'examples', 'splits', 'sv2_{0}s_{1}.json'.format(name, mode))
return split_file
def get_shape_list_from_name(self, name, use_split_file=None, mode='train'):
class_id = self.get_class_id_from_name(name)
return self.get_shape_list_from_class_id(class_id, use_split_file=use_split_file, mode=mode)
def get_shape_list_from_class_id(self, class_id, use_split_file=None, mode='train'):
path = os.path.join(self.data_dir, class_id)
if not os.path.exists(path):
return []
shape_md5_list = os.listdir(path)
shape_list = [Shape(class_id, shape_md5, os.path.join(path, shape_md5, 'models/model_normalized.obj')) for shape_md5 in shape_md5_list]
if use_split_file is not None:
name = self.get_name_from_class_id(class_id)
split_file = self.get_split_file_name(name, mode=mode)
shape_list = filter_shape(shape_list, split_file)
return shape_list
def get_shape_from_instance_name(self, class_id, instance_name):
path = os.path.join(self.data_dir, class_id)
shape = Shape(class_id, instance_name, os.path.join(path, instance_name, 'models/model_normalized.obj'))
return shape
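# --- Hedged usage sketch (added for illustration; not part of the original
# file, and it assumes the repository's `geometry` import path is available).
# The data_dir below is a placeholder; only the id/name lookups are shown
# because they need no ShapeNet files on disk.
if __name__ == '__main__':
    dataset = ShapeNetV2('/path/to/ShapeNetCore.v2')
    print(dataset.get_class_id_from_name('chair'))      # 03001627
    print(dataset.get_name_from_class_id('02958343'))   # car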
|
migrations/versions/4d302aa44bc8_add_additional_revis.py | vault-the/changes | 443 | 12605008 | <filename>migrations/versions/4d302aa44bc8_add_additional_revis.py
"""Add additional revision data
Revision ID: 4d302aa44bc8
Revises: 215db24a630a
Create Date: 2013-11-26 16:20:59.454360
"""
# revision identifiers, used by Alembic.
revision = '4d302aa44bc8'
down_revision = '215db24a630a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('revision', sa.Column('committer_id', sa.GUID(), nullable=True))
op.add_column('revision', sa.Column('date_committed', sa.DateTime(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('revision', 'date_committed')
op.drop_column('revision', 'committer_id')
### end Alembic commands ###
|
tests/Exscript/QueueTest.py | saveshodhan/exscript | 226 | 12605045 | <gh_stars>100-1000
from builtins import object
import sys
import unittest
import re
import os.path
import warnings
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
warnings.simplefilter('ignore', DeprecationWarning)
import shutil
import time
import ctypes
from functools import partial
from tempfile import mkdtemp
from multiprocessing import Value
from multiprocessing.managers import BaseManager
from Exscript import Queue, Account, AccountPool, FileLogger
from Exscript.protocols import Protocol, Dummy
from Exscript.interpreter.exception import FailException
from Exscript.util.decorator import bind
from Exscript.util.log import log_to
def count_calls(job, data, **kwargs):
assert hasattr(job, 'start')
assert 'testarg' in kwargs
data.value += 1
def count_calls2(job, host, conn, data, **kwargs):
assert isinstance(conn, Protocol)
count_calls(job, data, **kwargs)
def count_and_fail(job, data, **kwargs):
count_calls(job, data, **kwargs)
raise FailException('intentional error')
def spawn_subtask(job, host, conn, queue, data, **kwargs):
count_calls2(job, host, conn, data, **kwargs)
func = bind(count_calls2, data, testarg=1)
task = queue.priority_run('subtask', func)
task.wait()
def do_nothing(job, host, conn):
pass
def say_hello(job, host, conn):
conn.send('hello')
def error(job, host, conn):
say_hello(job, host, conn)
raise FailException('intentional error')
def fatal_error(job, host, conn):
say_hello(job, host, conn)
raise Exception('intentional fatal error')
class MyProtocol(Dummy):
pass
def raise_if_not_myprotocol(job, host, conn):
if not isinstance(conn, MyProtocol):
raise Exception('not a MyProtocol instance')
class Log(object):
data = ''
def write(self, data):
self.data += data
def flush(self):
pass
def read(self):
return self.data
class LogManager(BaseManager):
pass
LogManager.register('Log', Log)
class QueueTest(unittest.TestCase):
CORRELATE = Queue
mode = 'threading'
def createQueue(self, logdir=None, **kwargs):
if self.queue:
self.queue.destroy()
self.out = self.manager.Log()
self.err = self.manager.Log()
self.queue = Queue(mode=self.mode,
stdout=self.out,
stderr=self.err,
**kwargs)
self.accm = self.queue.account_manager
if logdir is not None:
self.logger = FileLogger(logdir)
def setUp(self):
self.tempdir = mkdtemp()
self.queue = None
self.logger = None
self.manager = LogManager()
self.manager.start()
self.createQueue(verbose=-1, logdir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
try:
self.queue.destroy()
except:
pass # queue already destroyed
self.manager.shutdown()
def assertVerbosity(self, channel, expected):
data = channel.read()
if expected == 'no_tb':
self.assertTrue('error' in data, data)
self.assertNotIn('Traceback', data)
elif expected == 'tb':
self.assertTrue('error' in data, data)
self.assertIn('Traceback', data)
elif expected == '':
self.assertEqual(data, '')
else:
msg = repr(expected) + ' not in ' + repr(data)
self.assertTrue(expected in data, msg)
def testConstructor(self):
self.assertEqual(1, self.queue.get_max_threads())
# Test all verbosity levels.
levels = (
(-1, 1, ('', ''), ('', ''), ('', 'tb')),
(-1, 2, ('', ''), ('', ''), ('', 'tb')),
(0, 1, ('', ''), ('', 'no_tb'), ('', 'tb')),
(0, 2, ('', ''), ('', 'no_tb'), ('', 'tb')),
(1, 1, ('hello', ''), ('hello', 'no_tb'), ('hello', 'tb')),
(1, 2, ('[', ''), ('[', 'no_tb'), ('[', 'tb')),
(2, 1, ('hello', ''), ('hello', 'tb'), ('hello', 'tb')),
(2, 2, ('[', ''), ('[', 'tb'), ('[', 'tb')),
(3, 1, ('hello', ''), ('hello', 'tb'), ('hello', 'tb')),
(3, 2, ('[', ''), ('[', 'tb'), ('[', 'tb')),
(4, 1, ('hello', ''), ('hello', 'tb'), ('hello', 'tb')),
(4, 2, ('[', ''), ('[', 'tb'), ('[', 'tb')),
(5, 1, ('hello', ''), ('hello', 'tb'), ('hello', 'tb')),
(5, 2, ('[', ''), ('[', 'tb'), ('[', 'tb')),
)
for level, max_threads, with_simple, with_error, with_fatal in levels:
# print("S:", level, max_threads, with_simple, with_error,)
# with_fatal
stdout, stderr = with_simple
self.createQueue(verbose=level, max_threads=max_threads)
self.queue.run('dummy://mytest', say_hello)
self.queue.join()
self.assertVerbosity(self.out, stdout)
self.assertVerbosity(self.err, stderr)
# print("E:", level, max_threads, with_simple, with_error,)
# with_fatal
stdout, stderr = with_error
self.createQueue(verbose=level, max_threads=max_threads)
self.queue.run('dummy://mytest', error)
self.queue.join()
self.assertVerbosity(self.out, stdout)
self.assertVerbosity(self.err, stderr)
# print("F:", level, max_threads, with_simple, with_error,)
# with_fatal
stdout, stderr = with_fatal
self.createQueue(verbose=level, max_threads=max_threads)
self.queue.run('dummy://mytest', fatal_error)
self.queue.join()
self.assertVerbosity(self.out, stdout)
self.assertVerbosity(self.err, stderr)
def testCreatePipe(self):
account = Account('user', 'test')
self.accm.add_account(account)
pipe = self.queue._create_pipe()
pipe.send(('acquire-account', None))
response = pipe.recv()
expected = (account.__hash__(),
account.get_name(),
account.get_password(),
account.get_authorization_password(),
account.get_key())
self.assertEqual(response, expected)
pipe.send(('release-account', account.__hash__()))
response = pipe.recv()
self.assertEqual(response, 'ok')
pipe.close()
def testSetMaxThreads(self):
self.assertEqual(1, self.queue.get_max_threads())
self.queue.set_max_threads(2)
self.assertEqual(2, self.queue.get_max_threads())
def testGetMaxThreads(self):
pass # Already tested in testSetMaxThreads().
def testGetProgress(self):
self.assertEqual(0.0, self.queue.get_progress())
self.testIsCompleted()
self.assertEqual(100.0, self.queue.get_progress())
def testAddAccount(self):
self.assertEqual(0, self.accm.default_pool.n_accounts())
account = Account('user', 'test')
self.queue.add_account(account)
self.assertEqual(1, self.accm.default_pool.n_accounts())
def testAddAccountPool(self):
self.assertEqual(0, self.accm.default_pool.n_accounts())
account = Account('user', 'test')
self.queue.add_account(account)
self.assertEqual(1, self.accm.default_pool.n_accounts())
def match_cb(data, host):
data['match-called'].value = True
return True
def start_cb(data, job, host, conn):
account = conn.account_factory(None)
data['start-called'].value = True
data['account-hash'].value = account.__hash__()
account.release()
# Replace the default pool.
pool1 = AccountPool()
self.queue.add_account_pool(pool1)
self.assertEqual(self.accm.default_pool, pool1)
# Add another pool, making sure that it does not replace
# the default pool.
pool2 = AccountPool()
account2 = Account('user', 'test')
pool2.add_account(account2)
match_called = Value(ctypes.c_bool, False)
start_called = Value(ctypes.c_bool, False)
account_hash = Value(ctypes.c_long, 0)
data = {'match-called': match_called,
'start-called': start_called,
'account-hash': account_hash}
self.queue.add_account_pool(pool2, partial(match_cb, data))
self.assertEqual(self.accm.default_pool, pool1)
# Make sure that pool2 is chosen (because the match function
# returns True).
self.queue.run('dummy://dummy', partial(start_cb, data))
self.queue.shutdown()
data = dict((k, v.value) for (k, v) in list(data.items()))
self.assertEqual(data, {'match-called': True,
'start-called': True,
'account-hash': account2.__hash__()})
def startTask(self):
self.testAddAccount()
hosts = ['dummy://dummy1', 'dummy://dummy2']
task = self.queue.run(hosts, log_to(self.logger)(do_nothing))
self.assertTrue(task is not None)
return task
def testIsCompleted(self):
self.assertTrue(self.queue.is_completed())
task = self.startTask()
self.assertFalse(self.queue.is_completed())
task.wait()
self.assertTrue(task.is_completed())
self.assertTrue(self.queue.is_completed())
def testJoin(self):
task = self.startTask()
self.queue.join()
self.assertTrue(task.is_completed())
self.assertTrue(self.queue.is_completed())
def testShutdown(self):
task = self.startTask() # this also adds an account
self.queue.shutdown()
self.assertTrue(task.is_completed())
self.assertTrue(self.queue.is_completed())
self.assertEqual(self.accm.default_pool.n_accounts(), 1)
def testDestroy(self):
task = self.startTask() # this also adds an account
self.queue.destroy()
self.assertTrue(self.queue.is_completed())
self.assertEqual(self.accm.default_pool.n_accounts(), 0)
def testReset(self):
self.testAddAccount()
self.queue.reset()
self.assertEqual(self.accm.default_pool.n_accounts(), 0)
def testExceptionCallback(self):
self.exc = {}
def my_exc_cb(jobname, exc_info):
self.exc[jobname] = exc_info
self.createQueue(exc_cb=my_exc_cb)
self.queue.run('dummy://mytest', error)
self.queue.join()
self.assertIn("mytest", self.exc)
self.assertIsInstance(self.exc["mytest"][1], FailException)
def testRun(self):
data = Value('i', 0)
hosts = ['dummy://dummy1', 'dummy://dummy2']
func = bind(count_calls2, data, testarg=1)
self.queue.run(hosts, func)
self.queue.run('dummy://dummy3', func)
self.queue.shutdown()
self.assertEqual(data.value, 3)
self.queue.run('dummy://dummy4', func)
self.queue.destroy()
self.assertEqual(data.value, 4)
def testRunOrIgnore(self):
data = Value('i', 0)
hosts = ['dummy://dummy1', 'dummy://dummy2', 'dummy://dummy1']
func = bind(count_calls2, data, testarg=1)
self.queue.workqueue.pause()
self.queue.run_or_ignore(hosts, func)
self.queue.run_or_ignore('dummy://dummy2', func)
self.queue.workqueue.unpause()
self.queue.shutdown()
self.assertEqual(data.value, 2)
self.queue.run_or_ignore('dummy://dummy4', func)
self.queue.destroy()
self.assertEqual(data.value, 3)
def testPriorityRun(self):
def write(data, value, *args):
data.value = value
data = Value('i', 0)
self.queue.workqueue.pause()
self.queue.enqueue(partial(write, data, 1))
self.queue.priority_run('dummy://dummy', partial(write, data, 2))
self.queue.workqueue.unpause()
self.queue.destroy()
# The 'dummy' job should run first, so the value must
# be overwritten by the other process.
self.assertEqual(data.value, 1)
def testPriorityRunOrRaise(self):
data = Value('i', 0)
hosts = ['dummy://dummy1', 'dummy://dummy2', 'dummy://dummy1']
func = bind(count_calls2, data, testarg=1)
self.queue.workqueue.pause()
self.queue.priority_run_or_raise(hosts, func)
self.queue.priority_run_or_raise('dummy://dummy2', func)
self.queue.workqueue.unpause()
self.queue.shutdown()
self.assertEqual(data.value, 2)
self.queue.priority_run_or_raise('dummy://dummy4', func)
self.queue.destroy()
self.assertEqual(data.value, 3)
def testForceRun(self):
data = Value('i', 0)
hosts = ['dummy://dummy1', 'dummy://dummy2']
func = bind(count_calls2, data, testarg=1)
# By setting max_threads to 0 we ensure that the 'force' part is
# actually tested; the thread should run regardless.
self.queue.set_max_threads(0)
self.queue.force_run(hosts, func)
self.queue.destroy()
self.assertEqual(data.value, 2)
def testEnqueue(self):
data = Value('i', 0)
func = bind(count_calls, data, testarg=1)
self.queue.enqueue(func)
self.queue.enqueue(func)
self.queue.shutdown()
self.assertEqual(data.value, 2)
self.queue.enqueue(func)
self.queue.shutdown()
self.assertEqual(data.value, 3)
func = bind(count_and_fail, data, testarg=1)
self.queue.enqueue(func, attempts=7)
self.queue.destroy()
self.assertEqual(data.value, 10)
# FIXME: Not a method test; this should probably be elsewhere.
def testLogging(self):
task = self.startTask()
while not task.is_completed():
time.sleep(.1)
# The following function is not synchronous with the above, so add
# a timeout to avoid races.
time.sleep(.1)
self.assertTrue(self.queue.is_completed())
logfiles = os.listdir(self.tempdir)
self.assertEqual(2, len(logfiles))
self.assertIn('dummy1.log', logfiles)
self.assertIn('dummy2.log', logfiles)
for file in logfiles:
with open(os.path.join(self.tempdir, file)) as fp:
content = fp.read()
class QueueTestMultiProcessing(QueueTest):
mode = 'multiprocessing'
def suite():
loader = unittest.TestLoader()
suite1 = loader.loadTestsFromTestCase(QueueTest)
suite2 = loader.loadTestsFromTestCase(QueueTestMultiProcessing)
return unittest.TestSuite((suite1, suite2))
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
rest_registration/verification_notifications.py | psibean/django-rest-registration | 329 | 12605075 | <reponame>psibean/django-rest-registration<filename>rest_registration/verification_notifications.py<gh_stars>100-1000
from typing import TYPE_CHECKING, Dict
from rest_framework.request import Request
from rest_registration.exceptions import VerificationTemplatesNotFound
from rest_registration.notifications.email import send_verification_notification
from rest_registration.notifications.enums import NotificationMethod, NotificationType
from rest_registration.settings import registration_settings
from rest_registration.signers.register import RegisterSigner
from rest_registration.signers.register_email import RegisterEmailSigner
from rest_registration.signers.reset_password import ResetPasswordSigner
from rest_registration.utils.users import get_user_verification_id
from rest_registration.utils.verification import select_default_templates
if TYPE_CHECKING:
from django.contrib.auth.base_user import AbstractBaseUser
def send_register_verification_email_notification(
request: Request,
user: 'AbstractBaseUser',
) -> None:
signer = RegisterSigner({
'user_id': get_user_verification_id(user),
}, request=request)
template_config_data = _get_email_template_config_data(
request, user, NotificationType.REGISTER_VERIFICATION)
notification_data = {
'params_signer': signer,
}
send_verification_notification(
NotificationType.REGISTER_VERIFICATION, user,
notification_data, template_config_data)
def send_register_email_verification_email_notification(
request: Request,
user: 'AbstractBaseUser',
email: str,
email_already_used: bool = False,
) -> None:
signer = RegisterEmailSigner({
'user_id': get_user_verification_id(user),
'email': email,
}, request=request)
notification_data = {
'params_signer': signer,
'email_already_used': email_already_used,
}
template_config_data = _get_email_template_config_data(
request, user, NotificationType.REGISTER_EMAIL_VERIFICATION)
send_verification_notification(
NotificationType.REGISTER_EMAIL_VERIFICATION, user,
notification_data, template_config_data, custom_user_address=email)
def send_reset_password_verification_email_notification(
request: Request,
user: 'AbstractBaseUser',
) -> None:
signer = ResetPasswordSigner({
'user_id': get_user_verification_id(user),
}, request=request)
template_config_data = _get_email_template_config_data(
request, user, NotificationType.RESET_PASSWORD_VERIFICATION)
notification_data = {
'params_signer': signer,
}
send_verification_notification(
NotificationType.RESET_PASSWORD_VERIFICATION, user, notification_data,
template_config_data)
def _get_email_template_config_data(
request: Request,
user: 'AbstractBaseUser',
notification_type: NotificationType,
) -> Dict[str, str]:
template_selector = registration_settings.VERIFICATION_TEMPLATES_SELECTOR
notification_method = NotificationMethod.EMAIL
try:
template_config_data = template_selector(
request=request,
user=user,
notification_method=notification_method,
notification_type=notification_type,
)
except (VerificationTemplatesNotFound, LookupError):
template_config_data = select_default_templates(
request=request,
user=user,
notification_method=notification_method,
notification_type=notification_type,
)
return template_config_data
|
tests/components/image/__init__.py | tbarbette/core | 30,023 | 12605091 | <reponame>tbarbette/core
"""Tests for the Image integration."""
|
tools/authgen.py | abharath27/azure-libraries-for-net | 365 | 12605109 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import subprocess
import json
import re
# Endpoints
p = subprocess.Popen('az cloud show -o json --query "{managementURI: endpoints.management, baseURL: endpoints.resourceManager, authURL: endpoints.activeDirectory, graphURL: endpoints.activeDirectoryGraphResourceId}"', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.wait()
cloud = json.loads(p.stdout.read().decode("utf8"))
# Subscription
p = subprocess.Popen('az account show -o json --query "{subscription: id}"', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.wait()
account = json.loads(p.stdout.read().decode("utf8"))
# Service principal
p = subprocess.Popen('az ad sp create-for-rbac -o json --query "{client: appId, key: password, tenant: tenant}"', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.wait()
out = p.stdout.read()
out = re.sub(b"[^.]*{", b"{", out)
sp = json.loads(out.decode("utf8"))
for key,value in sp.items():
print(key + "=" + value)
for key,value in account.items():
print(key + "=" + value)
for key,value in cloud.items():
if (not value.endswith("/")):
value = value + "/"
print(key + "=" + value.replace("https://", r"https\://"))
|
leetcode.com/python/373_Find_K_Pairs_with_Smallest_Sums.py | vansh-tiwari/coding-interview-gym | 713 | 12605119 | <gh_stars>100-1000
import heapq
class Solution(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
maxHeap = []
for nums1Idx in range(min(k, len(nums1))):
for nums2Idx in range(min(k, len(nums2))):
currentPairSum = nums1[nums1Idx] + nums2[nums2Idx]
if len(maxHeap) < k:
heapq.heappush(maxHeap, (-currentPairSum, nums1Idx, nums2Idx))
else:
if currentPairSum < (-maxHeap[0][0]):
heapq.heappop(maxHeap)
heapq.heappush(maxHeap, (-currentPairSum, nums1Idx, nums2Idx))
else:
break
smallestPairs = []
for (currentPairSum, nums1Idx, nums2Idx) in maxHeap:
smallestPairs.append([nums1[nums1Idx], nums2[nums2Idx]])
return smallestPairs
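# --- Hedged usage sketch (added for illustration; not part of the original
# solution file). The input is the classic LeetCode 373 example: the three
# smallest pair sums are 1+2, 1+4 and 1+6. The max-heap above does not
# guarantee any particular ordering, so the result is sorted before printing.
if __name__ == "__main__":
    solver = Solution()
    pairs = solver.kSmallestPairs([1, 7, 11], [2, 4, 6], 3)
    print(sorted(pairs))  # [[1, 2], [1, 4], [1, 6]]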
|
src/genie/libs/parser/iosxr/tests/ShowProcessMemory/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 12605183 |
expected_output = {
'jid': {1: {'index': {1: {'data': 344,
'dynamic': 0,
'jid': 1,
'process': 'init',
'stack': 136,
'text': 296}}},
51: {'index': {1: {'data': 1027776,
'dynamic': 5668,
'jid': 51,
'process': 'processmgr',
'stack': 136,
'text': 1372}}},
53: {'index': {1: {'data': 342500,
'dynamic': 7095,
'jid': 53,
'process': 'dsr',
'stack': 136,
'text': 32}}},
111: {'index': {1: {'data': 531876,
'dynamic': 514,
'jid': 111,
'process': 'devc-conaux-aux',
'stack': 136,
'text': 8}}},
112: {'index': {1: {'data': 861144,
'dynamic': 957,
'jid': 112,
'process': 'qsm',
'stack': 136,
'text': 144}}},
113: {'index': {1: {'data': 400776,
'dynamic': 671,
'jid': 113,
'process': 'spp',
'stack': 136,
'text': 328}}},
114: {'index': {1: {'data': 531912,
'dynamic': 545,
'jid': 114,
'process': 'devc-conaux-con',
'stack': 136,
'text': 8}}},
115: {'index': {1: {'data': 662452,
'dynamic': 366,
'jid': 115,
'process': 'syslogd_helper',
'stack': 136,
'text': 52}}},
118: {'index': {1: {'data': 200748,
'dynamic': 426,
'jid': 118,
'process': 'shmwin_svr',
'stack': 136,
'text': 56}}},
119: {'index': {1: {'data': 397880,
'dynamic': 828,
'jid': 119,
'process': 'syslog_dev',
'stack': 136,
'text': 12}}},
121: {'index': {1: {'data': 470008,
'dynamic': 6347,
'jid': 121,
'process': 'calv_alarm_mgr',
'stack': 136,
'text': 504}}},
122: {'index': {1: {'data': 1003480,
'dynamic': 2838,
'jid': 122,
'process': 'udp',
'stack': 136,
'text': 180}}},
123: {'index': {1: {'data': 529852,
'dynamic': 389,
'jid': 123,
'process': 'enf_broker',
'stack': 136,
'text': 40}}},
124: {'index': {1: {'data': 200120,
'dynamic': 351,
'jid': 124,
'process': 'procfs_server',
'stack': 168,
'text': 20}}},
125: {'index': {1: {'data': 333592,
'dynamic': 1506,
'jid': 125,
'process': 'pifibm_server_rp',
'stack': 136,
'text': 312}}},
126: {'index': {1: {'data': 399332,
'dynamic': 305,
'jid': 126,
'process': 'ltrace_sync',
'stack': 136,
'text': 28}}},
127: {'index': {1: {'data': 797548,
'dynamic': 2573,
'jid': 127,
'process': 'ifindex_server',
'stack': 136,
'text': 96}}},
128: {'index': {1: {'data': 532612,
'dynamic': 3543,
'jid': 128,
'process': 'eem_ed_test',
'stack': 136,
'text': 44}}},
130: {'index': {1: {'data': 200120,
'dynamic': 257,
'jid': 130,
'process': 'igmp_policy_reg_agent',
'stack': 136,
'text': 8}}},
132: {'index': {1: {'data': 200628,
'dynamic': 280,
'jid': 132,
'process': 'show_mediang_edm',
'stack': 136,
'text': 20}}},
134: {'index': {1: {'data': 466168,
'dynamic': 580,
'jid': 134,
'process': 'ipv4_acl_act_agent',
'stack': 136,
'text': 28}}},
136: {'index': {1: {'data': 997196,
'dynamic': 5618,
'jid': 136,
'process': 'resmon',
'stack': 136,
'text': 188}}},
137: {'index': {1: {'data': 534184,
'dynamic': 2816,
'jid': 137,
'process': 'bundlemgr_local',
'stack': 136,
'text': 612}}},
138: {'index': {1: {'data': 200652,
'dynamic': 284,
'jid': 138,
'process': 'chkpt_proxy',
'stack': 136,
'text': 16}}},
139: {'index': {1: {'data': 200120,
'dynamic': 257,
'jid': 139,
'process': 'lisp_xr_policy_reg_agent',
'stack': 136,
'text': 8}}},
141: {'index': {1: {'data': 200648,
'dynamic': 246,
'jid': 141,
'process': 'linux_nto_misc_showd',
'stack': 136,
'text': 20}}},
143: {'index': {1: {'data': 200644,
'dynamic': 247,
'jid': 143,
'process': 'procfind',
'stack': 136,
'text': 20}}},
146: {'index': {1: {'data': 200240,
'dynamic': 275,
'jid': 146,
'process': 'bgp_policy_reg_agent',
'stack': 136,
'text': 28}}},
147: {'index': {1: {'data': 201332,
'dynamic': 418,
'jid': 147,
'process': 'type6_server',
'stack': 136,
'text': 68}}},
149: {'index': {1: {'data': 663524,
'dynamic': 1297,
'jid': 149,
'process': 'clns',
'stack': 136,
'text': 188}}},
152: {'index': {1: {'data': 532616,
'dynamic': 3541,
'jid': 152,
'process': 'eem_ed_none',
'stack': 136,
'text': 52}}},
154: {'index': {1: {'data': 729896,
'dynamic': 1046,
'jid': 154,
'process': 'ipv4_acl_mgr',
'stack': 136,
'text': 140}}},
155: {'index': {1: {'data': 200120,
'dynamic': 261,
'jid': 155,
'process': 'ospf_policy_reg_agent',
'stack': 136,
'text': 12}}},
157: {'index': {1: {'data': 200908,
'dynamic': 626,
'jid': 157,
'process': 'ssh_key_server',
'stack': 136,
'text': 44}}},
158: {'index': {1: {'data': 200628,
'dynamic': 285,
'jid': 158,
'process': 'heap_summary_edm',
'stack': 136,
'text': 20}}},
161: {'index': {1: {'data': 200640,
'dynamic': 297,
'jid': 161,
'process': 'cmp_edm',
'stack': 136,
'text': 16}}},
162: {'index': {1: {'data': 267456,
'dynamic': 693,
'jid': 162,
'process': 'ip_aps',
'stack': 136,
'text': 52}}},
166: {'index': {1: {'data': 935480,
'dynamic': 8194,
'jid': 166,
'process': 'mpls_lsd',
'stack': 136,
'text': 1108}}},
167: {'index': {1: {'data': 730776,
'dynamic': 3649,
'jid': 167,
'process': 'ipv6_ma',
'stack': 136,
'text': 540}}},
168: {'index': {1: {'data': 266788,
'dynamic': 589,
'jid': 168,
'process': 'nd_partner',
'stack': 136,
'text': 36}}},
169: {'index': {1: {'data': 735000,
'dynamic': 6057,
'jid': 169,
'process': 'ipsub_ma',
'stack': 136,
'text': 680}}},
171: {'index': {1: {'data': 266432,
'dynamic': 530,
'jid': 171,
'process': 'shelf_mgr_proxy',
'stack': 136,
'text': 16}}},
172: {'index': {1: {'data': 200604,
'dynamic': 253,
'jid': 172,
'process': 'early_fast_discard_verifier',
'stack': 136,
'text': 16}}},
174: {'index': {1: {'data': 200096,
'dynamic': 256,
'jid': 174,
'process': 'bundlemgr_checker',
'stack': 136,
'text': 56}}},
175: {'index': {1: {'data': 200120,
'dynamic': 248,
'jid': 175,
'process': 'syslog_infra_hm',
'stack': 136,
'text': 12}}},
177: {'index': {1: {'data': 200112,
'dynamic': 241,
'jid': 177,
'process': 'meminfo_svr',
'stack': 136,
'text': 8}}},
178: {'index': {1: {'data': 468272,
'dynamic': 2630,
'jid': 178,
'process': 'accounting_ma',
'stack': 136,
'text': 264}}},
180: {'index': {1: {'data': 1651090,
'dynamic': 242,
'jid': 180,
'process': 'aipc_cleaner',
'stack': 136,
'text': 8}}},
181: {'index': {1: {'data': 201280,
'dynamic': 329,
'jid': 181,
'process': 'nsr_ping_reply',
'stack': 136,
'text': 16}}},
182: {'index': {1: {'data': 334236,
'dynamic': 843,
'jid': 182,
'process': 'spio_ma',
'stack': 136,
'text': 4}}},
183: {'index': {1: {'data': 266788,
'dynamic': 607,
'jid': 183,
'process': 'statsd_server',
'stack': 136,
'text': 40}}},
184: {'index': {1: {'data': 407016,
'dynamic': 8579,
'jid': 184,
'process': 'subdb_svr',
'stack': 136,
'text': 368}}},
186: {'index': {1: {'data': 932992,
'dynamic': 3072,
'jid': 186,
'process': 'smartlicserver',
'stack': 136,
'text': 16}}},
187: {'index': {1: {'data': 200120,
'dynamic': 259,
'jid': 187,
'process': 'rip_policy_reg_agent',
'stack': 136,
'text': 8}}},
188: {'index': {1: {'data': 533704,
'dynamic': 3710,
'jid': 188,
'process': 'eem_ed_nd',
'stack': 136,
'text': 60}}},
189: {'index': {1: {'data': 401488,
'dynamic': 3499,
'jid': 189,
'process': 'ifmgr',
'stack': 136,
'text': 4}}},
190: {'index': {1: {'data': 1001552,
'dynamic': 3082,
'jid': 190,
'process': 'rdsfs_svr',
'stack': 136,
'text': 196}}},
191: {'index': {1: {'data': 398300,
'dynamic': 632,
'jid': 191,
'process': 'hostname_sync',
'stack': 136,
'text': 12}}},
192: {'index': {1: {'data': 466168,
'dynamic': 570,
'jid': 192,
'process': 'l2vpn_policy_reg_agent',
'stack': 136,
'text': 20}}},
193: {'index': {1: {'data': 665096,
'dynamic': 1405,
'jid': 193,
'process': 'ntpd',
'stack': 136,
'text': 344}}},
194: {'index': {1: {'data': 794692,
'dynamic': 2629,
'jid': 194,
'process': 'nrssvr',
'stack': 136,
'text': 180}}},
195: {'index': {1: {'data': 531776,
'dynamic': 748,
'jid': 195,
'process': 'ipv4_io',
'stack': 136,
'text': 256}}},
196: {'index': {1: {'data': 200624,
'dynamic': 274,
'jid': 196,
'process': 'domain_sync',
'stack': 136,
'text': 16}}},
197: {'index': {1: {'data': 1015252,
'dynamic': 21870,
'jid': 197,
'process': 'parser_server',
'stack': 136,
'text': 304}}},
198: {'index': {1: {'data': 532612,
'dynamic': 3540,
'jid': 198,
'process': 'eem_ed_config',
'stack': 136,
'text': 56}}},
199: {'index': {1: {'data': 200648,
'dynamic': 282,
'jid': 199,
'process': 'cerrno_server',
'stack': 136,
'text': 48}}},
200: {'index': {1: {'data': 531264,
'dynamic': 1810,
'jid': 200,
'process': 'ipv4_arm',
'stack': 136,
'text': 344}}},
201: {'index': {1: {'data': 268968,
'dynamic': 1619,
'jid': 201,
'process': 'session_mon',
'stack': 136,
'text': 68}}},
202: {'index': {1: {'data': 864208,
'dynamic': 3472,
'jid': 202,
'process': 'netio',
'stack': 136,
'text': 292}}},
204: {'index': {1: {'data': 268932,
'dynamic': 2122,
'jid': 204,
'process': 'ether_caps_partner',
'stack': 136,
'text': 152}}},
205: {'index': {1: {'data': 201168,
'dynamic': 254,
'jid': 205,
'process': 'sunstone_stats_svr',
'stack': 136,
'text': 28}}},
206: {'index': {1: {'data': 794684,
'dynamic': 2967,
'jid': 206,
'process': 'sysdb_shared_nc',
'stack': 136,
'text': 4}}},
207: {'index': {1: {'data': 601736,
'dynamic': 2823,
'jid': 207,
'process': 'yang_server',
'stack': 136,
'text': 268}}},
208: {'index': {1: {'data': 200096,
'dynamic': 251,
'jid': 208,
'process': 'ipodwdm',
'stack': 136,
'text': 16}}},
209: {'index': {1: {'data': 200656,
'dynamic': 253,
'jid': 209,
'process': 'crypto_edm',
'stack': 136,
'text': 24}}},
210: {'index': {1: {'data': 878632,
'dynamic': 13237,
'jid': 210,
'process': 'nvgen_server',
'stack': 136,
'text': 244}}},
211: {'index': {1: {'data': 334080,
'dynamic': 2169,
'jid': 211,
'process': 'pfilter_ma',
'stack': 136,
'text': 228}}},
213: {'index': {1: {'data': 531840,
'dynamic': 1073,
'jid': 213,
'process': 'kim',
'stack': 136,
'text': 428}}},
216: {'index': {1: {'data': 267224,
'dynamic': 451,
'jid': 216,
'process': 'showd_lc',
'stack': 136,
'text': 64}}},
217: {'index': {1: {'data': 406432,
'dynamic': 4666,
'jid': 217,
'process': 'pppoe_ma',
'stack': 136,
'text': 520}}},
218: {'index': {1: {'data': 664484,
'dynamic': 2602,
'jid': 218,
'process': 'l2rib',
'stack': 136,
'text': 484}}},
220: {'index': {1: {'data': 598812,
'dynamic': 3443,
'jid': 220,
'process': 'eem_ed_syslog',
'stack': 136,
'text': 60}}},
221: {'index': {1: {'data': 267264,
'dynamic': 290,
'jid': 221,
'process': 'lpts_fm',
'stack': 136,
'text': 52}}},
222: {'index': {1: {'data': 205484,
'dynamic': 5126,
'jid': 222,
'process': 'mpa_fm_svr',
'stack': 136,
'text': 12}}},
243: {'index': {1: {'data': 267576,
'dynamic': 990,
'jid': 243,
'process': 'spio_ea',
'stack': 136,
'text': 8}}},
244: {'index': {1: {'data': 200632,
'dynamic': 247,
'jid': 244,
'process': 'mempool_edm',
'stack': 136,
'text': 8}}},
245: {'index': {1: {'data': 532624,
'dynamic': 3541,
'jid': 245,
'process': 'eem_ed_counter',
'stack': 136,
'text': 48}}},
247: {'index': {1: {'data': 1010268,
'dynamic': 1923,
'jid': 247,
'process': 'cfgmgr-rp',
'stack': 136,
'text': 344}}},
248: {'index': {1: {'data': 465260,
'dynamic': 1243,
'jid': 248,
'process': 'alarm-logger',
'stack': 136,
'text': 104}}},
249: {'index': {1: {'data': 797376,
'dynamic': 1527,
'jid': 249,
'process': 'locald_DLRSC',
'stack': 136,
'text': 604}}},
250: {'index': {1: {'data': 265800,
'dynamic': 438,
'jid': 250,
'process': 'lcp_mgr',
'stack': 136,
'text': 12}}},
251: {'index': {1: {'data': 265840,
'dynamic': 712,
'jid': 251,
'process': 'tamfs',
'stack': 136,
'text': 32}}},
252: {'index': {1: {'data': 531384,
'dynamic': 7041,
'jid': 252,
'process': 'sysdb_svr_local',
'stack': 136,
'text': 4}}},
253: {'index': {1: {'data': 200672,
'dynamic': 256,
'jid': 253,
'process': 'tty_show_users_edm',
'stack': 136,
'text': 32}}},
254: {'index': {1: {'data': 534032,
'dynamic': 4463,
'jid': 254,
'process': 'eem_ed_generic',
'stack': 136,
'text': 96}}},
255: {'index': {1: {'data': 201200,
'dynamic': 409,
'jid': 255,
'process': 'ipv6_acl_cfg_agent',
'stack': 136,
'text': 32}}},
256: {'index': {1: {'data': 334104,
'dynamic': 756,
'jid': 256,
'process': 'mpls_vpn_mib',
'stack': 136,
'text': 156}}},
257: {'index': {1: {'data': 267888,
'dynamic': 339,
'jid': 257,
'process': 'bundlemgr_adj',
'stack': 136,
'text': 156}}},
258: {'index': {1: {'data': 1651090,
'dynamic': 244,
'jid': 258,
'process': 'file_paltx',
'stack': 136,
'text': 16}}},
259: {'index': {1: {'data': 1000600,
'dynamic': 6088,
'jid': 259,
'process': 'ipv6_nd',
'stack': 136,
'text': 1016}}},
260: {'index': {1: {'data': 533044,
'dynamic': 1793,
'jid': 260,
'process': 'sdr_instagt',
'stack': 136,
'text': 260}}},
261: {'index': {1: {'data': 334860,
'dynamic': 806,
'jid': 261,
'process': 'ipsec_pp',
'stack': 136,
'text': 220}}},
266: {'index': {1: {'data': 266344,
'dynamic': 717,
'jid': 266,
'process': 'pm_server',
'stack': 136,
'text': 92}}},
267: {'index': {1: {'data': 598760,
'dynamic': 2768,
'jid': 267,
'process': 'object_tracking',
'stack': 136,
'text': 204}}},
268: {'index': {1: {'data': 200700,
'dynamic': 417,
'jid': 268,
'process': 'wdsysmon_fd_edm',
'stack': 136,
'text': 20}}},
269: {'index': {1: {'data': 664752,
'dynamic': 2513,
'jid': 269,
'process': 'eth_mgmt',
'stack': 136,
'text': 60}}},
270: {'index': {1: {'data': 200064,
'dynamic': 257,
'jid': 270,
'process': 'gcp_fib_verifier',
'stack': 136,
'text': 20}}},
271: {'index': {1: {'data': 400624,
'dynamic': 2348,
'jid': 271,
'process': 'rsi_agent',
'stack': 136,
'text': 580}}},
272: {'index': {1: {'data': 794692,
'dynamic': 1425,
'jid': 272,
'process': 'nrssvr_global',
'stack': 136,
'text': 180}}},
273: {'index': {1: {'data': 494124,
'dynamic': 19690,
'jid': 273,
'process': 'invmgr_proxy',
'stack': 136,
'text': 112}}},
275: {'index': {1: {'data': 199552,
'dynamic': 264,
'jid': 275,
'process': 'nsr_fo',
'stack': 136,
'text': 12}}},
276: {'index': {1: {'data': 202328,
'dynamic': 436,
'jid': 276,
'process': 'mpls_fwd_show_proxy',
'stack': 136,
'text': 204}}},
277: {'index': {1: {'data': 267112,
'dynamic': 688,
'jid': 277,
'process': 'tam_sync',
'stack': 136,
'text': 44}}},
278: {'index': {1: {'data': 200120,
'dynamic': 259,
'jid': 278,
'process': 'mldp_policy_reg_agent',
'stack': 136,
'text': 8}}},
290: {'index': {1: {'data': 200640,
'dynamic': 262,
'jid': 290,
'process': 'sh_proc_mem_edm',
'stack': 136,
'text': 20}}},
291: {'index': {1: {'data': 794684,
'dynamic': 3678,
'jid': 291,
'process': 'sysdb_shared_sc',
'stack': 136,
'text': 4}}},
293: {'index': {1: {'data': 200120,
'dynamic': 259,
'jid': 293,
'process': 'pim6_policy_reg_agent',
'stack': 136,
'text': 8}}},
294: {'index': {1: {'data': 267932,
'dynamic': 1495,
'jid': 294,
'process': 'issumgr',
'stack': 136,
'text': 560}}},
295: {'index': {1: {'data': 266744,
'dynamic': 296,
'jid': 295,
'process': 'vlan_ea',
'stack': 136,
'text': 220}}},
296: {'index': {1: {'data': 796404,
'dynamic': 1902,
'jid': 296,
'process': 'correlatord',
'stack': 136,
'text': 292}}},
297: {'index': {1: {'data': 201304,
'dynamic': 367,
'jid': 297,
'process': 'imaedm_server',
'stack': 136,
'text': 56}}},
298: {'index': {1: {'data': 200224,
'dynamic': 246,
'jid': 298,
'process': 'ztp_cfg',
'stack': 136,
'text': 12}}},
299: {'index': {1: {'data': 268000,
'dynamic': 459,
'jid': 299,
'process': 'ipv6_ea',
'stack': 136,
'text': 92}}},
301: {'index': {1: {'data': 200644,
'dynamic': 250,
'jid': 301,
'process': 'sysmgr_show_proc_all_edm',
'stack': 136,
'text': 88}}},
303: {'index': {1: {'data': 399360,
'dynamic': 882,
'jid': 303,
'process': 'tftp_fs',
'stack': 136,
'text': 68}}},
304: {'index': {1: {'data': 202220,
'dynamic': 306,
'jid': 304,
'process': 'ncd',
'stack': 136,
'text': 32}}},
305: {'index': {1: {'data': 1001716,
'dynamic': 9508,
'jid': 305,
'process': 'gsp',
'stack': 136,
'text': 1096}}},
306: {'index': {1: {'data': 794684,
'dynamic': 1792,
'jid': 306,
'process': 'sysdb_svr_admin',
'stack': 136,
'text': 4}}},
308: {'index': {1: {'data': 333172,
'dynamic': 538,
'jid': 308,
'process': 'devc-vty',
'stack': 136,
'text': 8}}},
309: {'index': {1: {'data': 1012628,
'dynamic': 9404,
'jid': 309,
'process': 'tcp',
'stack': 136,
'text': 488}}},
310: {'index': {1: {'data': 333572,
'dynamic': 2092,
'jid': 310,
'process': 'daps',
'stack': 136,
'text': 512}}},
312: {'index': {1: {'data': 200620,
'dynamic': 283,
'jid': 312,
'process': 'ipv6_assembler',
'stack': 136,
'text': 36}}},
313: {'index': {1: {'data': 199844,
'dynamic': 551,
'jid': 313,
'process': 'ssh_key_client',
'stack': 136,
'text': 48}}},
314: {'index': {1: {'data': 332076,
'dynamic': 371,
'jid': 314,
'process': 'timezone_config',
'stack': 136,
'text': 28}}},
316: {'index': {1: {'data': 531560,
'dynamic': 2016,
'jid': 316,
'process': 'bcdls',
'stack': 136,
'text': 112}}},
317: {'index': {1: {'data': 531560,
'dynamic': 2015,
'jid': 317,
'process': 'bcdls',
'stack': 136,
'text': 112}}},
318: {'index': {1: {'data': 532344,
'dynamic': 2874,
'jid': 318,
'process': 'bcdls',
'stack': 136,
'text': 112}}},
319: {'index': {1: {'data': 532344,
'dynamic': 2874,
'jid': 319,
'process': 'bcdls',
'stack': 136,
'text': 112}}},
320: {'index': {1: {'data': 531556,
'dynamic': 2013,
'jid': 320,
'process': 'bcdls',
'stack': 136,
'text': 112}}},
326: {'index': {1: {'data': 398256,
'dynamic': 348,
'jid': 326,
'process': 'sld',
'stack': 136,
'text': 116}}},
327: {'index': {1: {'data': 997196,
'dynamic': 3950,
'jid': 327,
'process': 'eem_policy_dir',
'stack': 136,
'text': 268}}},
329: {'index': {1: {'data': 267464,
'dynamic': 434,
'jid': 329,
'process': 'mpls_io_ea',
'stack': 136,
'text': 108}}},
332: {'index': {1: {'data': 332748,
'dynamic': 276,
'jid': 332,
'process': 'redstatsd',
'stack': 136,
'text': 20}}},
333: {'index': {1: {'data': 799488,
'dynamic': 4511,
'jid': 333,
'process': 'rsi_master',
'stack': 136,
'text': 404}}},
334: {'index': {1: {'data': 333648,
'dynamic': 351,
'jid': 334,
'process': 'sconbkup',
'stack': 136,
'text': 12}}},
336: {'index': {1: {'data': 199440,
'dynamic': 204,
'jid': 336,
'process': 'pam_manager',
'stack': 136,
'text': 12}}},
337: {'index': {1: {'data': 600644,
'dynamic': 3858,
'jid': 337,
'process': 'nve_mgr',
'stack': 136,
'text': 204}}},
339: {'index': {1: {'data': 266800,
'dynamic': 679,
'jid': 339,
'process': 'rmf_svr',
'stack': 136,
'text': 140}}},
341: {'index': {1: {'data': 465864,
'dynamic': 1145,
'jid': 341,
'process': 'ipv6_io',
'stack': 136,
'text': 160}}},
342: {'index': {1: {'data': 864468,
'dynamic': 1011,
'jid': 342,
'process': 'syslogd',
'stack': 136,
'text': 224}}},
343: {'index': {1: {'data': 663932,
'dynamic': 1013,
'jid': 343,
'process': 'ipv6_acl_daemon',
'stack': 136,
'text': 212}}},
344: {'index': {1: {'data': 996048,
'dynamic': 2352,
'jid': 344,
'process': 'plat_sl_client',
'stack': 136,
'text': 108}}},
346: {'index': {1: {'data': 598152,
'dynamic': 778,
'jid': 346,
'process': 'cinetd',
'stack': 136,
'text': 136}}},
347: {'index': {1: {'data': 200648,
'dynamic': 261,
'jid': 347,
'process': 'debug_d',
'stack': 136,
'text': 24}}},
349: {'index': {1: {'data': 200612,
'dynamic': 284,
'jid': 349,
'process': 'debug_d_admin',
'stack': 136,
'text': 20}}},
350: {'index': {1: {'data': 399188,
'dynamic': 1344,
'jid': 350,
'process': 'vm-monitor',
'stack': 136,
'text': 72}}},
352: {'index': {1: {'data': 465844,
'dynamic': 1524,
'jid': 352,
'process': 'lpts_pa',
'stack': 136,
'text': 308}}},
353: {'index': {1: {'data': 1002896,
'dynamic': 5160,
'jid': 353,
'process': 'call_home',
'stack': 136,
'text': 728}}},
355: {'index': {1: {'data': 994116,
'dynamic': 7056,
'jid': 355,
'process': 'eem_server',
'stack': 136,
'text': 292}}},
356: {'index': {1: {'data': 200720,
'dynamic': 396,
'jid': 356,
'process': 'tcl_secure_mode',
'stack': 136,
'text': 8}}},
357: {'index': {1: {'data': 202040,
'dynamic': 486,
'jid': 357,
'process': 'tamsvcs_tamm',
'stack': 136,
'text': 36}}},
359: {'index': {1: {'data': 531256,
'dynamic': 1788,
'jid': 359,
'process': 'ipv6_arm',
'stack': 136,
'text': 328}}},
360: {'index': {1: {'data': 201196,
'dynamic': 363,
'jid': 360,
'process': 'fwd_driver_partner',
'stack': 136,
'text': 88}}},
361: {'index': {1: {'data': 533872,
'dynamic': 2637,
'jid': 361,
'process': 'ipv6_mfwd_partner',
'stack': 136,
'text': 836}}},
362: {'index': {1: {'data': 932680,
'dynamic': 3880,
'jid': 362,
'process': 'arp',
'stack': 136,
'text': 728}}},
363: {'index': {1: {'data': 202024,
'dynamic': 522,
'jid': 363,
'process': 'cepki',
'stack': 136,
'text': 96}}},
364: {'index': {1: {'data': 1001736,
'dynamic': 4343,
'jid': 364,
'process': 'fib_mgr',
'stack': 136,
'text': 3580}}},
365: {'index': {1: {'data': 269016,
'dynamic': 2344,
'jid': 365,
'process': 'pim_ma',
'stack': 136,
'text': 56}}},
368: {'index': {1: {'data': 1002148,
'dynamic': 3111,
'jid': 368,
'process': 'raw_ip',
'stack': 136,
'text': 124}}},
369: {'index': {1: {'data': 464272,
'dynamic': 625,
'jid': 369,
'process': 'ltrace_server',
'stack': 136,
'text': 40}}},
371: {'index': {1: {'data': 200572,
'dynamic': 279,
'jid': 371,
'process': 'netio_debug_partner',
'stack': 136,
'text': 24}}},
372: {'index': {1: {'data': 200120,
'dynamic': 259,
'jid': 372,
'process': 'pim_policy_reg_agent',
'stack': 136,
'text': 8}}},
373: {'index': {1: {'data': 333240,
'dynamic': 1249,
'jid': 373,
'process': 'policymgr_rp',
'stack': 136,
'text': 592}}},
375: {'index': {1: {'data': 200624,
'dynamic': 290,
'jid': 375,
'process': 'loopback_caps_partner',
'stack': 136,
'text': 32}}},
376: {'index': {1: {'data': 467420,
'dynamic': 3815,
'jid': 376,
'process': 'eem_ed_sysmgr',
'stack': 136,
'text': 76}}},
377: {'index': {1: {'data': 333636,
'dynamic': 843,
'jid': 377,
'process': 'mpls_io',
'stack': 136,
'text': 140}}},
378: {'index': {1: {'data': 200120,
'dynamic': 258,
'jid': 378,
'process': 'ospfv3_policy_reg_agent',
'stack': 136,
'text': 8}}},
380: {'index': {1: {'data': 333604,
'dynamic': 520,
'jid': 380,
'process': 'fhrp_output',
'stack': 136,
'text': 124}}},
381: {'index': {1: {'data': 533872,
'dynamic': 2891,
'jid': 381,
'process': 'ipv4_mfwd_partner',
'stack': 136,
'text': 828}}},
382: {'index': {1: {'data': 465388,
'dynamic': 538,
'jid': 382,
'process': 'packet',
'stack': 136,
'text': 132}}},
383: {'index': {1: {'data': 333284,
'dynamic': 359,
'jid': 383,
'process': 'dumper',
'stack': 136,
'text': 40}}},
384: {'index': {1: {'data': 200636,
'dynamic': 244,
'jid': 384,
'process': 'showd_server',
'stack': 136,
'text': 12}}},
385: {'index': {1: {'data': 603424,
'dynamic': 3673,
'jid': 385,
'process': 'ipsec_mp',
'stack': 136,
'text': 592}}},
388: {'index': {1: {'data': 729160,
'dynamic': 836,
'jid': 388,
'process': 'bcdl_agent',
'stack': 136,
'text': 176}}},
389: {'index': {1: {'data': 729880,
'dynamic': 1066,
'jid': 389,
'process': 'bcdl_agent',
'stack': 136,
'text': 176}}},
390: {'index': {1: {'data': 663828,
'dynamic': 1384,
'jid': 390,
'process': 'bcdl_agent',
'stack': 136,
'text': 176}}},
391: {'index': {1: {'data': 795416,
'dynamic': 1063,
'jid': 391,
'process': 'bcdl_agent',
'stack': 136,
'text': 176}}},
401: {'index': {1: {'data': 466148,
'dynamic': 579,
'jid': 401,
'process': 'es_acl_act_agent',
'stack': 136,
'text': 20}}},
402: {'index': {1: {'data': 597352,
'dynamic': 1456,
'jid': 402,
'process': 'vi_config_replicator',
'stack': 136,
'text': 40}}},
403: {'index': {1: {'data': 532624,
'dynamic': 3546,
'jid': 403,
'process': 'eem_ed_timer',
'stack': 136,
'text': 64}}},
405: {'index': {1: {'data': 664196,
'dynamic': 2730,
'jid': 405,
'process': 'pm_collector',
'stack': 136,
'text': 732}}},
406: {'index': {1: {'data': 868076,
'dynamic': 5739,
'jid': 406,
'process': 'ppp_ma',
'stack': 136,
'text': 1268}}},
407: {'index': {1: {'data': 794684,
'dynamic': 1753,
'jid': 407,
'process': 'sysdb_shared_data_nc',
'stack': 136,
'text': 4}}},
408: {'index': {1: {'data': 415316,
'dynamic': 16797,
'jid': 408,
'process': 'statsd_manager_l',
'stack': 136,
'text': 4}}},
409: {'index': {1: {'data': 946780,
'dynamic': 16438,
'jid': 409,
'process': 'iedged',
'stack': 136,
'text': 1824}}},
411: {'index': {1: {'data': 542460,
'dynamic': 17658,
'jid': 411,
'process': 'sysdb_mc',
'stack': 136,
'text': 388}}},
412: {'index': {1: {'data': 1003624,
'dynamic': 5783,
'jid': 412,
'process': 'l2fib_mgr',
'stack': 136,
'text': 1808}}},
413: {'index': {1: {'data': 401532,
'dynamic': 2851,
'jid': 413,
'process': 'aib',
'stack': 136,
'text': 256}}},
414: {'index': {1: {'data': 266776,
'dynamic': 440,
'jid': 414,
'process': 'rmf_cli_edm',
'stack': 136,
'text': 32}}},
415: {'index': {1: {'data': 399116,
'dynamic': 895,
'jid': 415,
'process': 'ether_sock',
'stack': 136,
'text': 28}}},
416: {'index': {1: {'data': 200980,
'dynamic': 275,
'jid': 416,
'process': 'shconf-edm',
'stack': 136,
'text': 32}}},
417: {'index': {1: {'data': 532108,
'dynamic': 3623,
'jid': 417,
'process': 'eem_ed_stats',
'stack': 136,
'text': 60}}},
418: {'index': {1: {'data': 532288,
'dynamic': 2306,
'jid': 418,
'process': 'ipv4_ma',
'stack': 136,
'text': 540}}},
419: {'index': {1: {'data': 689020,
'dynamic': 15522,
'jid': 419,
'process': 'sdr_invmgr',
'stack': 136,
'text': 144}}},
420: {'index': {1: {'data': 466456,
'dynamic': 1661,
'jid': 420,
'process': 'http_client',
'stack': 136,
'text': 96}}},
421: {'index': {1: {'data': 201152,
'dynamic': 285,
'jid': 421,
'process': 'pak_capture_partner',
'stack': 136,
'text': 16}}},
422: {'index': {1: {'data': 200016,
'dynamic': 267,
'jid': 422,
'process': 'bag_schema_svr',
'stack': 136,
'text': 36}}},
424: {'index': {1: {'data': 604932,
'dynamic': 8135,
'jid': 424,
'process': 'issudir',
'stack': 136,
'text': 212}}},
425: {'index': {1: {'data': 466796,
'dynamic': 1138,
'jid': 425,
'process': 'l2snoop',
'stack': 136,
'text': 104}}},
426: {'index': {1: {'data': 331808,
'dynamic': 444,
'jid': 426,
'process': 'ssm_process',
'stack': 136,
'text': 56}}},
427: {'index': {1: {'data': 200120,
'dynamic': 245,
'jid': 427,
'process': 'media_server',
'stack': 136,
'text': 16}}},
428: {'index': {1: {'data': 267340,
'dynamic': 432,
'jid': 428,
'process': 'ip_app',
'stack': 136,
'text': 48}}},
429: {'index': {1: {'data': 269032,
'dynamic': 2344,
'jid': 429,
'process': 'pim6_ma',
'stack': 136,
'text': 56}}},
431: {'index': {1: {'data': 200416,
'dynamic': 390,
'jid': 431,
'process': 'local_sock',
'stack': 136,
'text': 16}}},
432: {'index': {1: {'data': 265704,
'dynamic': 269,
'jid': 432,
'process': 'crypto_monitor',
'stack': 136,
'text': 68}}},
433: {'index': {1: {'data': 597624,
'dynamic': 1860,
'jid': 433,
'process': 'ema_server_sdr',
'stack': 136,
'text': 112}}},
434: {'index': {1: {'data': 200120,
'dynamic': 259,
'jid': 434,
'process': 'isis_policy_reg_agent',
'stack': 136,
'text': 8}}},
435: {'index': {1: {'data': 200120,
'dynamic': 261,
'jid': 435,
'process': 'eigrp_policy_reg_agent',
'stack': 136,
'text': 12}}},
437: {'index': {1: {'data': 794096,
'dynamic': 776,
'jid': 437,
'process': 'cdm_rs',
'stack': 136,
'text': 80}}},
1003: {'index': {1: {'data': 798196,
'dynamic': 3368,
'jid': 1003,
'process': 'eigrp',
'stack': 136,
'text': 936}}},
1011: {'index': {1: {'data': 1006776,
'dynamic': 8929,
'jid': 1011,
'process': 'isis',
'stack': 136,
'text': 4888}}},
1012: {'index': {1: {'data': 1006776,
'dynamic': 8925,
'jid': 1012,
'process': 'isis',
'stack': 136,
'text': 4888}}},
1027: {'index': {1: {'data': 1012376,
'dynamic': 14258,
'jid': 1027,
'process': 'ospf',
'stack': 136,
'text': 2880}}},
1046: {'index': {1: {'data': 804288,
'dynamic': 8673,
'jid': 1046,
'process': 'ospfv3',
'stack': 136,
'text': 1552}}},
1066: {'index': {1: {'data': 333188,
'dynamic': 1084,
'jid': 1066,
'process': 'autorp_candidate_rp',
'stack': 136,
'text': 52}}},
1067: {'index': {1: {'data': 532012,
'dynamic': 1892,
'jid': 1067,
'process': 'autorp_map_agent',
'stack': 136,
'text': 84}}},
1071: {'index': {1: {'data': 998992,
'dynamic': 5498,
'jid': 1071,
'process': 'msdp',
'stack': 136,
'text': 484}}},
1074: {'index': {1: {'data': 599436,
'dynamic': 1782,
'jid': 1074,
'process': 'rip',
'stack': 136,
'text': 296}}},
1078: {'index': {1: {'data': 1045796,
'dynamic': 40267,
'jid': 1078,
'process': 'bgp',
'stack': 136,
'text': 2408}}},
1093: {'index': {1: {'data': 668844,
'dynamic': 3577,
'jid': 1093,
'process': 'bpm',
'stack': 136,
'text': 716}}},
1101: {'index': {1: {'data': 266776,
'dynamic': 602,
'jid': 1101,
'process': 'cdp_mgr',
'stack': 136,
'text': 24}}},
1113: {'index': {1: {'data': 200096,
'dynamic': 251,
'jid': 1113,
'process': 'eigrp_uv',
'stack': 136,
'text': 48}}},
1114: {'index': {1: {'data': 1084008,
'dynamic': 45594,
'jid': 1114,
'process': 'emsd',
'stack': 136,
'text': 10636}}},
1128: {'index': {1: {'data': 200156,
'dynamic': 284,
'jid': 1128,
'process': 'isis_uv',
'stack': 136,
'text': 84}}},
1130: {'index': {1: {'data': 599144,
'dynamic': 2131,
'jid': 1130,
'process': 'lldp_agent',
'stack': 136,
'text': 412}}},
1135: {'index': {1: {'data': 1052648,
'dynamic': 24083,
'jid': 1135,
'process': 'netconf',
'stack': 136,
'text': 772}}},
1136: {'index': {1: {'data': 600036,
'dynamic': 795,
'jid': 1136,
'process': 'netconf_agent_tty',
'stack': 136,
'text': 20}}},
1139: {'index': {1: {'data': 200092,
'dynamic': 259,
'jid': 1139,
'process': 'ospf_uv',
'stack': 136,
'text': 48}}},
1140: {'index': {1: {'data': 200092,
'dynamic': 258,
'jid': 1140,
'process': 'ospfv3_uv',
'stack': 136,
'text': 32}}},
1147: {'index': {1: {'data': 808524,
'dynamic': 5098,
'jid': 1147,
'process': 'sdr_mgbl_proxy',
'stack': 136,
'text': 464}}},
1221: {'index': {1: {'data': 200848,
'dynamic': 503,
'jid': 1221,
'process': 'ssh_conf_verifier',
'stack': 136,
'text': 32}}},
1233: {'index': {1: {'data': 399212,
'dynamic': 1681,
'jid': 1233,
'process': 'mpls_static',
'stack': 136,
'text': 252}}},
1234: {'index': {1: {'data': 464512,
'dynamic': 856,
'jid': 1234,
'process': 'lldp_mgr',
'stack': 136,
'text': 100}}},
1235: {'index': {1: {'data': 665416,
'dynamic': 1339,
'jid': 1235,
'process': 'intf_mgbl',
'stack': 136,
'text': 212}}},
1236: {'index': {1: {'data': 546924,
'dynamic': 17047,
'jid': 1236,
'process': 'statsd_manager_g',
'stack': 136,
'text': 4}}},
1237: {'index': {1: {'data': 201996,
'dynamic': 1331,
'jid': 1237,
'process': 'ipv4_mfwd_ma',
'stack': 136,
'text': 144}}},
1238: {'index': {1: {'data': 1015244,
'dynamic': 22504,
'jid': 1238,
'process': 'ipv4_rib',
'stack': 136,
'text': 1008}}},
1239: {'index': {1: {'data': 201364,
'dynamic': 341,
'jid': 1239,
'process': 'ipv6_mfwd_ma',
'stack': 136,
'text': 136}}},
1240: {'index': {1: {'data': 951448,
'dynamic': 26381,
'jid': 1240,
'process': 'ipv6_rib',
'stack': 136,
'text': 1160}}},
1241: {'index': {1: {'data': 873952,
'dynamic': 11135,
'jid': 1241,
'process': 'mrib',
'stack': 136,
'text': 1536}}},
1242: {'index': {1: {'data': 873732,
'dynamic': 11043,
'jid': 1242,
'process': 'mrib6',
'stack': 136,
'text': 1516}}},
1243: {'index': {1: {'data': 800236,
'dynamic': 3444,
'jid': 1243,
'process': 'policy_repository',
'stack': 136,
'text': 472}}},
1244: {'index': {1: {'data': 399440,
'dynamic': 892,
'jid': 1244,
'process': 'ipv4_mpa',
'stack': 136,
'text': 160}}},
1245: {'index': {1: {'data': 399444,
'dynamic': 891,
'jid': 1245,
'process': 'ipv6_mpa',
'stack': 136,
'text': 160}}},
1246: {'index': {1: {'data': 200664,
'dynamic': 261,
'jid': 1246,
'process': 'eth_gl_cfg',
'stack': 136,
'text': 20}}},
1247: {'index': {1: {'data': 941936,
'dynamic': 13246,
'jid': 1247,
'process': 'igmp',
'stack': 144,
'text': 980}}},
1248: {'index': {1: {'data': 267440,
'dynamic': 677,
'jid': 1248,
'process': 'ipv4_connected',
'stack': 136,
'text': 4}}},
1249: {'index': {1: {'data': 267424,
'dynamic': 677,
'jid': 1249,
'process': 'ipv4_local',
'stack': 136,
'text': 4}}},
1250: {'index': {1: {'data': 267436,
'dynamic': 680,
'jid': 1250,
'process': 'ipv6_connected',
'stack': 136,
'text': 4}}},
1251: {'index': {1: {'data': 267420,
'dynamic': 681,
'jid': 1251,
'process': 'ipv6_local',
'stack': 136,
'text': 4}}},
1252: {'index': {1: {'data': 940472,
'dynamic': 12973,
'jid': 1252,
'process': 'mld',
'stack': 136,
'text': 928}}},
1253: {'index': {1: {'data': 1018740,
'dynamic': 22744,
'jid': 1253,
'process': 'pim',
'stack': 136,
'text': 4424}}},
1254: {'index': {1: {'data': 1017788,
'dynamic': 22444,
'jid': 1254,
'process': 'pim6',
'stack': 136,
'text': 4544}}},
1255: {'index': {1: {'data': 799148,
'dynamic': 4916,
'jid': 1255,
'process': 'bundlemgr_distrib',
'stack': 136,
'text': 2588}}},
1256: {'index': {1: {'data': 999524,
'dynamic': 7871,
'jid': 1256,
'process': 'bfd',
'stack': 136,
'text': 1512}}},
1257: {'index': {1: {'data': 268092,
'dynamic': 1903,
'jid': 1257,
'process': 'bgp_epe',
'stack': 136,
'text': 60}}},
1258: {'index': {1: {'data': 268016,
'dynamic': 493,
'jid': 1258,
'process': 'domain_services',
'stack': 136,
'text': 136}}},
1259: {'index': {1: {'data': 201184,
'dynamic': 272,
'jid': 1259,
'process': 'ethernet_stats_controller_edm',
'stack': 136,
'text': 32}}},
1260: {'index': {1: {'data': 399868,
'dynamic': 874,
'jid': 1260,
'process': 'ftp_fs',
'stack': 136,
'text': 64}}},
1261: {'index': {1: {'data': 206536,
'dynamic': 2468,
'jid': 1261,
'process': 'python_process_manager',
'stack': 136,
'text': 12}}},
1262: {'index': {1: {'data': 200360,
'dynamic': 421,
'jid': 1262,
'process': 'tty_verifyd',
'stack': 136,
'text': 8}}},
1263: {'index': {1: {'data': 265924,
'dynamic': 399,
'jid': 1263,
'process': 'ipv4_rump',
'stack': 136,
'text': 60}}},
1264: {'index': {1: {'data': 265908,
'dynamic': 394,
'jid': 1264,
'process': 'ipv6_rump',
'stack': 136,
'text': 108}}},
1265: {'index': {1: {'data': 729900,
'dynamic': 1030,
'jid': 1265,
'process': 'es_acl_mgr',
'stack': 136,
'text': 56}}},
1266: {'index': {1: {'data': 530424,
'dynamic': 723,
'jid': 1266,
'process': 'rt_check_mgr',
'stack': 136,
'text': 104}}},
1267: {'index': {1: {'data': 336304,
'dynamic': 2594,
'jid': 1267,
'process': 'pbr_ma',
'stack': 136,
'text': 184}}},
1268: {'index': {1: {'data': 466552,
'dynamic': 2107,
'jid': 1268,
'process': 'qos_ma',
'stack': 136,
'text': 876}}},
1269: {'index': {1: {'data': 334576,
'dynamic': 975,
'jid': 1269,
'process': 'vservice_mgr',
'stack': 136,
'text': 60}}},
1270: {'index': {1: {'data': 1000676,
'dynamic': 5355,
'jid': 1270,
'process': 'mpls_ldp',
'stack': 136,
'text': 2952}}},
1271: {'index': {1: {'data': 1002132,
'dynamic': 6985,
'jid': 1271,
'process': 'xtc_agent',
'stack': 136,
'text': 1948}}},
1272: {'index': {1: {'data': 1017288,
'dynamic': 14858,
'jid': 1272,
'process': 'l2vpn_mgr',
'stack': 136,
'text': 5608}}},
1273: {'index': {1: {'data': 424,
'dynamic': 0,
'jid': 1273,
'process': 'bash',
'stack': 136,
'text': 1016}}},
1274: {'index': {1: {'data': 202200,
'dynamic': 1543,
'jid': 1274,
'process': 'cmpp',
'stack': 136,
'text': 60}}},
1275: {'index': {1: {'data': 334624,
'dynamic': 1555,
'jid': 1275,
'process': 'l2tp_mgr',
'stack': 136,
'text': 960}}},
1276: {'index': {1: {'data': 223128,
'dynamic': 16781,
'jid': 1276,
'process': 'schema_server',
'stack': 136,
'text': 80}}},
1277: {'index': {1: {'data': 670692,
'dynamic': 6660,
'jid': 1277,
'process': 'sdr_instmgr',
'stack': 136,
'text': 1444}}},
1278: {'index': {1: {'data': 1004336,
'dynamic': 436,
'jid': 1278,
'process': 'snmppingd',
'stack': 136,
'text': 24}}},
1279: {'index': {1: {'data': 200120,
'dynamic': 263,
'jid': 1279,
'process': 'ssh_backup_server',
'stack': 136,
'text': 100}}},
1280: {'index': {1: {'data': 398960,
'dynamic': 835,
'jid': 1280,
'process': 'ssh_server',
'stack': 136,
'text': 228}}},
1281: {'index': {1: {'data': 399312,
'dynamic': 1028,
'jid': 1281,
'process': 'tc_server',
'stack': 136,
'text': 240}}},
1282: {'index': {1: {'data': 200636,
'dynamic': 281,
'jid': 1282,
'process': 'wanphy_proc',
'stack': 136,
'text': 12}}},
67280: {'index': {1: {'data': 204,
'dynamic': 0,
'jid': 67280,
'process': 'bash',
'stack': 136,
'text': 1016}}},
67321: {'index': {1: {'data': 132,
'dynamic': 0,
'jid': 67321,
'process': 'sh',
'stack': 136,
'text': 1016}}},
67322: {'index': {1: {'data': 204,
'dynamic': 0,
'jid': 67322,
'process': 'bash',
'stack': 136,
'text': 1016}}},
67338: {'index': {1: {'data': 40,
'dynamic': 0,
'jid': 67338,
'process': 'cgroup_oom',
'stack': 136,
'text': 8}}},
67493: {'index': {1: {'data': 176,
'dynamic': 0,
'jid': 67493,
'process': 'bash',
'stack': 136,
'text': 1016}}},
67499: {'index': {1: {'data': 624,
'dynamic': 0,
'jid': 67499,
'process': 'bash',
'stack': 136,
'text': 1016}}},
67513: {'index': {1: {'data': 256,
'dynamic': 0,
'jid': 67513,
'process': 'inotifywait',
'stack': 136,
'text': 24}}},
67514: {'index': {1: {'data': 636,
'dynamic': 0,
'jid': 67514,
'process': 'bash',
'stack': 136,
'text': 1016}}},
67563: {'index': {1: {'data': 8408,
'dynamic': 0,
'jid': 67563,
'process': 'dbus-daemon',
'stack': 136,
'text': 408}}},
67582: {'index': {1: {'data': 440,
'dynamic': 0,
'jid': 67582,
'process': 'sshd',
'stack': 136,
'text': 704}}},
67592: {'index': {1: {'data': 200,
'dynamic': 0,
'jid': 67592,
'process': 'rpcbind',
'stack': 136,
'text': 44}}},
67686: {'index': {1: {'data': 244,
'dynamic': 0,
'jid': 67686,
'process': 'rngd',
'stack': 136,
'text': 20}}},
67692: {'index': {1: {'data': 176,
'dynamic': 0,
'jid': 67692,
'process': 'syslogd',
'stack': 136,
'text': 44}}},
67695: {'index': {1: {'data': 3912,
'dynamic': 0,
'jid': 67695,
'process': 'klogd',
'stack': 136,
'text': 28}}},
67715: {'index': {1: {'data': 176,
'dynamic': 0,
'jid': 67715,
'process': 'xinetd',
'stack': 136,
'text': 156}}},
67758: {'index': {1: {'data': 748,
'dynamic': 0,
'jid': 67758,
'process': 'crond',
'stack': 524,
'text': 56}}},
68857: {'index': {1: {'data': 672,
'dynamic': 0,
'jid': 68857,
'process': 'bash',
'stack': 136,
'text': 1016}}},
68876: {'index': {1: {'data': 744,
'dynamic': 0,
'jid': 68876,
'process': 'bash',
'stack': 136,
'text': 1016}}},
68881: {'index': {1: {'data': 82976,
'dynamic': 0,
'jid': 68881,
'process': 'dev_inotify_hdlr',
'stack': 136,
'text': 12}}},
68882: {'index': {1: {'data': 82976,
'dynamic': 0,
'jid': 68882,
'process': 'dev_inotify_hdlr',
'stack': 136,
'text': 12}}},
68909: {'index': {1: {'data': 88312,
'dynamic': 0,
'jid': 68909,
'process': 'ds',
'stack': 136,
'text': 56}}},
69594: {'index': {1: {'data': 199480,
'dynamic': 173,
'jid': 69594,
'process': 'tty_exec_launcher',
'stack': 136,
'text': 16}}},
70487: {'index': {1: {'data': 200108,
'dynamic': 312,
'jid': 70487,
'process': 'tams_proc',
'stack': 136,
'text': 440}}},
70709: {'index': {1: {'data': 200200,
'dynamic': 342,
'jid': 70709,
'process': 'tamd_proc',
'stack': 136,
'text': 32}}},
73424: {'index': {1: {'data': 200808,
'dynamic': 0,
'jid': 73424,
'process': 'attestation_agent',
'stack': 136,
'text': 108}}},
75962: {'index': {1: {'data': 206656,
'dynamic': 0,
'jid': 75962,
'process': 'pyztp2',
'stack': 136,
'text': 8}}},
76021: {'index': {1: {'data': 1536,
'dynamic': 0,
'jid': 76021,
'process': 'bash',
'stack': 136,
'text': 1016}}},
76022: {'index': {1: {'data': 1784,
'dynamic': 0,
'jid': 76022,
'process': 'bash',
'stack': 136,
'text': 1016}}},
76639: {'index': {1: {'data': 16480,
'dynamic': 0,
'jid': 76639,
'process': 'perl',
'stack': 136,
'text': 8}}},
76665: {'index': {1: {'data': 487380,
'dynamic': 0,
'jid': 76665,
'process': 'pam_cli_agent',
'stack': 136,
'text': 1948}}},
76768: {'index': {1: {'data': 24868,
'dynamic': 0,
'jid': 76768,
'process': 'perl',
'stack': 136,
'text': 8}}},
76784: {'index': {1: {'data': 17356,
'dynamic': 0,
'jid': 76784,
'process': 'perl',
'stack': 136,
'text': 8}}},
76802: {'index': {1: {'data': 16280,
'dynamic': 0,
'jid': 76802,
'process': 'perl',
'stack': 136,
'text': 8}}},
77304: {'index': {1: {'data': 598100,
'dynamic': 703,
'jid': 77304,
'process': 'exec',
'stack': 136,
'text': 76}}},
80488: {'index': {1: {'data': 172,
'dynamic': 0,
'jid': 80488,
'process': 'sleep',
'stack': 136,
'text': 32}}},
80649: {'index': {1: {'data': 172,
'dynamic': 0,
'jid': 80649,
'process': 'sleep',
'stack': 136,
'text': 32}}},
80788: {'index': {1: {'data': 1484,
'dynamic': 2,
'jid': 80788,
'process': 'sleep',
'stack': 136,
'text': 32}}},
80791: {'index': {1: {'data': 420,
'dynamic': 0,
'jid': 80791,
'process': 'sh',
'stack': 136,
'text': 1016}}},
80792: {'index': {1: {'data': 133912,
'dynamic': 194,
'jid': 80792,
'process': 'sh_proc_mem_cli',
'stack': 136,
'text': 12}}},
80796: {'index': {1: {'data': 484,
'dynamic': 0,
'jid': 80796,
'process': 'sh',
'stack': 136,
'text': 1016}}},
80797: {'index': {1: {'data': 133916,
'dynamic': 204,
'jid': 80797,
'process': 'sh_proc_memory',
'stack': 136,
'text': 12
}
}
}
}
}
|
opencga-app/build/analysis/tabix-0.2.6/tabix.py | roalva1/opencga | 125 | 12605231 | #!/usr/bin/env python
# Author: <NAME> and <NAME>
# License: MIT/X11
import sys
from ctypes import *
from ctypes.util import find_library
import glob, platform
def load_shared_library(lib, _path='.', ver='*'):
"""Search for and load the tabix library. The
expectation is that the library is located in
the current directory (ie. "./")
"""
# find from the system path
path = find_library(lib)
if (path == None): # if fail, search in the custom directory
s = platform.system()
if (s == 'Darwin'): suf = ver+'.dylib'
elif (s == 'Linux'): suf = '.so'+ver
candidates = glob.glob(_path+'/lib'+lib+suf);
if (len(candidates) == 1): path = candidates[0]
else: return None
cdll.LoadLibrary(path)
return CDLL(path)
def tabix_init():
"""Initialize and return a tabix reader object
for subsequent tabix_get() calls.
"""
tabix = load_shared_library('tabix')
if (tabix == None): return None
tabix.ti_read.restype = c_char_p
# on Mac OS X 10.6, the following declarations are required.
tabix.ti_open.restype = c_void_p
tabix.ti_querys.argtypes = [c_void_p, c_char_p]
tabix.ti_querys.restype = c_void_p
tabix.ti_query.argtypes = [c_void_p, c_char_p, c_int, c_int]
tabix.ti_query.restype = c_void_p
tabix.ti_read.argtypes = [c_void_p, c_void_p, c_void_p]
tabix.ti_iter_destroy.argtypes = [c_void_p]
tabix.ti_close.argtypes = [c_void_p]
# FIXME: explicit declarations for APIs not used in this script
return tabix
# OOP interface
class Tabix:
def __init__(self, fn, fnidx=0):
self.tabix = tabix_init();
if (self.tabix == None):
sys.stderr.write("[Tabix] Please make sure the shared library is compiled and available.\n")
return
self.fp = self.tabix.ti_open(fn, fnidx);
def __del__(self):
if (self.tabix): self.tabix.ti_close(self.fp)
def fetch(self, chr, start=-1, end=-1):
"""Generator function that will yield each interval
within the requested range from the requested file.
"""
if (self.tabix == None): return
if (start < 0): iter = self.tabix.ti_querys(self.fp, chr) # chr looks like: "chr2:1,000-2,000" or "chr2"
else: iter = self.tabix.ti_query(self.fp, chr, start, end) # chr must be a sequence name
if (iter == None):
sys.stderr.write("[Tabix] Malformatted query or wrong sequence name.\n")
return
while (1): # iterate
s = self.tabix.ti_read(self.fp, iter, 0)
if (s == None): break
yield s
self.tabix.ti_iter_destroy(iter)
# command-line interface
def main():
if (len(sys.argv) < 3):
sys.stderr.write("Usage: tabix.py <in.gz> <reg>\n")
sys.exit(1)
# report the features in the requested interval
tabix = Tabix(sys.argv[1])
for line in tabix.fetch(sys.argv[2]):
print line
if __name__ == '__main__':
main()
|
virtual/lib/python3.8/site-packages/openapi_spec_validator/handlers/utils.py | dan-mutua/flaskwk3 | 201 | 12605241 | import os.path
from six.moves.urllib.parse import urlparse, unquote
from six.moves.urllib.request import url2pathname
def uri_to_path(uri):
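    # Convert a file:// URI into a local filesystem path: decode percent-escapes
    # and keep any netloc as a UNC-style "//host/" (or "\\host\") prefix.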
parsed = urlparse(uri)
host = "{0}{0}{mnt}{0}".format(os.path.sep, mnt=parsed.netloc)
return os.path.normpath(
os.path.join(host, url2pathname(unquote(parsed.path)))
)
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/core/backup/tasks/__init__.py | disrupted/Trakttv.bundle | 1,346 | 12605258 | from plugin.core.backup.tasks.archive import ArchiveTask
from plugin.core.backup.tasks.compact import CompactTask
|
src/strategy/python/strategy.py | oxnz/design-patterns | 117 | 12605281 | '''http://stackoverflow.com/questions/963965/how-is-this-strategy-pattern-written-in-python-the-sample-in-wikipedia'''
import types
class StrategyExample:
def __init__(self, func=None):
self.name = "Strategy Example 0"
if func :
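            # Bind the supplied function to this instance so it replaces the
            # default execute() for this object only (per-instance strategy).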
self.execute = types.MethodType(func, self)
def execute(self):
print(self.name)
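# Replacement strategies: module-level functions taking self, bound to an
# instance via types.MethodType in __init__ above.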
def executeReplacement1(self):
print(self.name + " from execute 1")
def executeReplacement2(self):
print(self.name + " from execute 2")
if __name__ == "__main__":
strat0 = StrategyExample()
strat1 = StrategyExample(executeReplacement1)
strat1.name = "Strategy Example 1"
strat2 = StrategyExample(executeReplacement2)
strat2.name = "Strategy Example 2"
strat0.execute()
strat1.execute()
strat2.execute()
|