max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
Katna/config.py | viddik13/katna | 125 | 36466 | <gh_stars>100-1000
"""
.. module:: Katna.config
    :platform: Platform Independent
:synopsis: This module defines some helpful configuration variables
"""
import os
# # Configuration parameters for Image class
class Image:
    # default factor by which the image size is reduced for processing
down_sample_factor = 8
# Debug flag
DEBUG = False
    # If a suitable crop is not found, the crop height will be reduced by this
    # multiplier/factor (crop_height_reduction_factor_in_each_iteration) and the
    # search for candidate crops is resumed.
    # While reducing the crop height and width, they must not get smaller than
    # 1/(min_image_to_crop_factor) of the image height/width.
min_image_to_crop_factor = 4
crop_height_reduction_factor_in_each_iteration = 0.05
# # Configurations for Scoring crops for crop extractor
class CropScorer:
detail_weight = 0.2 # default weight value for detail parameter
edge_radius = 0.4 # default edge radius
edge_weight = -20 # default edge weight
outside_importance = (
-0.5
) # default value to set if the pixel is outside crop rectangle
    rule_of_thirds = True # boolean to enable the rule-of-thirds condition check
saliency_bias = 0.2 # bias color value for saliency(+- error value)
    saliency_weight = 1.3 # default weight value for saliency parameter
face_bias = 0.01 # bias color value for face(+- error value)
face_weight = 3.4 # default weight value for face parameter
rects_weight = 1 # default weight value for crop rectangles
# # Configurations for Text detection class
class TextDetector:
# Min Confidence Threshold for Text detection model
min_confidence = 0.9
# Threshold for merging text detection boxes
merge_threshold = 1
# Name of Model files to be used for text detection
frozen_weights = "frozen_east_text_detection.pb"
# Location where model file will be downloaded
cache_subdir = "models"
    # Layer names used for text detection
layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
# Download Link for Text detection model
model_download_link = "https://github.com/oyyd/frozen_east_text_detection.pb/raw/master/frozen_east_text_detection.pb"
# # Configurations for Edge Feature class
class EdgeFeature:
# min edge threshold value
min_val_threshold = 100
# Max edge threshold value
max_val_threshold = 200
# aperture_size/size of Sobel kernel for canny edge detector
ksize = 3
# # Configurations for Face detection Feature class
class FaceFeature:
# Model file name to be used for face detection
model_file = "res10_300x300_ssd_iter_140000_fp16.caffemodel"
    # Model definition file name to be used for face detection
prototxt_file = "deploy.prototxt"
# Location where model file will be downloaded
cache_subdir = "models"
# Min Confidence Threshold for face detection model
confidence = 0.5
    # Download Link for face detection model definition file
prototxt_download_link = "https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt"
# Download Link for face detection model
modelfile_download_link = "https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel"
# # Configuration parameters for Video class
class Video:
# Debug flag
DEBUG = False
min_video_duration = 5.0
# consume % of memory during video keyframe extraction
# 80% of available memory will be consumed
memory_consumption_threshold = 0.80
    # assumed number of frames within which one candidate frame might be available;
    # used to estimate the seconds needed to reach the memory threshold when all
    # frames are collected but not all of them are candidate frames.
    # Currently we assume 1 candidate in every 5 frames.
assumed_no_of_frames_per_candidate_frame = 5
    # if the video duration (in minutes) is greater than this number, the video will be treated as a large video
video_split_threshold_in_minutes = 20
# https://trac.ffmpeg.org/wiki/Encode/H.264
    # Keep this value between 20 and 30
video_compression_crf_parameter = 23
    video_compression_codec = "libx264" # Currently only "libx264" is supported
compression_output_file_extension = "mp4"
    # Valid video extensions supported by ffmpeg
    # You can generate an updated list by using the following shell command on macOS or Linux
# $ ffmpeg -demuxers -hide_banner | tail -n +5 | cut -d' ' -f4 | xargs -I{} ffmpeg -hide_banner -h demuxer={} | grep 'Common extensions' | cut -d' ' -f7 | tr ',' $'\n' | tr -d '.'
video_extensions = [
".str",
".aa",
".aac",
".ac3",
".acm",
".adf",
".adp",
".dtk",
".ads",
".ss2",
".adx",
".aea",
".afc",
".aix",
".al",
".ape",
".apl",
".mac",
".aptx",
".aptxhd",
".aqt",
".ast",
".avi",
".avr",
".bfstm",
".bcstm",
".bit",
".bmv",
".brstm",
".cdg",
".cdxl",
".xl",
".c2",
".302",
".daud",
".str",
".dss",
".dts",
".dtshd",
".dv",
".dif",
".cdata",
".eac3",
".paf",
".fap",
".flm",
".flac",
".flv",
".fsb",
".g722",
".722",
".tco",
".rco",
".g723_1",
".g729",
".genh",
".gsm",
".h261",
".h26l",
".h264",
".264",
".avc",
".hevc",
".h265",
".265",
".idf",
".cgi",
".sf",
".ircam",
".ivr",
".flv",
".lvf",
".m4v",
".mkv",
".mk3d",
".mka",
".mks",
".mjpg",
".mjpeg",
".mpo",
".j2k",
".mlp",
".mov",
".mp4",
".m4a",
".3gp",
".3g2",
".mj2",
".mp2",
".mp3",
".m2a",
".mpa",
".mpc",
".mjpg",
".txt",
".mpl2",
".sub",
".msf",
".mtaf",
".ul",
".musx",
".mvi",
".mxg",
".v",
".nist",
".sph",
".nsp",
".nut",
".ogg",
".oma",
".omg",
".aa3",
".pjs",
".pvf",
".yuv",
".cif",
".qcif",
".rgb",
".rt",
".rsd",
".rsd",
".rso",
".sw",
".sb",
".smi",
".sami",
".sbc",
".msbc",
".sbg",
".scc",
".sdr2",
".sds",
".sdx",
".shn",
".vb",
".son",
".sln",
".mjpg",
".stl",
".sub",
".sub",
".sup",
".svag",
".tak",
".thd",
".tta",
".ans",
".art",
".asc",
".diz",
".ice",
".nfo",
".txt",
".vt",
".ty",
".ty+",
".uw",
".ub",
".v210",
".yuv10",
".vag",
".vc1",
".viv",
".idx",
".vpk",
".txt",
".vqf",
".vql",
".vqe",
".vtt",
".wsd",
".xmv",
".xvag",
".yop",
".y4m",
]
# Configuration parameters for mediapipe
class MediaPipe:
class AutoFlip:
        # Rerun is required due to the autoflip issue mentioned here:
# https://github.com/google/mediapipe/issues/497
RERUN_LIMIT = 2
# Models folder location
MODELS_FOLDER_LOCATION = os.path.join(os.getcwd(), "mediapipe", "models")
# pbtxt temp folder name
TMP_PBTXT_FOLDER_NAME = "temp_pbtxt"
TMP_PBTXT_FOLDER_PATH = os.path.join(os.getcwd(), TMP_PBTXT_FOLDER_NAME)
# Default pbtxt and build cmd
CONFIG_FILE_PBTXT = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "mediapipe_autoflip.pbtxt"
)
BUILD_CMD = "run_autoflip"
        # user-friendly config keys
ENFORCE_FEATURES_KEYNAME = "ENFORCE_FEATURES"
STABALIZATION_THRESHOLD_KEYNAME = "STABALIZATION_THRESHOLD"
BLUR_AREA_OPACITY_KEYNAME = "BLUR_AREA_OPACITY"
# DEFAULT VALUES IN PBTXT
DEFAULT_BLUR_AREA_OPACITY = 0.6
DEFAULT_MOTION_STABALIZATION_THRESHOLD = 0.5
DEFAULT_FEATURE_SIGNAL_VALUE = "false"
# ENFORCE_FEATURES Keys
_FACE_CORE_LANDMARKS = "FACE_CORE_LANDMARKS"
_FACE_FULL = "FACE_FULL"
_FACE_ALL_LANDMARKS = "FACE_ALL_LANDMARKS"
_HUMAN = "HUMAN"
_PET = "PET"
_CAR = "CAR"
_OBJECT = "OBJECT"
        # the variable names below should match the key names for set_conf to work
        # smoothly
# ENFORCE_FEATURES list
ENFORCE_FEATURES = {
_FACE_CORE_LANDMARKS: False,
_FACE_ALL_LANDMARKS: False,
_FACE_FULL: False,
_HUMAN: False,
_PET: False,
_CAR: False,
_OBJECT: False,
}
        # % area from the center where most of the content is;
        # usually applied when content is focused near the center
STABALIZATION_THRESHOLD = DEFAULT_MOTION_STABALIZATION_THRESHOLD
# opacity of blur area
BLUR_AREA_OPACITY = DEFAULT_BLUR_AREA_OPACITY
@classmethod
def get_pbtxt_mapping(cls):
return {
cls.ENFORCE_FEATURES_KEYNAME: "signal_settings",
cls.STABALIZATION_THRESHOLD_KEYNAME: "motion_stabilization_threshold_percent",
cls.BLUR_AREA_OPACITY_KEYNAME: "overlay_opacity",
}
@classmethod
def get_conf(cls):
"""Gets the current config
:return: dictionary containing the current config
:rtype: dict
"""
return {
cls.ENFORCE_FEATURES_KEYNAME: cls.ENFORCE_FEATURES,
cls.STABALIZATION_THRESHOLD_KEYNAME: cls.STABALIZATION_THRESHOLD,
cls.BLUR_AREA_OPACITY_KEYNAME: cls.BLUR_AREA_OPACITY,
}
@classmethod
def set_conf(cls, config):
"""Sets the config passed
:param config: The configuration to set.
:type config: dict
"""
for attr in config.keys():
current_conf = cls.get_conf()
if attr in current_conf.keys():
if attr == cls.ENFORCE_FEATURES_KEYNAME:
updated_attr_dict = {**current_conf[attr], **config[attr]}
setattr(cls, attr, updated_attr_dict)
else:
setattr(cls, attr, config[attr])
else:
raise Exception(
" Invalid configuration. Use get_conf method to see existing configuration or refer documentation."
)
class ImageSelector:
# Setting for optimum Brightness values
min_brightness_value = 10.0
max_brightness_value = 90.0
brightness_step = 2.0
# Setting for optimum Contrast/Entropy values
min_entropy_value = 1.0
max_entropy_value = 10.0
entropy_step = 0.5
class FrameExtractor:
# Setting local maxima criteria
USE_LOCAL_MAXIMA = True
    # Length of the sliding window used for taking frame differences
len_window = 20
# Chunk size of Images to be processed at a time in memory
max_frames_in_chunk = 500
    # Type of smoothing window: one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'; a flat window will produce moving average smoothing.
window_type = "hanning"
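# A minimal, hedged usage sketch of the AutoFlip configuration defined above:
# it only shows the get_conf()/set_conf() round trip and runs when this module
# is executed directly; the chosen feature key is just an example.
if __name__ == "__main__":
    print(MediaPipe.AutoFlip.get_conf())
    # ENFORCE_FEATURES values are merged into the existing dict; unknown
    # top-level keys raise an Exception inside set_conf().
    MediaPipe.AutoFlip.set_conf({"ENFORCE_FEATURES": {"FACE_FULL": True}})
    print(MediaPipe.AutoFlip.get_conf()["ENFORCE_FEATURES"])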
|
plaso/formatters/manager.py | pyllyukko/plaso | 1,253 | 36474 | # -*- coding: utf-8 -*-
"""Manages custom event formatter helpers."""
class FormattersManager(object):
"""Custom event formatter helpers manager."""
_custom_formatter_helpers = {}
@classmethod
def GetEventFormatterHelper(cls, identifier):
"""Retrieves a custom event formatter helper.
Args:
identifier (str): identifier.
Returns:
CustomEventFormatterHelper: custom event formatter or None if not
available.
"""
identifier = identifier.lower()
return cls._custom_formatter_helpers.get(identifier)
@classmethod
def RegisterEventFormatterHelper(cls, formatter_helper_class):
"""Registers a custom event formatter helper.
The custom event formatter helpers are identified based on their lower
case identifier.
Args:
formatter_helper_class (type): class of the custom event formatter helper.
Raises:
KeyError: if a custom formatter helper is already set for the
corresponding identifier.
"""
identifier = formatter_helper_class.IDENTIFIER.lower()
if identifier in cls._custom_formatter_helpers:
raise KeyError((
'Custom event formatter helper already set for identifier: '
'{0:s}.').format(formatter_helper_class.IDENTIFIER))
cls._custom_formatter_helpers[identifier] = formatter_helper_class()
@classmethod
def RegisterEventFormatterHelpers(cls, formatter_helper_classes):
"""Registers custom event formatter helpers.
The formatter classes are identified based on their lower case data type.
Args:
formatter_helper_classes (list[type]): classes of the custom event
formatter helpers.
Raises:
KeyError: if a custom formatter helper is already set for the
corresponding data type.
"""
for formatter_helper_class in formatter_helper_classes:
cls.RegisterEventFormatterHelper(formatter_helper_class)
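# A hedged sketch of how a custom event formatter helper could be registered and
# retrieved through the manager above; the base-class import path and the
# IDENTIFIER attribute are assumptions drawn from the docstrings, so the sketch
# is kept as a comment rather than executable code.
#
# from plaso.formatters import interface
#
# class MyFormatterHelper(interface.CustomEventFormatterHelper):
#   IDENTIFIER = 'my_data_type'
#
# FormattersManager.RegisterEventFormatterHelper(MyFormatterHelper)
# helper = FormattersManager.GetEventFormatterHelper('MY_DATA_TYPE')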
|
custom_latex_cell_style/scenario2/ipython_nbconvert_config.py | isabella232/nbconvert-examples | 120 | 36492 | c = get_config()
# Export all the notebooks in the current directory to LaTeX and post-process them to PDF.
c.NbConvertApp.notebooks = ['*.ipynb']
c.NbConvertApp.export_format = 'latex'
c.NbConvertApp.postprocessor_class = 'PDF'
c.Exporter.template_file = 'custom_article.tplx'
|
bin/terminology.py | cedzz/python-patterns | 631 | 36513 | #!/usr/bin/env python3
"""Count the frequency of various phrases, given the path to the Python PEPs.
In Python PEPs, the opposite of “subclass” is almost always “base class” — just remember that the builtin is named super(), not base()! Stats:
216 base class
0 child class
10 derived class
12 parent class
372 subclass
10 super class
44 superclass
"""
import argparse
import os
import re
import sys
TERMS = (
'superclass',
'super class',
'subclass',
'base class',
'derived class',
'parent class',
'child class',
)
def main(argv):
parser = argparse.ArgumentParser(description='PEP terminology counts')
parser.add_argument('pepsdir', help='path to PEPs repo')
try:
args = parser.parse_args(argv)
except SystemExit:
        print('\nTo check out the PEPs from version control, git clone:'
'\nhttps://github.com/python/peps.git', file=sys.stderr)
raise
peps = []
for dirpath, dirnames, filenames in os.walk(args.pepsdir):
for filename in filenames:
if filename.endswith(('.rst', '.txt')):
peps.append(os.path.join(dirpath, filename))
counts = {term: 0 for term in TERMS}
for pep in peps:
with open(pep) as f:
content = f.read()
        text = ' '.join(re.findall(r'\w+', content.lower()))
        #text = ' '.join(content.lower().replace('.', ' ').split())
for term in TERMS:
n = text.count(' ' + term + ' ')
m = text.count(' ' + term + 'es ')
counts[term] += n + m
for term in sorted(TERMS):
print('{:5} {}'.format(counts[term], term))
if __name__ == '__main__':
main(sys.argv[1:])
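# Typical invocation, assuming a local clone of https://github.com/python/peps.git
# (the path below is a placeholder):
#   $ python3 terminology.py path/to/peps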
|
rotkehlchen/externalapis/bisq_market.py | rotkehlchenio/rotkehlchen | 137 | 36515 | import json
import requests
from rotkehlchen.assets.asset import Asset
from rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.history.deserialization import deserialize_price
from rotkehlchen.types import Price
PRICE_API_URL = 'https://bisq.markets/api/ticker?market={symbol}_BTC'
def get_bisq_market_price(asset: Asset) -> Price:
"""
Get price for pair at bisq marketplace. Price is returned against BTC.
Can raise:
    - RemoteError: If the market doesn't exist or the request fails
- DeserializationError: If the data returned is not a valid price
"""
url = PRICE_API_URL.format(symbol=asset.symbol)
try:
response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)
except requests.exceptions.RequestException as e:
raise RemoteError(f'bisq.markets request {url} failed due to {str(e)}') from e
try:
data = response.json()
except json.decoder.JSONDecodeError as e:
raise RemoteError(
f'Failed to read json response from bisq.markets. {response.text}. {str(e)}',
) from e
if 'error' in data:
raise RemoteError(f'Request data from bisq.markets {url} is not valid {data["error"]}')
try:
price = data['last']
except KeyError as e:
raise DeserializationError(
            f'Response from bisq.markets didn\'t contain expected key "last". {data}',
) from e
return deserialize_price(price)
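# A hedged usage sketch of get_bisq_market_price(); the 'DASH' asset identifier
# and the surrounding rotkehlchen setup are assumptions, so this stays a comment.
#
# try:
#     dash_btc_price = get_bisq_market_price(Asset('DASH'))  # price in BTC
# except (RemoteError, DeserializationError) as e:
#     print(f'could not query bisq.markets: {e}')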
|
chapter4/chapter4_pydantic_types_01.py | GoodMonsters/Building-Data-Science-Applications-with-FastAPI | 107 | 36520 | <reponame>GoodMonsters/Building-Data-Science-Applications-with-FastAPI
from pydantic import BaseModel, EmailStr, HttpUrl, ValidationError
class User(BaseModel):
email: EmailStr
website: HttpUrl
# Invalid email
try:
User(email="jdoe", website="https://www.example.com")
except ValidationError as e:
print(str(e))
# Invalid URL
try:
User(email="<EMAIL>", website="jdoe")
except ValidationError as e:
print(str(e))
# Valid
user = User(email="<EMAIL>", website="https://www.example.com")
# email='<EMAIL>' website=HttpUrl('https://www.example.com', scheme='https', host='www.example.com', tld='com', host_type='domain')
print(user)
|
examples/shapes_from_glsl/cylinder_shape.py | szabolcsdombi/zengl | 116 | 36549 | import zengl
from defaults import defaults
from grid import grid_pipeline
from window import Window
window = Window(1280, 720)
ctx = zengl.context()
image = ctx.image(window.size, 'rgba8unorm', samples=4)
depth = ctx.image(window.size, 'depth24plus', samples=4)
image.clear_value = (0.2, 0.2, 0.2, 1.0)
ctx.includes['defaults'] = defaults
grid = grid_pipeline(ctx, [image, depth])
pipeline = ctx.pipeline(
vertex_shader='''
#version 330
#include "defaults"
vec3 vertices[24] = vec3[](
vec3(0.000000, 1.000000, -0.500000),
vec3(0.000000, 1.000000, 0.500000),
vec3(0.500000, 0.866025, -0.500000),
vec3(0.500000, 0.866025, 0.500000),
vec3(0.866025, 0.500000, -0.500000),
vec3(0.866025, 0.500000, 0.500000),
vec3(1.000000, -0.000000, -0.500000),
vec3(1.000000, -0.000000, 0.500000),
vec3(0.866025, -0.500000, -0.500000),
vec3(0.866025, -0.500000, 0.500000),
vec3(0.500000, -0.866025, -0.500000),
vec3(0.500000, -0.866025, 0.500000),
vec3(-0.000000, -1.000000, -0.500000),
vec3(-0.000000, -1.000000, 0.500000),
vec3(-0.500000, -0.866025, -0.500000),
vec3(-0.500000, -0.866025, 0.500000),
vec3(-0.866025, -0.500000, -0.500000),
vec3(-0.866025, -0.500000, 0.500000),
vec3(-1.000000, 0.000000, -0.500000),
vec3(-1.000000, 0.000000, 0.500000),
vec3(-0.866025, 0.500000, -0.500000),
vec3(-0.866025, 0.500000, 0.500000),
vec3(-0.500000, 0.866025, -0.500000),
vec3(-0.500000, 0.866025, 0.500000)
);
vec3 normals[14] = vec3[](
vec3(-0.0000, 1.0000, -0.0000),
vec3(0.5000, 0.8660, -0.0000),
vec3(0.8660, 0.5000, -0.0000),
vec3(1.0000, -0.0000, -0.0000),
vec3(0.8660, -0.5000, -0.0000),
vec3(0.5000, -0.8660, -0.0000),
vec3(-0.0000, -1.0000, -0.0000),
vec3(-0.5000, -0.8660, -0.0000),
vec3(-0.8660, -0.5000, -0.0000),
vec3(-1.0000, -0.0000, -0.0000),
vec3(-0.8660, 0.5000, -0.0000),
vec3(-0.0000, -0.0000, 1.0000),
vec3(-0.5000, 0.8660, -0.0000),
vec3(-0.0000, -0.0000, -1.0000)
);
vec2 texcoords[50] = vec2[](
vec2(1.000000, 0.500000),
vec2(0.000000, 0.500000),
vec2(0.750000, 0.490000),
vec2(1.000000, 1.000000),
vec2(0.250000, 0.490000),
vec2(0.000000, 1.000000),
vec2(0.916667, 0.500000),
vec2(0.870000, 0.457846),
vec2(0.916667, 1.000000),
vec2(0.370000, 0.457846),
vec2(0.833333, 0.500000),
vec2(0.957846, 0.370000),
vec2(0.833333, 1.000000),
vec2(0.457846, 0.370000),
vec2(0.750000, 0.500000),
vec2(0.990000, 0.250000),
vec2(0.750000, 1.000000),
vec2(0.490000, 0.250000),
vec2(0.666667, 0.500000),
vec2(0.957846, 0.130000),
vec2(0.666667, 1.000000),
vec2(0.457846, 0.130000),
vec2(0.583333, 0.500000),
vec2(0.870000, 0.042154),
vec2(0.583333, 1.000000),
vec2(0.370000, 0.042154),
vec2(0.500000, 0.500000),
vec2(0.750000, 0.010000),
vec2(0.500000, 1.000000),
vec2(0.250000, 0.010000),
vec2(0.416667, 0.500000),
vec2(0.630000, 0.042154),
vec2(0.416667, 1.000000),
vec2(0.130000, 0.042154),
vec2(0.333333, 0.500000),
vec2(0.542154, 0.130000),
vec2(0.333333, 1.000000),
vec2(0.042154, 0.130000),
vec2(0.250000, 0.500000),
vec2(0.510000, 0.250000),
vec2(0.250000, 1.000000),
vec2(0.010000, 0.250000),
vec2(0.166667, 0.500000),
vec2(0.542154, 0.370000),
vec2(0.042154, 0.370000),
vec2(0.166667, 1.000000),
vec2(0.083333, 0.500000),
vec2(0.630000, 0.457846),
vec2(0.130000, 0.457846),
vec2(0.083333, 1.000000)
);
int vertex_indices[132] = int[](
1, 2, 0, 3, 4, 2, 5, 6, 4, 7, 8, 6, 9, 10, 8, 11, 12, 10, 13, 14, 12, 15, 16, 14, 17, 18, 16, 19, 20, 18,
21, 13, 5, 21, 22, 20, 23, 0, 22, 6, 14, 22, 1, 3, 2, 3, 5, 4, 5, 7, 6, 7, 9, 8, 9, 11, 10, 11, 13, 12, 13,
15, 14, 15, 17, 16, 17, 19, 18, 19, 21, 20, 5, 3, 1, 1, 23, 21, 21, 19, 17, 17, 15, 13, 13, 11, 9, 9, 7, 5,
5, 1, 21, 21, 17, 13, 13, 9, 5, 21, 23, 22, 23, 1, 0, 22, 0, 2, 2, 4, 6, 6, 8, 10, 10, 12, 14, 14, 16, 18,
18, 20, 22, 22, 2, 6, 6, 10, 14, 14, 18, 22
);
int normal_indices[132] = int[](
0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4, 5, 4, 5, 6, 5, 6, 7, 6, 7, 8, 7, 8, 9, 8, 9, 10, 9, 11, 11, 11, 10,
12, 10, 12, 0, 12, 13, 13, 13, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8,
9, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 10, 12, 12, 12, 0, 0, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
);
int texcoord_indices[132] = int[](
3, 6, 0, 8, 10, 6, 12, 14, 10, 16, 18, 14, 20, 22, 18, 24, 26, 22, 28, 30, 26, 32, 34, 30, 36, 38, 34, 40,
42, 38, 44, 29, 13, 45, 46, 42, 49, 1, 46, 15, 31, 47, 3, 8, 6, 8, 12, 10, 12, 16, 14, 16, 20, 18, 20, 24,
22, 24, 28, 26, 28, 32, 30, 32, 36, 34, 36, 40, 38, 40, 45, 42, 13, 9, 4, 4, 48, 44, 44, 41, 37, 37, 33,
29, 29, 25, 21, 21, 17, 13, 13, 4, 44, 44, 37, 29, 29, 21, 13, 45, 49, 46, 49, 5, 1, 47, 2, 7, 7, 11, 15,
15, 19, 23, 23, 27, 31, 31, 35, 39, 39, 43, 47, 47, 7, 15, 15, 23, 31, 31, 39, 47
);
out vec3 v_vertex;
out vec3 v_normal;
out vec2 v_texcoord;
void main() {
v_vertex = vertices[vertex_indices[gl_VertexID]];
v_normal = normals[normal_indices[gl_VertexID]];
v_texcoord = texcoords[texcoord_indices[gl_VertexID]];
gl_Position = mvp * vec4(v_vertex, 1.0);
}
''',
fragment_shader='''
#version 330
#include "defaults"
in vec3 v_normal;
layout (location = 0) out vec4 out_color;
void main() {
float lum = dot(normalize(light.xyz), normalize(v_normal)) * 0.7 + 0.3;
out_color = vec4(lum, lum, lum, 1.0);
}
''',
framebuffer=[image, depth],
topology='triangles',
cull_face='back',
vertex_count=132,
)
while window.update():
image.clear()
depth.clear()
grid.render()
pipeline.render()
image.blit()
|
prohmr/models/heads/__init__.py | akashsengupta1997/ProHMR | 120 | 36556 | <filename>prohmr/models/heads/__init__.py<gh_stars>100-1000
from .smpl_flow import SMPLFlow
from .skeleton_flow import SkeletonFlow
from .fc_head import FCHead |
examples/issues/issue345_docs2.py | tgolsson/appJar | 666 | 36566 | <reponame>tgolsson/appJar
import sys
sys.path.append("../../")
from appJar import gui
def press(btn):
if btn == "FIRST": app.firstFrame("Pages")
elif btn == "NEXT": app.nextFrame("Pages")
elif btn == "PREV": app.prevFrame("Pages")
elif btn == "LAST": app.lastFrame("Pages")
def changed():
msg = "Changed from: " + str(app.getPreviousFrame("Pages")) + " to " + str(app.getCurrentFrame("Pages"))
print(msg)
# return app.okBox("Sure?", msg)
with gui("FRAME STACK") as app:
with app.frameStack("Pages", change=changed):#, start=1):
with app.frame(bg='red'):
for i in range(5):
app.label("Text: " + str(i))
with app.frame(bg='green'):
for i in range(5):
app.entry("e" + str(i))
with app.frame(bg='pink'):
for i in range(5):
app.button(str(i), None)
app.buttons(["FIRST", "PREV", "NEXT", "LAST"], press)
changed()
|
binary_tree_postorder_traversal/solution.py | mahimadubey/leetcode-python | 528 | 36582 | <filename>binary_tree_postorder_traversal/solution.py
"""
Given a binary tree, return the postorder traversal of its nodes' values.
For example:
Given binary tree {1,#,2,3},
1
\
2
/
3
return [3,2,1].
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def postorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
path = []
if root is None:
return path
stack1 = []
stack2 = []
stack1.append(root)
while stack1:
root = stack1.pop()
stack2.append(root.val)
if root.left is not None:
stack1.append(root.left)
if root.right is not None:
stack1.append(root.right)
while stack2:
path.append(stack2.pop())
return path
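# A small sketch that builds the example tree {1,#,2,3} from the docstring and
# checks the traversal order; TreeNode is redefined locally here because it is
# only provided as a comment above.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.left = TreeNode(3)
    print(Solution().postorderTraversal(root))  # expected output: [3, 2, 1]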
|
figures/perception/randomwalk.py | patricknaughton01/RoboticSystemsBook | 116 | 36600 | <reponame>patricknaughton01/RoboticSystemsBook<filename>figures/perception/randomwalk.py<gh_stars>100-1000
import matplotlib.pyplot as plt
import numpy as np
from kalman import *
def kf_trace(F,g,P,H,j,Q,Xmean,Xvar,Z):
if not isinstance(F,np.ndarray): F = np.array([[F]])
if not isinstance(g,np.ndarray): g = np.array([g])
if not isinstance(P,np.ndarray): P = np.array([[P]])
if H is not None:
if not isinstance(H,np.ndarray): H = np.array([[H]])
if not isinstance(j,np.ndarray): j = np.array([j])
if not isinstance(Q,np.ndarray): Q = np.array([[Q]])
if not isinstance(Xmean,np.ndarray): Xmean = np.array([Xmean])
if not isinstance(Xvar,np.ndarray): Xvar = np.array([[Xvar]])
cur_mean,cur_cov = Xmean,Xvar
res_mean = [cur_mean]
res_cov = [cur_cov]
for z in Z:
if not isinstance(z,np.ndarray): z = np.array([z])
cur_mean,cur_cov = kalman_filter_predict(cur_mean,cur_cov,F,g,P)
if H is not None:
cur_mean,cur_cov = kalman_filter_update(cur_mean,cur_cov,F,g,P,H,j,Q,z)
res_mean.append(cur_mean)
res_cov.append(cur_cov)
return res_mean,res_cov
T = 100
N = 20
dt = 0.1
motion_noise_magnitude = 1.0
noise_magnitude = 0.3
fig1 = plt.figure(figsize=(10,4))
ax1 = fig1.add_subplot(1, 2, 1)
ax1.set_xlabel("Time")
ax1.set_ylabel("State")
ax1.set_ylim(-3,3)
ax1.set_xlim(0,10)
x = np.array(range(T))*dt
for i in xrange(N):
eps = np.random.normal(size=T)*motion_noise_magnitude
y = np.cumsum(eps*dt)
ax1.plot(x,y)
y,yvar = kf_trace(F=1,g=0,P=motion_noise_magnitude*dt**2,H=None,j=None,Q=noise_magnitude**2,Xmean=0,Xvar=0,Z=eps)
y = np.array([yi[0] for yi in y])
yvar = np.array([yi[0,0] for yi in yvar])
kf_pred, = ax1.plot(x,y[:-1],label="KF prediction")
ax1.plot(x,y[:-1]+2.0*np.sqrt(yvar)[:-1],label="KF prediction + 2*std",lw=0.5,color='k',linestyle='--')
ax1.plot(x,y[:-1]-2.0*np.sqrt(yvar)[:-1],label="KF prediction + 2*std",lw=0.5,color='k',linestyle='--')
ax1.legend(handles=[kf_pred])
ax2 = fig1.add_subplot(1, 2, 2)
ax2.set_xlabel("Time")
ax2.set_ylabel("State")
ax2.set_ylim(-3,3)
ax2.set_xlim(0,10)
#eps_truth = np.random.normal(size=T)
#y_truth = np.cumsum(eps*dt)
y_truth = np.sin(np.array(range(T))*dt*0.5)*1.0
x = np.array(range(T))*dt
z = y_truth + np.random.normal(size=T)*noise_magnitude
y,yvar = kf_trace(F=1,g=0,P=motion_noise_magnitude*dt**2,H=1,j=0,Q=noise_magnitude**2,Xmean=0,Xvar=0,Z=z)
y = np.array([yi[0] for yi in y])
yvar = np.array([yi[0,0] for yi in yvar])
Zmse = np.sqrt(np.sum((z-y_truth)**2))
KFmse = np.sqrt(np.sum((y[:-1]-y_truth)**2))
print "Z MSE",Zmse
print "KF MSE",KFmse
print "Reduction (%)",(Zmse-KFmse)/Zmse*100
ground_truth, = ax2.plot(x,y_truth,label="Ground truth",color='k')
obs = ax2.scatter(x,z,label="Observations",color='gray',s=9)
kf_estimate, = ax2.plot(x,y[:-1],label="KF estimate")
ax2.plot(x,y[:-1]+2.0*np.sqrt(yvar)[:-1],label="KF estimate + 2*std",lw=0.5,color='k',linestyle='--')
ax2.plot(x,y[:-1]-2.0*np.sqrt(yvar)[:-1],label="KF estimate + 2*std",lw=0.5,color='k',linestyle='--')
ax2.legend(handles=[ground_truth,obs,kf_estimate])
plt.show()
|
CTFd/constants/themes.py | nox237/CTFd | 3,592 | 36609 | <filename>CTFd/constants/themes.py
ADMIN_THEME = "admin"
DEFAULT_THEME = "core"
|
tests/unit/model_selection/test_model_selection.py | ambader/hcrystalball | 139 | 36652 | import numpy as np
import pytest
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from hcrystalball.metrics import get_scorer
from hcrystalball.model_selection import FinerTimeSplit
from hcrystalball.model_selection import get_best_not_failing_model
from hcrystalball.model_selection import select_model
from hcrystalball.wrappers import ExponentialSmoothingWrapper
from hcrystalball.wrappers import get_sklearn_wrapper
@pytest.mark.parametrize(
"train_data, grid_search, parallel_over_dict",
[("two_regions", "", {"Region": "region_0"}), ("two_regions", "", None)],
indirect=["train_data", "grid_search"],
)
def test_select_model(train_data, grid_search, parallel_over_dict):
_train_data = train_data
if parallel_over_dict:
col, value = list(parallel_over_dict.items())[0]
_train_data = train_data[train_data[col] == value].drop(columns="Region")
partition_columns = ["Region", "Product"]
results = select_model(
_train_data,
target_col_name="Quantity",
partition_columns=partition_columns,
parallel_over_dict=parallel_over_dict,
grid_search=grid_search,
country_code_column="Holidays_code",
)
if parallel_over_dict:
partitions = (
train_data.loc[train_data[col] == value, partition_columns]
.drop_duplicates()
.to_dict(orient="records")
)
else:
partitions = train_data[partition_columns].drop_duplicates().to_dict(orient="records")
assert len(results) == len(partitions)
for result in results:
assert result.best_model_name == "good_dummy"
assert result.partition in partitions
@pytest.mark.parametrize(
"X_y_optional, negative_data, best_model_name, rank, expected_error",
[
("", False, "ExponentialSmoothingWrapper", 1, None),
("", True, "SklearnWrapper", 2, None),
("", True, "", 2, ValueError),
],
indirect=["X_y_optional"],
)
def test_get_best_not_failing_model(X_y_optional, negative_data, best_model_name, rank, expected_error):
X, y = X_y_optional
# data contains 0
y[y < 1] = 1
if negative_data:
y[-1] = -1
models = [
ExponentialSmoothingWrapper(freq="D", seasonal="mul"),
get_sklearn_wrapper(DummyRegressor, strategy="constant", constant=-1000),
]
models = models if expected_error is None else models[:1]
grid_search = GridSearchCV(
estimator=Pipeline([("model", "passthrough")]),
param_grid=[{"model": models}],
scoring=get_scorer("neg_mean_absolute_error"),
cv=FinerTimeSplit(n_splits=1, horizon=5),
refit=False,
error_score=np.nan,
)
grid_search.fit(X, y)
if expected_error:
with pytest.raises(expected_error):
get_best_not_failing_model(grid_search, X, y)
else:
best_param_rank = get_best_not_failing_model(grid_search, X, y)
assert isinstance(best_param_rank, dict)
assert best_param_rank["params"]["model"].__class__.__name__ == best_model_name
assert best_param_rank["rank"] == rank
|
src/rust/iced-x86-py/src/iced_x86/CC_g.py | clayne/iced | 1,018 | 36668 | # SPDX-License-Identifier: MIT
# Copyright (C) 2018-present iced project and contributors
# ⚠️This file was generated by GENERATOR!🦹♂️
# pylint: disable=invalid-name
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
"""
Mnemonic condition code selector (eg. ``JG`` / ``JNLE``)
"""
import typing
if typing.TYPE_CHECKING:
from ._iced_x86_py import CC_g
else:
CC_g = int
G: CC_g = 0 # type: ignore
"""
``JG``, ``CMOVG``, ``SETG``
"""
NLE: CC_g = 1 # type: ignore
"""
``JNLE``, ``CMOVNLE``, ``SETNLE``
"""
|
LeetCode/python3/1025.py | ZintrulCre/LeetCode_Archiver | 279 | 36682 | class Solution:
def divisorGame(self, N: int) -> bool:
return True if N % 2 == 0 else False |
gabbi/exception.py | scottwallacesh/gabbi | 145 | 36683 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Gabbi specific exceptions."""
class GabbiDataLoadError(ValueError):
"""An exception to alert when data streams cannot be loaded."""
pass
class GabbiFormatError(ValueError):
"""An exception to encapsulate poorly formed test data."""
pass
class GabbiSyntaxWarning(SyntaxWarning):
"""A warning about syntax that is not desirable."""
pass
|
availability/__init__.py | Leader0721/ManyIP | 629 | 36694 | <gh_stars>100-1000
# -*- coding: UTF-8 -*-
import config
import gevent
import availability.check
from persistence import persister
import time
def crawl_worker(queue_verification, queue_persistence):
"""
    Process that checks the availability of newly crawled proxies
    :param queue_verification: queue of proxies awaiting verification
    :param queue_persistence: queue of verified proxies awaiting persistence
:return:
"""
while True:
spawns = list()
for i in range(config.COROUTINE_NUM):
proxy = queue_verification.get()
spawns.append(gevent.spawn(availability.check.crawl_handle, 'http', proxy, queue_persistence))
spawns.append(gevent.spawn(availability.check.crawl_handle, 'https', proxy, queue_persistence))
gevent.joinall(spawns)
def store_worker():
"""
    Process that periodically re-verifies the availability of proxies already saved in storage
"""
while True:
all_proxies = persister.list(count='all', columns='all')
spawns = list()
for proxy in all_proxies:
if proxy['protocol'] == 'http':
spawns.append(gevent.spawn(availability.check.store_handle, 'http', proxy, persister))
else:
spawns.append(gevent.spawn(availability.check.store_handle, 'https', proxy, persister))
if len(spawns) == config.COROUTINE_NUM:
gevent.joinall(spawns)
spawns.clear()
gevent.joinall(spawns)
spawns.clear()
time.sleep(config.PROXY_STORE_CHECK_SEC)
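# A hedged sketch of how these two workers might be launched; the
# multiprocessing/queue wiring below is an assumption, not the project's actual
# entry point, so it is left as a comment.
#
# from multiprocessing import Process, Queue
# queue_verification, queue_persistence = Queue(), Queue()
# Process(target=crawl_worker, args=(queue_verification, queue_persistence)).start()
# Process(target=store_worker).start()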
|
slack_sdk/scim/v1/user.py | priya1puresoftware/python-slack-sdk | 2,486 | 36700 | <filename>slack_sdk/scim/v1/user.py
from typing import Optional, Any, List, Dict, Union
from .default_arg import DefaultArg, NotGiven
from .internal_utils import _to_dict_without_not_given, _is_iterable
from .types import TypeAndValue
class UserAddress:
country: Union[Optional[str], DefaultArg]
locality: Union[Optional[str], DefaultArg]
postal_code: Union[Optional[str], DefaultArg]
primary: Union[Optional[bool], DefaultArg]
region: Union[Optional[str], DefaultArg]
street_address: Union[Optional[str], DefaultArg]
unknown_fields: Dict[str, Any]
def __init__(
self,
*,
country: Union[Optional[str], DefaultArg] = NotGiven,
locality: Union[Optional[str], DefaultArg] = NotGiven,
postal_code: Union[Optional[str], DefaultArg] = NotGiven,
primary: Union[Optional[bool], DefaultArg] = NotGiven,
region: Union[Optional[str], DefaultArg] = NotGiven,
street_address: Union[Optional[str], DefaultArg] = NotGiven,
**kwargs,
) -> None:
self.country = country
self.locality = locality
self.postal_code = postal_code
self.primary = primary
self.region = region
self.street_address = street_address
self.unknown_fields = kwargs
def to_dict(self) -> dict:
return _to_dict_without_not_given(self)
class UserEmail(TypeAndValue):
pass
class UserPhoneNumber(TypeAndValue):
pass
class UserRole(TypeAndValue):
pass
class UserGroup:
display: Union[Optional[str], DefaultArg]
value: Union[Optional[str], DefaultArg]
unknown_fields: Dict[str, Any]
def __init__(
self,
*,
display: Union[Optional[str], DefaultArg] = NotGiven,
value: Union[Optional[str], DefaultArg] = NotGiven,
**kwargs,
) -> None:
self.display = display
self.value = value
self.unknown_fields = kwargs
def to_dict(self) -> dict:
return _to_dict_without_not_given(self)
class UserMeta:
created: Union[Optional[str], DefaultArg]
location: Union[Optional[str], DefaultArg]
unknown_fields: Dict[str, Any]
def __init__(
self,
created: Union[Optional[str], DefaultArg] = NotGiven,
location: Union[Optional[str], DefaultArg] = NotGiven,
**kwargs,
) -> None:
self.created = created
self.location = location
self.unknown_fields = kwargs
def to_dict(self) -> dict:
return _to_dict_without_not_given(self)
class UserName:
family_name: Union[Optional[str], DefaultArg]
given_name: Union[Optional[str], DefaultArg]
unknown_fields: Dict[str, Any]
def __init__(
self,
family_name: Union[Optional[str], DefaultArg] = NotGiven,
given_name: Union[Optional[str], DefaultArg] = NotGiven,
**kwargs,
) -> None:
self.family_name = family_name
self.given_name = given_name
self.unknown_fields = kwargs
def to_dict(self) -> dict:
return _to_dict_without_not_given(self)
class UserPhoto:
type: Union[Optional[str], DefaultArg]
value: Union[Optional[str], DefaultArg]
unknown_fields: Dict[str, Any]
def __init__(
self,
type: Union[Optional[str], DefaultArg] = NotGiven,
value: Union[Optional[str], DefaultArg] = NotGiven,
**kwargs,
) -> None:
self.type = type
self.value = value
self.unknown_fields = kwargs
def to_dict(self) -> dict:
return _to_dict_without_not_given(self)
class User:
active: Union[Optional[bool], DefaultArg]
addresses: Union[Optional[List[UserAddress]], DefaultArg]
display_name: Union[Optional[str], DefaultArg]
emails: Union[Optional[List[TypeAndValue]], DefaultArg]
external_id: Union[Optional[str], DefaultArg]
groups: Union[Optional[List[UserGroup]], DefaultArg]
id: Union[Optional[str], DefaultArg]
meta: Union[Optional[UserMeta], DefaultArg]
name: Union[Optional[UserName], DefaultArg]
nick_name: Union[Optional[str], DefaultArg]
phone_numbers: Union[Optional[List[TypeAndValue]], DefaultArg]
photos: Union[Optional[List[UserPhoto]], DefaultArg]
profile_url: Union[Optional[str], DefaultArg]
roles: Union[Optional[List[TypeAndValue]], DefaultArg]
schemas: Union[Optional[List[str]], DefaultArg]
timezone: Union[Optional[str], DefaultArg]
title: Union[Optional[str], DefaultArg]
user_name: Union[Optional[str], DefaultArg]
unknown_fields: Dict[str, Any]
def __init__(
self,
*,
active: Union[Optional[bool], DefaultArg] = NotGiven,
addresses: Union[
Optional[List[Union[UserAddress, Dict[str, Any]]]], DefaultArg
] = NotGiven,
display_name: Union[Optional[str], DefaultArg] = NotGiven,
emails: Union[
Optional[List[Union[TypeAndValue, Dict[str, Any]]]], DefaultArg
] = NotGiven,
external_id: Union[Optional[str], DefaultArg] = NotGiven,
groups: Union[
Optional[List[Union[UserGroup, Dict[str, Any]]]], DefaultArg
] = NotGiven,
id: Union[Optional[str], DefaultArg] = NotGiven,
meta: Union[Optional[Union[UserMeta, Dict[str, Any]]], DefaultArg] = NotGiven,
name: Union[Optional[Union[UserName, Dict[str, Any]]], DefaultArg] = NotGiven,
nick_name: Union[Optional[str], DefaultArg] = NotGiven,
phone_numbers: Union[
Optional[List[Union[TypeAndValue, Dict[str, Any]]]], DefaultArg
] = NotGiven,
photos: Union[
Optional[List[Union[UserPhoto, Dict[str, Any]]]], DefaultArg
] = NotGiven,
profile_url: Union[Optional[str], DefaultArg] = NotGiven,
roles: Union[
Optional[List[Union[TypeAndValue, Dict[str, Any]]]], DefaultArg
] = NotGiven,
schemas: Union[Optional[List[str]], DefaultArg] = NotGiven,
timezone: Union[Optional[str], DefaultArg] = NotGiven,
title: Union[Optional[str], DefaultArg] = NotGiven,
user_name: Union[Optional[str], DefaultArg] = NotGiven,
**kwargs,
) -> None:
self.active = active
self.addresses = ( # type: ignore
[a if isinstance(a, UserAddress) else UserAddress(**a) for a in addresses]
if _is_iterable(addresses)
else addresses
)
self.display_name = display_name
self.emails = ( # type: ignore
[a if isinstance(a, TypeAndValue) else TypeAndValue(**a) for a in emails]
if _is_iterable(emails)
else emails
)
self.external_id = external_id
self.groups = ( # type: ignore
[a if isinstance(a, UserGroup) else UserGroup(**a) for a in groups]
if _is_iterable(groups)
else groups
)
self.id = id
self.meta = ( # type: ignore
UserMeta(**meta) if meta is not None and isinstance(meta, dict) else meta
)
self.name = ( # type: ignore
UserName(**name) if name is not None and isinstance(name, dict) else name
)
self.nick_name = nick_name
self.phone_numbers = ( # type: ignore
[
a if isinstance(a, TypeAndValue) else TypeAndValue(**a)
for a in phone_numbers
]
if _is_iterable(phone_numbers)
else phone_numbers
)
self.photos = ( # type: ignore
[a if isinstance(a, UserPhoto) else UserPhoto(**a) for a in photos]
if _is_iterable(photos)
else photos
)
self.profile_url = profile_url
self.roles = ( # type: ignore
[a if isinstance(a, TypeAndValue) else TypeAndValue(**a) for a in roles]
if _is_iterable(roles)
else roles
)
self.schemas = schemas
self.timezone = timezone
self.title = title
self.user_name = user_name
self.unknown_fields = kwargs
def to_dict(self):
return _to_dict_without_not_given(self)
def __repr__(self):
return f"<slack_sdk.scim.{self.__class__.__name__}: {self.to_dict()}>"
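# A minimal, hedged sketch of constructing a SCIM User and serializing it; the
# field values are made up and the TypeAndValue constructor signature is assumed
# from its name, so the sketch is kept as a comment.
#
# user = User(
#     user_name="jdoe",
#     name={"given_name": "John", "family_name": "Doe"},
#     emails=[{"type": "work", "value": "jdoe@example.com"}],
# )
# print(user.to_dict())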
|
crowdsourcing/permissions/user.py | Kyeongan/crowdsource-platform | 138 | 36743 | <reponame>Kyeongan/crowdsource-platform
from rest_framework import permissions
from csp import settings
from rest_framework.exceptions import PermissionDenied
class IsWorker(permissions.BasePermission):
def has_permission(self, request, view):
return request.user.profile.is_worker
class IsRequester(permissions.BasePermission):
def has_object_permission(self, request, view, object):
return request.user.profile.is_requester
class CanCreateAccount(permissions.BasePermission):
def has_permission(self, request, view):
if view.action == 'create' and not (request.user.is_staff or settings.REGISTRATION_ALLOWED):
raise PermissionDenied(detail='We are currently in closed beta. '
'If you\'d like an account, email <EMAIL> '
'with a short description of what you\'d like to use Daemo for.')
return True
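# A hedged sketch of how these permission classes would typically be attached to
# a DRF view; the viewset name below is hypothetical.
#
# from rest_framework import viewsets
#
# class WorkerTaskViewSet(viewsets.ModelViewSet):
#     permission_classes = [IsWorker]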
|
run_w2v.py | hugochan/K-Competitive-Autoencoder-for-Text-Analytics | 133 | 36768 | <reponame>hugochan/K-Competitive-Autoencoder-for-Text-Analytics<gh_stars>100-1000
'''
Created on Jan, 2017
@author: hugo
'''
from __future__ import absolute_import
import argparse
from os import path
import timeit
import numpy as np
from autoencoder.baseline.word2vec import Word2Vec, save_w2v, load_w2v
from autoencoder.baseline.doc_word2vec import doc_word2vec
from autoencoder.utils.io_utils import load_json, dump_json, write_file
from autoencoder.preprocessing.preprocessing import load_corpus
# from autoencoder.datasets.reuters import CorpusIterReuters
from autoencoder.datasets.the20news import CorpusIter20News
# from autoencoder.datasets.movie_review_data import CorpusIterMRD
# from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus
def train(args):
vocab = load_json(args.vocab)
# import pdb;pdb.set_trace()
# load corpus
corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=False)
# corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False)
# corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False)
# corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=False)
# print len([1 for x in corpus])
corpus_iter = lambda: ([word for word in sentence if word in vocab] for sentence in corpus)
w2v = Word2Vec(args.n_dim, window=args.window_size, \
negative=args.negative, epoches=args.n_epoch)
start = timeit.default_timer()
w2v.train(corpus_iter)
print 'runtime: %ss' % (timeit.default_timer() - start)
save_w2v(w2v.model, args.save_model)
import pdb;pdb.set_trace()
def test(args):
corpus = load_corpus(args.corpus[0])
docs, vocab_dict = corpus['docs'], corpus['vocab']
doc_codes = doc_word2vec(docs, revdict(vocab_dict), args.load_model, args.output, avg=True)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--train', action='store_true', help='train flag')
parser.add_argument('--corpus', nargs='*', required=True, type=str, help='path to the corpus dir (in training phase) or file (in test phase)')
parser.add_argument('-doc', '--docnames', type=str, help='path to the docnames file (in training phase)')
parser.add_argument('--vocab', required=True, type=str, help='path to the vocab file')
parser.add_argument('-ne', '--n_epoch', required=True, type=int, help='num of epoches')
parser.add_argument('-nd', '--n_dim', type=int, help='num of dimensions')
parser.add_argument('-ws', '--window_size', required=True, type=int, help='window size')
parser.add_argument('-neg', '--negative', required=True, type=int, help='num of negative samples')
parser.add_argument('-sm', '--save_model', type=str, default='w2v.mod', help='path to the output model')
parser.add_argument('-lm', '--load_model', type=str, help='path to the trained model')
parser.add_argument('-o', '--output', type=str, help='path to the output doc codes file')
args = parser.parse_args()
if args.train:
if not args.n_dim:
raise Exception('n_dim arg needed in training phase')
train(args)
else:
if not args.output:
raise Exception('output arg needed in test phase')
if not args.load_model:
raise Exception('load_model arg needed in test phase')
test(args)
if __name__ == '__main__':
main()
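# Example training invocation using the arguments defined in main(); the corpus
# directory and vocab path are placeholders:
#   python run_w2v.py --train --corpus <corpus_dir> --vocab <vocab.json> \
#       -ne 5 -nd 100 -ws 5 -neg 5 -sm w2v.mod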
|
loaner/deployments/lib/password.py | gng-demo/travisfix | 175 | 36775 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This library provides a random password generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl import flags
from absl import logging
_MIN = 8
_MAX = 100
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'password_length', _MAX,
'The length of the password to be generated for the Grab n Go Role Account.'
'\nNOTE: The length must be between 8 and 100 and must be compliant with '
'the G Suite Admin password settings.\nThe Security Settings can be found '
'in the Google Admin console: admin.google.com'
)
flags.register_validator(
'password_length', lambda length: length >= _MIN and length <= _MAX,
'Password length must be between {} and {} characters.'.format(_MIN, _MAX),
)
def generate(length):
"""Generates a new password of a given length.
Args:
length: int, the length of the password to generate.
Returns:
A random password of type string with the given length.
Raises:
ValueError: if the length provided is invalid.
"""
if length < _MIN or length > _MAX:
raise ValueError(
'password length must be between {!r} and {!r} characters length '
'provided was: {!r}'.format(_MIN, _MAX, length))
logging.debug('Generating a password with length: %r.', length)
chars = (
'abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'!$%^&*()-_=+@:;~#,.<>? '
)
password = ''
rand = random.SystemRandom()
while len(password) < length:
password += rand.choice(chars)
return password
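# A minimal sketch of calling generate() directly; in the deployment scripts the
# length would normally come from FLAGS.password_length rather than the
# hard-coded value used here.
if __name__ == '__main__':
  print(generate(16))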
|
openmdao/utils/tests/test_cs_safe.py | friedenhe/OpenMDAO | 451 | 36793 | <gh_stars>100-1000
import numpy as np
import unittest
from openmdao.utils import cs_safe
from openmdao.utils.assert_utils import assert_near_equal
class TestCSSafeFuctions(unittest.TestCase):
def test_abs(self):
test_data = np.array([1, -1, -2, 2, 5.675, -5.676], dtype='complex')
assert_near_equal(cs_safe.abs(test_data), np.abs(test_data))
test_data += complex(0,1e-50)
cs_derivs = cs_safe.abs(test_data).imag/1e-50
expected = [1, -1, -1, 1, 1, -1]
assert_near_equal(cs_derivs, expected)
def test_norm(self):
test_data = np.array([[1, 2, 3, -4],[5, 6, 7, -8]], dtype='complex')
assert_near_equal(cs_safe.norm(test_data,axis=None), np.linalg.norm(test_data,axis=None))
assert_near_equal(cs_safe.norm(test_data,axis=0), np.linalg.norm(test_data,axis=0))
assert_near_equal(cs_safe.norm(test_data,axis=1), np.linalg.norm(test_data,axis=1))
deriv_test_data = test_data.copy()
deriv_test_data[0,0] += complex(0, 1e-50)
cs_deriv = cs_safe.norm(deriv_test_data).imag/1e-50
expected = 1/np.linalg.norm(test_data) * test_data[0,0].real
assert_near_equal(cs_deriv, expected)
def test_arctan2(self):
x = np.array([-1, +1, +1, -1], dtype='complex')
y = np.array([-1, -1, +1, +1], dtype='complex')
expected = np.array([-2.35619449, -0.78539816, 0.78539816, 2.35619449])
assert_near_equal(cs_safe.arctan2(y, x), expected, tolerance=1e-8)
x += complex(0,1e-50)
y += complex(0,1e-50)
cs_derivs = cs_safe.arctan2(y, x).imag/1e-50
expected = [0., 1., 0., -1.]
assert_near_equal(cs_derivs, expected)
if __name__ == "__main__":
unittest.main() |
main.py | tuzhucheng/sent-sim | 109 | 36797 | """
Driver program for training and evaluation.
"""
import argparse
import logging
import numpy as np
import random
import torch
import torch.optim as O
from datasets import get_dataset, get_dataset_configurations
from models import get_model
from runners import Runner
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sentence similarity models')
parser.add_argument('--model', default='sif', choices=['sif', 'mpcnn', 'mpcnn-lite', 'bimpm'], help='Model to use')
parser.add_argument('--dataset', default='sick', choices=['sick', 'wikiqa'], help='Dataset to use')
parser.add_argument('--batch-size', type=int, default=64, help='Batch size')
parser.add_argument('--epochs', type=int, default=15, help='Number of epochs')
parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')
parser.add_argument('--regularization', type=float, default=3e-4, help='Regularization')
parser.add_argument('--seed', type=int, default=1234, help='Seed for reproducibility')
parser.add_argument('--device', type=int, default=0, help='Device, -1 for CPU')
    parser.add_argument('--log-interval', type=int, default=50, help='Logging interval in batches')
# Special options for SIF model
parser.add_argument('--unsupervised', action='store_true', default=False, help='Set this flag to use unsupervised mode.')
parser.add_argument('--alpha', type=float, default=1e-3, help='Smoothing term for smooth inverse frequency baseline model')
parser.add_argument('--no-remove-special-direction', action='store_true', default=False, help='Set to not remove projection onto first principal component')
parser.add_argument('--frequency-dataset', default='enwiki', choices=['train', 'enwiki'])
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device != -1:
torch.cuda.manual_seed(args.seed)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
dataset_cls, train_loader, dev_loader, test_loader, embedding = get_dataset(args)
model = get_model(args, dataset_cls, embedding)
if args.model == 'sif':
model.populate_word_frequency_estimation(train_loader)
total_params = 0
for param in model.parameters():
size = [s for s in param.size()]
total_params += np.prod(size)
logger.info('Total number of parameters: %s', total_params)
loss_fn, metrics, y_to_score, resolved_pred_to_score = get_dataset_configurations(args)
optimizer = O.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.regularization)
runner = Runner(model, loss_fn, metrics, optimizer, y_to_score, resolved_pred_to_score, args.device, None)
runner.run(args.epochs, train_loader, dev_loader, test_loader, args.log_interval)
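# Example invocation using the command-line flags defined above; the model and
# dataset names come from the declared choices lists:
#   python main.py --model mpcnn --dataset sick --epochs 15 --device 0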
|
amazon_main_xgboost.py | twankim/ensemble_amazon | 236 | 36814 | """ Amazon Access Challenge Code for ensemble
<NAME> script for Amazon .
xgboost on input data
based on <NAME>'s Script.
"""
from __future__ import division
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
import XGBoostClassifier as xg
from sklearn.cross_validation import StratifiedKFold
SEED = 42 # always use a seed for randomized procedures
def load_data(filename, use_labels=True):
"""
Load data from CSV files and return them as numpy arrays
The use_labels parameter indicates whether one should
read the first column (containing class labels). If false,
return all 0s.
"""
# load column 1 to 8 (ignore last one)
data = np.loadtxt(open( filename), delimiter=',',
usecols=range(1, 9), skiprows=1)
if use_labels:
labels = np.loadtxt(open( filename), delimiter=',',
usecols=[0], skiprows=1)
else:
labels = np.zeros(data.shape[0])
return labels, data
def save_results(predictions, filename):
"""Given a vector of predictions, save results in CSV format."""
with open(filename, 'w') as f:
f.write("id,ACTION\n")
for i, pred in enumerate(predictions):
f.write("%d,%f\n" % (i + 1, pred))
def bagged_set(X_t,y_c,model, seed, estimators, xt, update_seed=True):
# create array object to hold predictions
baggedpred=[ 0.0 for d in range(0, (xt.shape[0]))]
#loop for as many times as we want bags
for n in range (0, estimators):
        # shuffle first; aids in increasing variance and forces different results
#X_t,y_c=shuffle(Xs,ys, random_state=seed+n)
if update_seed: # update seed if requested, to give a slightly different model
model.set_params(random_state=seed + n)
        model.fit(X_t,y_c) # fit model
preds=model.predict_proba(xt)[:,1] # predict probabilities
# update bag's array
for j in range (0, (xt.shape[0])):
baggedpred[j]+=preds[j]
# divide with number of bags to create an average estimate
for j in range (0, len(baggedpred)):
baggedpred[j]/=float(estimators)
# return probabilities
return np.array(baggedpred)
# using numpy to print results
def printfilcsve(X, filename):
np.savetxt(filename,X)
def main():
"""
Fit models and make predictions.
We'll use one-hot encoding to transform our categorical features
into binary features.
y and X will be numpy array objects.
"""
    filename="main_xgboost" # name prefix
#model = linear_model.LogisticRegression(C=3) # the classifier we'll use
model=xg.XGBoostClassifier(num_round=1000 ,nthread=25, eta=0.12, gamma=0.01,max_depth=12, min_child_weight=0.01, subsample=0.6,
colsample_bytree=0.7,objective='binary:logistic',seed=1)
# === load data in memory === #
print "loading data"
y, X = load_data('train.csv')
y_test, X_test = load_data('test.csv', use_labels=False)
# === one-hot encoding === #
# we want to encode the category IDs encountered both in
# the training and the test set, so we fit the encoder on both
encoder = preprocessing.OneHotEncoder()
encoder.fit(np.vstack((X, X_test)))
X = encoder.transform(X) # Returns a sparse matrix (see numpy.sparse)
X_test = encoder.transform(X_test)
# if you want to create new features, you'll need to compute them
# before the encoding, and append them to your dataset after
#create arrays to hold cv an dtest predictions
train_stacker=[ 0.0 for k in range (0,(X.shape[0])) ]
# === training & metrics === #
mean_auc = 0.0
bagging=20 # number of models trained with different seeds
n = 5 # number of folds in strattified cv
kfolder=StratifiedKFold(y, n_folds= n,shuffle=True, random_state=SEED)
i=0
for train_index, test_index in kfolder: # for each train and test pair of indices in the kfolder object
        # create training and validation sets
X_train, X_cv = X[train_index], X[test_index]
y_train, y_cv = np.array(y)[train_index], np.array(y)[test_index]
#print (" train size: %d. test size: %d, cols: %d " % ((X_train.shape[0]) ,(X_cv.shape[0]) ,(X_train.shape[1]) ))
# if you want to perform feature selection / hyperparameter
# optimization, this is where you want to do it
# train model and make predictions
preds=bagged_set(X_train,y_train,model, SEED , bagging, X_cv, update_seed=True)
# compute AUC metric for this CV fold
roc_auc = roc_auc_score(y_cv, preds)
print "AUC (fold %d/%d): %f" % (i + 1, n, roc_auc)
mean_auc += roc_auc
no=0
for real_index in test_index:
train_stacker[real_index]=(preds[no])
no+=1
i+=1
mean_auc/=n
print (" Average AUC: %f" % (mean_auc) )
print (" printing train datasets ")
printfilcsve(np.array(train_stacker), filename + ".train.csv")
# === Predictions === #
# When making predictions, retrain the model on the whole training set
preds=bagged_set(X, y,model, SEED, bagging, X_test, update_seed=True)
#create submission file
printfilcsve(np.array(preds), filename+ ".test.csv")
#save_results(preds, filename+"_submission_" +str(mean_auc) + ".csv")
if __name__ == '__main__':
main()
|
notebooks/data_cleaning/track_meta.py | roannav/learntools | 359 | 36845 | track = dict(
author_username='alexisbcook',
course_name='Data Cleaning',
course_url='https://www.kaggle.com/learn/data-cleaning',
course_forum_url='https://www.kaggle.com/learn-forum/172650'
)
lessons = [ {'topic': topic_name} for topic_name in
['Handling missing values', #1
'Scaling and normalization', #2
'Parsing dates', #3
'Character encodings', #4
'Inconsistent data entry'] #5
]
notebooks = [
dict(
filename='tut1.ipynb',
lesson_idx=0,
type='tutorial',
dataset_sources=['maxhorowitz/nflplaybyplay2009to2016'],
),
dict(
filename='ex1.ipynb',
lesson_idx=0,
type='exercise',
dataset_sources=['aparnashastry/building-permit-applications-data'],
scriptid=10824396
),
dict(
filename='tut2.ipynb',
lesson_idx=1,
type='tutorial',
),
dict(
filename='ex2.ipynb',
lesson_idx=1,
type='exercise',
dataset_sources=['kemical/kickstarter-projects'],
scriptid=10824404
),
dict(
filename='tut3.ipynb',
lesson_idx=2,
type='tutorial',
dataset_sources=['nasa/landslide-events']
),
dict(
filename='ex3.ipynb',
lesson_idx=2,
type='exercise',
dataset_sources=['usgs/earthquake-database', 'smithsonian/volcanic-eruptions'],
scriptid=10824403
),
dict(
filename='tut4.ipynb',
lesson_idx=3,
type='tutorial',
dataset_sources=['kemical/kickstarter-projects']
),
dict(
filename='ex4.ipynb',
lesson_idx=3,
type='exercise',
dataset_sources=['kwullum/fatal-police-shootings-in-the-us'],
scriptid=10824401
),
dict(
filename='tut5.ipynb',
lesson_idx=4,
type='tutorial',
dataset_sources=['alexisbcook/pakistan-intellectual-capital']
),
dict(
filename='ex5.ipynb',
lesson_idx=4,
type='exercise',
dataset_sources=['alexisbcook/pakistan-intellectual-capital'],
scriptid=10824407
),
] |
bookwyrm/migrations/0145_sitesettings_version.py | mouse-reeve/fedireads | 270 | 36847 | <reponame>mouse-reeve/fedireads
# Generated by Django 3.2.12 on 2022-03-16 18:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bookwyrm", "0144_alter_announcement_display_type"),
]
operations = [
migrations.AddField(
model_name="sitesettings",
name="version",
field=models.CharField(blank=True, max_length=10, null=True),
),
]
|
installation/templates/configuration/auth.py | piwaniuk/critic | 216 | 36927 | <gh_stars>100-1000
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2013 <NAME>, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Accepted password hash schemes. They need to be supported by the passlib
# Python package; see http://packages.python.org/passlib for details.
PASSWORD_HASH_SCHEMES = %(installation.config.password_hash_schemes)r
# Default password hash scheme. Must be included in PASSWORD_HASH_SCHEMES.
DEFAULT_PASSWORD_HASH_SCHEME = %(installation.config.default_password_hash_scheme)r
# (Approximate) minimum password hash time in seconds. Higher means safer
# passwords (more difficult to decrypt using brute-force) but slower sign-in
# operation.
MINIMUM_PASSWORD_HASH_TIME = %(installation.config.minimum_password_hash_time)r
# Calibrated minimum rounds per password hash scheme.
MINIMUM_ROUNDS = %(installation.config.minimum_rounds)r
# External authentication providers.
PROVIDERS = {
# GitHub OAuth-based authentication.
"github": {
"enabled": %(installation.config.provider_github.enabled)r,
# Allow authenticated user to create a Critic user.
"allow_user_registration": %(installation.config.provider_github.allow_user_registration)r,
# Verify user email addresses provided by GitHub.
"verify_email_addresses": %(installation.config.provider_github.verify_email_addresses)r,
# Client ID and secret. These are generated by registering an
# application at https://github.com/settings/applications/new.
"client_id": %(installation.config.provider_github.client_id)r,
"client_secret": %(installation.config.provider_github.client_secret)r,
# Bypass /createuser on first sign in, creating a user automatically.
"bypass_createuser": %(installation.config.provider_github.bypass_createuser)r,
# Authentication callback URI. This same URI must be provided
# to GitHub when registering the application. The path
# component must be "/oauth/github".
"redirect_uri": %(installation.config.provider_github.redirect_uri)r
},
# Google OAuth-based authentication.
"google": {
"enabled": %(installation.config.provider_google.enabled)r,
# Allow authenticated user to create a Critic user.
"allow_user_registration": %(installation.config.provider_google.allow_user_registration)r,
# Verify user email addresses provided by Google.
"verify_email_addresses": %(installation.config.provider_google.verify_email_addresses)r,
# Client ID and secret. These are generated by creating a project at
# https://cloud.google.com/console/project, and then creating an OAuth2
# client id using the project administration UI.
"client_id": %(installation.config.provider_google.client_id)r,
"client_secret": %(installation.config.provider_google.client_secret)r,
# Bypass /createuser on first sign in, creating a user automatically.
"bypass_createuser": %(installation.config.provider_google.bypass_createuser)r,
# Authentication callback URI. This same URI must be provided
# to Google when creating the OAuth2 client id. The path
# component must be "/oauth/google".
"redirect_uri": %(installation.config.provider_google.redirect_uri)r
},
}
# Authentication databases.
DATABASES = {
# Using Critic's own user database for authentication.
"internal": {},
# Using an LDAP database for authentication.
"ldap": {
# Input fields.
#
# Each element is a tuple containing:
# [0]: True if the field should use <input type=password>
# [1]: Internal field identifier
# [2]: Field label
# [3]: (Optional) Longer description / help text
"fields": [
(False, "username", "Username:"),
(True, "password", "Password:"),
],
# LDAP server URL.
"url": "%(installation.config.ldap_url)s",
# Use TLS when connecting to LDAP server.
"use_tls": True,
# Credentials field.
#
# Identifier of the field whose value will be used as the credentials
# (e.g. password) in the bind request used for authentication.
"credentials": "password",
        # The following two values are both interpreted as Python format strings
# that can reference field values, e.g. using "%%(username)s". The input
# values will have been escaped for safe usage in LDAP expressions.
# LDAP search base.
"search_base": "%(installation.config.ldap_search_base)s",
# LDAP search filter.
"search_filter": "(uid=%%(username)s)",
# The following settings control if and how Critic user records are
# created after successful authentication of a user.
# If true, Critic user records are created automatically if
# authentication succeeds but a matching record is not found.
"create_user": %(installation.config.ldap_create_user)r,
# User name LDAP attribute.
#
# This is the LDAP attribute whose value is used as the Critic username,
# both when looking for an existing user record and when creating a new
# one (if one isn't found.)
#
# If the attribute is missing or empty it will be considered an
# authentication error.
"username_attribute": "%(installation.config.ldap_username_attribute)s",
# Full name LDAP attribute.
#
# This is the LDAP attribute to use as the (initial) full name when
# creating a new Critic user record. It is not used if an existing user
# record is found.
#
# If the attribute is missing or empty, the user is created with the
# username as full name.
"fullname_attribute": "%(installation.config.ldap_fullname_attribute)s",
# Email LDAP attribute.
#
# This is the LDAP attribute to use as the (initial) primary email
# address when creating a new Critic user record. It is not used if an
# existing user record is found.
#
# If the attribute is missing or empty, the user is created with no
# primary email address.
"email_attribute": "%(installation.config.ldap_email_attribute)s",
# List of required LDAP groups.
#
# If the list is empty, no group membership is required.
"require_groups": [
# {
# # Distinguished name of the required group.
# "dn": "cn=SomeGroup,ou=Groups,dc=example,dc=com",
#
# # Group attribute containing the list of members.
# "members_attribute": "memberUid",
#
# # Value to search for in the list of members.
# #
# # The value is interpreted as a Python format string, and can
# # reference field values. It can also reference the
# # distinguished name of the user signing in as "%%(dn)s".
# "member_value": "%%(username)s",
# },
],
# Maximum age of cached successful authentication attempts, in seconds.
# If set to zero, caching is disabled altogether.
"cache_max_age": %(installation.config.ldap_cache_max_age)r,
},
}
DATABASE = %(installation.config.auth_database)r
ENABLE_ACCESS_TOKENS = %(installation.config.enable_access_tokens)r
|
elliot/utils/read.py | gategill/elliot | 175 | 36960 | <reponame>gategill/elliot<filename>elliot/utils/read.py
"""
Module description:
"""
__version__ = '0.3.1'
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
import pandas as pd
import configparser
import pickle
import numpy as np
import os
from types import SimpleNamespace
def read_csv(filename):
"""
Args:
filename (str): csv file path
Return:
A pandas dataframe.
"""
df = pd.read_csv(filename, index_col=False)
return df
def read_np(filename):
"""
Args:
filename (str): filename of numpy to load
Return:
The loaded numpy.
"""
return np.load(filename)
def read_imagenet_classes_txt(filename):
"""
Args:
filename (str): txt file path
Return:
A list with 1000 imagenet classes as strings.
"""
with open(filename) as f:
idx2label = eval(f.read())
return idx2label
def read_config(sections_fields):
"""
Args:
sections_fields (list): list of fields to retrieve from configuration file
Return:
A list of configuration values.
"""
config = configparser.ConfigParser()
config.read('./config/configs.ini')
configs = []
for s, f in sections_fields:
configs.append(config[s][f])
return configs
def read_multi_config():
"""
It reads a config file that contains the configuration parameters for the recommendation systems.
Return:
A list of configuration settings.
"""
config = configparser.ConfigParser()
config.read('./config/multi.ini')
configs = []
for section in config.sections():
single_config = SimpleNamespace()
single_config.name = section
for field, value in config.items(section):
            setattr(single_config, field, value)
configs.append(single_config)
return configs
def load_obj(name):
"""
Load the pkl object by name
:param name: name of file
:return:
"""
with open(name, 'rb') as f:
return pickle.load(f)
def find_checkpoint(dir, restore_epochs, epochs, rec, best=0):
"""
:param dir: directory of the model where we start from the reading.
:param restore_epochs: epoch from which we start from.
:param epochs: epochs from which we restore (0 means that we have best)
:param rec: recommender model
:param best: 0 No Best - 1 Search for the Best
:return:
"""
if best:
for r, d, f in os.walk(dir):
for file in f:
                if 'best-weights-' in file:
return dir + file.split('.')[0]
return ''
if rec == "apr" and restore_epochs < epochs:
# We have to restore from an execution of bprmf
dir_stored_models = os.walk('/'.join(dir.split('/')[:-2]))
for dir_stored_model in dir_stored_models:
if 'bprmf' in dir_stored_model[0]:
dir = dir_stored_model[0] + '/'
break
for r, d, f in os.walk(dir):
for file in f:
if 'weights-{0}-'.format(restore_epochs) in file:
return dir + file.split('.')[0]
return ''
|
lib/python/batch_sim/gcloud_fakes.py | leozz37/makani | 1,178 | 36976 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake gcloud utils for testing without cloud access."""
from makani.lib.python.batch_sim import gcloud_util
class FakeFilesystem(object):
"""A fake filesystem.
A FakeFilesystem instance is simply a dictionary of file names to file
contents, with Save() and Load() methods to make access look a bit more
file-like.
The class itself also contains LOCAL and CLOUD variables intended to store
references to particular FakeFilesystem instances. These are initialized to
None and intended to be defined as needed via mock.patch. For example:
with mock.patch('makani.batch_sim.gcloud_fakes.FakeFilesystem.LOCAL',
FakeFilesystem()) as local_fs:
<Do something with local files>
with mock.patch('makani.batch_sim.gcloud_fakes.FakeFilesystem.CLOUD',
FakeFilesystem()) as remote_fs:
<Do something with remote files>
In particular, many of the fakes in this module use FakeFilesystem.LOCAL and
FakeFilesystem.CLOUD to simulate actual storage patterns.
"""
LOCAL = None
CLOUD = None
def __init__(self):
self.files = {}
def Save(self, filename, descriptor):
self.files[filename] = descriptor
def Load(self, filename):
return self.files[filename]
class FakeCloudStorageApi(object):
"""A fake of gcloud_util.CloudStorageApi.
This performs simple transfers between FakeFilesystem.LOCAL and
FakeFilesystem.CLOUD.
To simulate working with different local filesystems, FakeFilesystem.LOCAL
may be patched before instantiating the FakeCloudStorageApi.
"""
def __init__(self, bucket=None):
self._local_fs = FakeFilesystem.LOCAL
self._cloud_fs = FakeFilesystem.CLOUD
self._bucket = bucket
def _RemoveBucketFromCloudName(self, cloud_name):
cloud_name = cloud_name.strip()
if cloud_name.startswith('gs://'):
_, cloud_name = gcloud_util.ParseBucketAndPath(cloud_name, None)
return cloud_name
def DownloadFile(self, cloud_name, stream):
cloud_name = self._RemoveBucketFromCloudName(cloud_name)
stream.write(self._cloud_fs.Load(cloud_name))
def UploadFile(self, local_name, cloud_name):
cloud_name = self._RemoveBucketFromCloudName(cloud_name)
self._cloud_fs.Save(cloud_name, self._local_fs.Load(local_name))
def UploadStream(self, stream, cloud_name):
cloud_name = self._RemoveBucketFromCloudName(cloud_name)
self._cloud_fs.Save(cloud_name, stream.getvalue())
def DeletePrefix(self, prefix):
for filename in self.List(prefix):
if filename.startswith(prefix):
self._cloud_fs.files.pop(filename)
def DeleteFile(self, cloud_name):
cloud_name = self._RemoveBucketFromCloudName(cloud_name)
self._cloud_fs.files.pop(cloud_name)
def List(self, prefix):
prefix = self._RemoveBucketFromCloudName(prefix)
return [name for name in self._cloud_fs.files if name.startswith(prefix)]
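# Editor's sketch (not part of the original module): exercising the fakes by
# assigning the class-level filesystems directly instead of using mock.patch.
# The bucket name and file paths below are made up for illustration.
def _ExampleRoundTrip():
  FakeFilesystem.LOCAL = FakeFilesystem()
  FakeFilesystem.CLOUD = FakeFilesystem()
  FakeFilesystem.LOCAL.Save('report.json', '{"ok": true}')
  api = FakeCloudStorageApi(bucket='fake-bucket')
  api.UploadFile('report.json', 'results/report.json')
  return api.List('results/')  # ['results/report.json']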
|
LeetCode/python3/136.py | ZintrulCre/LeetCode_Archiver | 279 | 36982 | class Solution:
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
k = 0
for n in nums:
k ^= n
return k |
deps/libffi/generate-osx-source-and-headers.py | liuqsqq/node-ffi | 3,373 | 36995 | #!/usr/bin/env python
import subprocess
import re
import os
import errno
import collections
import sys
class Platform(object):
pass
sdk_re = re.compile(r'.*-sdk ([a-zA-Z0-9.]*)')
def sdkinfo(sdkname):
ret = {}
for line in subprocess.Popen(['xcodebuild', '-sdk', sdkname, '-version'], stdout=subprocess.PIPE).stdout:
kv = line.strip().split(': ', 1)
if len(kv) == 2:
k,v = kv
ret[k] = v
return ret
desktop_sdk_info = sdkinfo('macosx')
def latest_sdks():
latest_desktop = None
for line in subprocess.Popen(['xcodebuild', '-showsdks'], stdout=subprocess.PIPE).stdout:
match = sdk_re.match(line)
if match:
if 'OS X' in line:
latest_desktop = match.group(1)
return latest_desktop
desktop_sdk = latest_sdks()
class desktop_platform_32(Platform):
sdk='macosx'
arch = 'i386'
name = 'mac32'
triple = 'i386-apple-darwin10'
sdkroot = desktop_sdk_info['Path']
prefix = "#if defined(__i386__) && !defined(__x86_64__)\n\n"
suffix = "\n\n#endif"
class desktop_platform_64(Platform):
sdk='macosx'
arch = 'x86_64'
name = 'mac'
triple = 'x86_64-apple-darwin10'
sdkroot = desktop_sdk_info['Path']
prefix = "#if !defined(__i386__) && defined(__x86_64__)\n\n"
suffix = "\n\n#endif"
def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
out_filename = filename
if file_suffix:
split_name = os.path.splitext(filename)
out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])
with open(os.path.join(src_dir, filename)) as in_file:
with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
if prefix:
out_file.write(prefix)
out_file.write(in_file.read())
if suffix:
out_file.write(suffix)
headers_seen = collections.defaultdict(set)
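# Maps each header file name to the set of architectures for which an
# arch-suffixed copy was written; main() later uses this to emit umbrella
# headers that #include each per-arch variant.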
def move_source_tree(src_dir, dest_dir, dest_include_dir, arch=None, prefix=None, suffix=None):
for root, dirs, files in os.walk(src_dir, followlinks=True):
relroot = os.path.relpath(root,src_dir)
def move_dir(arch, prefix='', suffix='', files=[]):
for file in files:
file_suffix = None
if file.endswith('.h'):
if dest_include_dir:
file_suffix = arch
if arch:
headers_seen[file].add(arch)
move_file(root, dest_include_dir, file, arch, prefix=prefix, suffix=suffix)
elif dest_dir:
outroot = os.path.join(dest_dir, relroot)
move_file(root, outroot, file, prefix=prefix, suffix=suffix)
if relroot == '.':
move_dir(arch=arch,
files=files,
prefix=prefix,
suffix=suffix)
elif relroot == 'x86':
move_dir(arch='i386',
prefix="#if defined(__i386__) && !defined(__x86_64__)\n\n",
suffix="\n\n#endif",
files=files)
move_dir(arch='x86_64',
prefix="#if !defined(__i386__) && defined(__x86_64__)\n\n",
suffix="\n\n#endif",
files=files)
def build_target(platform):
def xcrun_cmd(cmd):
return subprocess.check_output(['xcrun', '-sdk', platform.sdkroot, '-find', cmd]).strip()
build_dir = 'build_' + platform.name
if not os.path.exists(build_dir):
os.makedirs(build_dir)
env = dict(CC=xcrun_cmd('clang'),
LD=xcrun_cmd('ld'),
CFLAGS='-arch %s -isysroot %s -mmacosx-version-min=10.6' % (platform.arch, platform.sdkroot))
working_dir=os.getcwd()
try:
os.chdir(build_dir)
subprocess.check_call(['../configure', '-host', platform.triple], env=env)
move_source_tree('.', None, '../osx/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
move_source_tree('./include', None, '../osx/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
finally:
os.chdir(working_dir)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
def main():
move_source_tree('src', 'osx/src', 'osx/include')
move_source_tree('include', None, 'osx/include')
build_target(desktop_platform_32)
build_target(desktop_platform_64)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
with open(os.path.join('osx/include', header_name), 'w') as header:
for arch in archs:
header.write('#include <%s_%s%s>\n' % (basename, arch, suffix))
if __name__ == '__main__':
main()
|
src/pretix/base/templatetags/cache_large.py | Janfred/pretix | 1,248 | 36999 | <filename>src/pretix/base/templatetags/cache_large.py
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.conf import settings
from django.template import Library, Node, TemplateSyntaxError, Variable
from django.templatetags.cache import CacheNode
register = Library()
class DummyNode(Node):
def __init__(self, nodelist, *args):
self.nodelist = nodelist
def render(self, context):
value = self.nodelist.render(context)
return value
@register.tag('cache_large')
def do_cache(parser, token):
nodelist = parser.parse(('endcache_large',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
if not settings.CACHE_LARGE_VALUES_ALLOWED:
return DummyNode(
nodelist,
)
return CacheNode(
nodelist, parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(t) for t in tokens[3:]],
Variable(repr(settings.CACHE_LARGE_VALUES_ALIAS)),
)
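# A rough usage sketch in a template (editor's note, not from the original file);
# the timeout, fragment name and vary-on value below are purely illustrative:
#
#   {% load cache_large %}
#   {% cache_large 300 event_widget event.pk %}
#       ...expensive fragment...
#   {% endcache_large %}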
|
openbook_posts/migrations/0022_auto_20190311_1432.py | TamaraAbells/okuna-api | 164 | 37003 | <gh_stars>100-1000
# Generated by Django 2.2b1 on 2019-03-11 13:32
from django.db import migrations
import imagekit.models.fields
import openbook_posts.helpers
class Migration(migrations.Migration):
dependencies = [
('openbook_posts', '0021_auto_20190309_1532'),
]
operations = [
migrations.AlterField(
model_name='postimage',
name='image',
field=imagekit.models.fields.ProcessedImageField(height_field='height', null=True, upload_to=openbook_posts.helpers.upload_to_post_image_directory, verbose_name='image', width_field='width'),
),
]
|
functions/include/serializer.py | xyclin/fluent | 1,164 | 37005 | # Copyright 2018 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cloudpickle as cp
import pyarrow as pa
import codecs
from io import BytesIO
import numpy as np
from .functions_pb2 import *
from . import shared
SER_FORMAT = 'raw_unicode_escape'
class Serializer():
def __init__(self):
raise NotImplementedError('Cannot instantiate abstract class.')
def _serialize(self, msg):
pass
def _deserialize(self, msg):
pass
def dump(self, msg):
pass
def load(self, msg):
pass
class DefaultSerializer(Serializer):
def __init__(self):
pass
    def _serialize(self, msg):
return msg
def _deserialize(self, msg):
return msg
def dump(self, msg):
return cp.dumps(msg)
def load(self, msg):
return cp.loads(msg)
class StringSerializer(Serializer):
def __init__(self):
pass
def _serialize(self, msg):
return codecs.decode(msg, SER_FORMAT)
def _deserialize(self, msg):
return codecs.encode(msg, SER_FORMAT)
def dump(self, msg):
return self._serialize(cp.dumps(msg))
def load(self, msg):
return cp.loads(self._deserialize(msg))
# TODO: how can we make serializers pluggable?
class NumpySerializer(DefaultSerializer):
def __init__(self):
pass
def dump(self, msg):
return pa.serialize(msg).to_buffer().to_pybytes()
def load(self, msg):
return pa.deserialize(msg)
numpy_ser = NumpySerializer()
default_ser = DefaultSerializer()
string_ser = StringSerializer()
function_ser = default_ser
def get_serializer(kind):
global numpy_ser, default_ser, string_ser
if kind == NUMPY:
return numpy_ser
elif kind == STRING:
return string_ser
elif kind == DEFAULT:
return default_ser
else:
return default_ser
def serialize_val(val, valobj=None, serialize=True):
if not valobj:
valobj = Value()
if isinstance(val, shared.FluentFuture):
valobj.body = default_ser.dump(shared.FluentReference(val.obj_id,
True, LWW))
elif isinstance(val, np.ndarray):
valobj.body = numpy_ser.dump(val)
valobj.type = NUMPY
else:
valobj.body = default_ser.dump(val)
if not serialize:
return valobj
return valobj.SerializeToString()
def deserialize_val(val):
v = Value()
v.ParseFromString(val)
if v.type == DEFAULT:
return default_ser.load(v.body)
elif v.type == STRING:
return string_ser.load(v.body)
elif v.type == NUMPY:
return numpy_ser.load(v.body)
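# Editor's note: a minimal round-trip sketch (not part of the original module),
# assuming the generated functions_pb2 definitions imported above are available.
def _example_round_trip():
    original = np.arange(4)
    payload = serialize_val(original)      # bytes of a serialized Value message
    restored = deserialize_val(payload)    # decoded back via NumpySerializer
    return np.array_equal(original, restored)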
|
tests/integration/test_breakpoint_step.py | benjamintemitope/SublimeTextXdebug | 344 | 37009 | import os
try:
from xdebug.unittesting import XdebugDeferrableTestCase
except:
from SublimeTextXdebug.xdebug.unittesting import XdebugDeferrableTestCase
class TestBreakpointStep(XdebugDeferrableTestCase):
breakpoint_step_file = 'breakpoint_step.php'
breakpoint_step_file_local_path = os.path.join(XdebugDeferrableTestCase.local_path, breakpoint_step_file)
def test_step_into(self):
self.set_breakpoint(self.breakpoint_step_file_local_path, 11)
self.run_command('xdebug_session_start')
yield self.window_has_debug_layout
breakpoint_view = self.get_view_by_title('Xdebug Breakpoint')
context_view = self.get_view_by_title('Xdebug Context')
stack_view = self.get_view_by_title('Xdebug Stack')
self.assertViewContains(breakpoint_view, '=> {file_local_path}\n\t|+| 11'.format(file_local_path=self.breakpoint_step_file_local_path))
self.assertViewIsEmpty(context_view)
self.assertViewIsEmpty(stack_view)
self.send_server_request(path=self.breakpoint_step_file)
def context_and_stack_have_content():
return not self.view_is_empty(context_view) and not self.view_is_empty(stack_view)
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greeting = <uninitialized>')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:11, {{main}}()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
context_view_contents = self.get_contents_of_view(context_view)
stack_view_contents = self.get_contents_of_view(stack_view)
def context_and_stack_have_different_content():
return self.get_contents_of_view(context_view) != context_view_contents and self.get_contents_of_view(stack_view) != stack_view_contents
self.run_command('xdebug_execute', {'command': 'step_into'})
yield context_and_stack_have_different_content
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greet = <uninitialized>')
self.assertViewContains(context_view, '$name = (string) Stranger')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:4, greet()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
context_view_contents = self.get_contents_of_view(context_view)
stack_view_contents = self.get_contents_of_view(stack_view)
def context_and_stack_have_different_content():
return self.get_contents_of_view(context_view) != context_view_contents and self.get_contents_of_view(stack_view) != stack_view_contents
self.run_command('xdebug_execute', {'command': 'step_into'})
yield context_and_stack_have_different_content
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greet = (string) Hi')
self.assertViewContains(context_view, '$name = (string) Stranger')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:5, greet()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
def test_step_out(self):
self.set_breakpoint(self.breakpoint_step_file_local_path, 5)
self.run_command('xdebug_session_start')
yield self.window_has_debug_layout
breakpoint_view = self.get_view_by_title('Xdebug Breakpoint')
context_view = self.get_view_by_title('Xdebug Context')
stack_view = self.get_view_by_title('Xdebug Stack')
self.assertViewContains(breakpoint_view, '=> {file_local_path}\n\t|+| 5'.format(file_local_path=self.breakpoint_step_file_local_path))
self.assertViewIsEmpty(context_view)
self.assertViewIsEmpty(stack_view)
self.send_server_request(path=self.breakpoint_step_file)
def context_and_stack_have_content():
return not self.view_is_empty(context_view) and not self.view_is_empty(stack_view)
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greet = (string) Hi')
self.assertViewContains(context_view, '$name = (string) Stranger')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:5, greet()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
context_view_contents = self.get_contents_of_view(context_view)
stack_view_contents = self.get_contents_of_view(stack_view)
def context_and_stack_have_different_content():
return self.get_contents_of_view(context_view) != context_view_contents and self.get_contents_of_view(stack_view) != stack_view_contents
self.run_command('xdebug_execute', {'command': 'step_out'})
yield context_and_stack_have_different_content
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greeting = (string) Hello Stranger!')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:12, {{main}}()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
def test_step_over(self):
self.set_breakpoint(self.breakpoint_step_file_local_path, 11)
self.run_command('xdebug_session_start')
yield self.window_has_debug_layout
breakpoint_view = self.get_view_by_title('Xdebug Breakpoint')
context_view = self.get_view_by_title('Xdebug Context')
stack_view = self.get_view_by_title('Xdebug Stack')
self.assertViewContains(breakpoint_view, '=> {file_local_path}\n\t|+| 11'.format(file_local_path=self.breakpoint_step_file_local_path))
self.assertViewIsEmpty(context_view)
self.assertViewIsEmpty(stack_view)
self.send_server_request(path=self.breakpoint_step_file)
def context_and_stack_have_content():
return not self.view_is_empty(context_view) and not self.view_is_empty(stack_view)
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greeting = <uninitialized>')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:11, {{main}}()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
context_view_contents = self.get_contents_of_view(context_view)
stack_view_contents = self.get_contents_of_view(stack_view)
def context_and_stack_have_different_content():
return self.get_contents_of_view(context_view) != context_view_contents and self.get_contents_of_view(stack_view) != stack_view_contents
self.run_command('xdebug_execute', {'command': 'step_over'})
yield context_and_stack_have_different_content
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greeting = (string) Hello Stranger!')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:12, {{main}}()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
|
icevision/models/ultralytics/yolov5/fastai/learner.py | ai-fast-track/mantisshrimp | 580 | 37011 | <filename>icevision/models/ultralytics/yolov5/fastai/learner.py
__all__ = ["learner"]
from icevision.imports import *
from icevision.engines.fastai import *
from icevision.models.ultralytics.yolov5.fastai.callbacks import Yolov5Callback
from yolov5.utils.loss import ComputeLoss
def learner(
dls: List[Union[DataLoader, fastai.DataLoader]],
model: nn.Module,
cbs=None,
**learner_kwargs,
):
"""Fastai `Learner` adapted for Yolov5.
# Arguments
dls: `Sequence` of `DataLoaders` passed to the `Learner`.
The first one will be used for training and the second for validation.
model: The model to train.
cbs: Optional `Sequence` of callbacks.
**learner_kwargs: Keyword arguments that will be internally passed to `Learner`.
# Returns
A fastai `Learner`.
"""
cbs = [Yolov5Callback()] + L(cbs)
compute_loss = ComputeLoss(model)
def loss_fn(preds, targets) -> Tensor:
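        # ComputeLoss returns (total_loss, loss_items); fastai only needs the scalar loss tensor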
return compute_loss(preds, targets)[0]
learn = adapted_fastai_learner(
dls=dls,
model=model,
cbs=cbs,
loss_func=loss_fn,
**learner_kwargs,
)
# HACK: patch AvgLoss (in original, find_bs looks at learn.yb which has shape (N, 6) - with N being number_of_objects_in_image * batch_size. So impossible to retrieve BS)
class Yolov5AvgLoss(fastai.AvgLoss):
def accumulate(self, learn):
bs = len(learn.xb[0])
self.total += learn.to_detach(learn.loss.mean()) * bs
self.count += bs
recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]
recorder.loss = Yolov5AvgLoss()
return learn
|
v0.5/training/image_classification/train.py | PhilippvK/tiny | 148 | 37012 | <gh_stars>100-1000
'''
MLCommons
group: TinyMLPerf (https://github.com/mlcommons/tiny)
image classification on cifar10
train.py desc: loads data, trains and saves model, plots training metrics
'''
import numpy as np
import matplotlib.pyplot as plt
import pickle
import tensorflow as tf
from keras.callbacks import LearningRateScheduler
from keras.utils import to_categorical
import keras_model
import datetime
EPOCHS = 500
BS = 32
# get date and time to save model
dt = datetime.datetime.today()
year = dt.year
month = dt.month
day = dt.day
hour = dt.hour
minute = dt.minute
"""
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000
training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains
exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random
order, but some training batches may contain more images from one class than another. Between them, the training
batches contain exactly 5000 images from each class.
"""
#learning rate schedule
def lr_schedule(epoch):
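    # exponential decay: lr(epoch) = 0.001 * 0.99 ** epoch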
initial_learning_rate = 0.001
decay_per_epoch = 0.99
lrate = initial_learning_rate * (decay_per_epoch ** epoch)
print('Learning rate = %f'%lrate)
return lrate
lr_scheduler = LearningRateScheduler(lr_schedule)
#optimizer
optimizer = tf.keras.optimizers.Adam()
#define data generator
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
#brightness_range=(0.9, 1.2),
#contrast_range=(0.9, 1.2),
validation_split=0.2
)
def unpickle(file):
"""load the cifar-10 data"""
with open(file, 'rb') as fo:
data = pickle.load(fo, encoding='bytes')
return data
def load_cifar_10_data(data_dir, negatives=False):
"""
Return train_data, train_filenames, train_labels, test_data, test_filenames, test_labels
"""
# get the meta_data_dict
# num_cases_per_batch: 1000
# label_names: ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# num_vis: :3072
meta_data_dict = unpickle(data_dir + "/batches.meta")
cifar_label_names = meta_data_dict[b'label_names']
cifar_label_names = np.array(cifar_label_names)
# training data
cifar_train_data = None
cifar_train_filenames = []
cifar_train_labels = []
for i in range(1, 6):
cifar_train_data_dict = unpickle(data_dir + "/data_batch_{}".format(i))
if i == 1:
cifar_train_data = cifar_train_data_dict[b'data']
else:
cifar_train_data = np.vstack((cifar_train_data, cifar_train_data_dict[b'data']))
cifar_train_filenames += cifar_train_data_dict[b'filenames']
cifar_train_labels += cifar_train_data_dict[b'labels']
cifar_train_data = cifar_train_data.reshape((len(cifar_train_data), 3, 32, 32))
if negatives:
cifar_train_data = cifar_train_data.transpose(0, 2, 3, 1).astype(np.float32)
else:
cifar_train_data = np.rollaxis(cifar_train_data, 1, 4)
cifar_train_filenames = np.array(cifar_train_filenames)
cifar_train_labels = np.array(cifar_train_labels)
cifar_test_data_dict = unpickle(data_dir + "/test_batch")
cifar_test_data = cifar_test_data_dict[b'data']
cifar_test_filenames = cifar_test_data_dict[b'filenames']
cifar_test_labels = cifar_test_data_dict[b'labels']
cifar_test_data = cifar_test_data.reshape((len(cifar_test_data), 3, 32, 32))
if negatives:
cifar_test_data = cifar_test_data.transpose(0, 2, 3, 1).astype(np.float32)
else:
cifar_test_data = np.rollaxis(cifar_test_data, 1, 4)
cifar_test_filenames = np.array(cifar_test_filenames)
cifar_test_labels = np.array(cifar_test_labels)
return cifar_train_data, cifar_train_filenames, to_categorical(cifar_train_labels), \
cifar_test_data, cifar_test_filenames, to_categorical(cifar_test_labels), cifar_label_names
if __name__ == "__main__":
"""load cifar10 data and trains model"""
cifar_10_dir = 'cifar-10-batches-py'
train_data, train_filenames, train_labels, test_data, test_filenames, test_labels, label_names = \
load_cifar_10_data(cifar_10_dir)
print("Train data: ", train_data.shape)
print("Train filenames: ", train_filenames.shape)
print("Train labels: ", train_labels.shape)
print("Test data: ", test_data.shape)
print("Test filenames: ", test_filenames.shape)
print("Test labels: ", test_labels.shape)
print("Label names: ", label_names.shape)
    # Don't forget that the label_names and filenames are in binary and need conversion if used.
# display some random training images in a 25x25 grid
num_plot = 5
f, ax = plt.subplots(num_plot, num_plot)
for m in range(num_plot):
for n in range(num_plot):
idx = np.random.randint(0, train_data.shape[0])
ax[m, n].imshow(train_data[idx])
ax[m, n].get_xaxis().set_visible(False)
ax[m, n].get_yaxis().set_visible(False)
f.subplots_adjust(hspace=0.1)
f.subplots_adjust(wspace=0)
plt.show()
new_model = keras_model.resnet_v1_eembc()
new_model.summary()
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(train_data)
new_model.compile(
optimizer=optimizer, loss='categorical_crossentropy', metrics='accuracy', loss_weights=None,
weighted_metrics=None, run_eagerly=None )
# fits the model on batches with real-time data augmentation:
History = new_model.fit(datagen.flow(train_data, train_labels, batch_size=BS),
steps_per_epoch=len(train_data) / BS, epochs=EPOCHS, callbacks=[lr_scheduler])
plt.plot(np.array(range(EPOCHS)), History.history['loss'])
plt.plot(np.array(range(EPOCHS)), History.history['accuracy'])
plt.savefig('train_loss_acc.png')
model_name = "trainedResnet.h5"
new_model.save("trained_models/" + model_name)
|
src/tests/web/web_auth_utils_test.py | tomgilbertson/script-server-v1 | 833 | 37014 | from unittest import TestCase
from parameterized import parameterized
from tests.test_utils import mock_request_handler
from web.web_auth_utils import remove_webpack_suffixes, is_allowed_during_login
class WebpackSuffixesTest(TestCase):
def test_remove_webpack_suffixes_when_css(self):
normalized = remove_webpack_suffixes('js/chunk-login-vendors.59040343.css')
self.assertEqual('js/chunk-login-vendors.css', normalized)
def test_remove_webpack_suffixes_when_js(self):
normalized = remove_webpack_suffixes('js/login.be16f278.js')
self.assertEqual('js/login.js', normalized)
def test_remove_webpack_suffixes_when_js_map(self):
normalized = remove_webpack_suffixes('js/login.be16f278.js.map')
self.assertEqual('js/login.js.map', normalized)
def test_remove_webpack_suffixes_when_favicon(self):
normalized = remove_webpack_suffixes('favicon.123.ico')
self.assertEqual('favicon.123.ico', normalized)
def test_remove_webpack_suffixes_when_no_suffixes(self):
normalized = remove_webpack_suffixes('css/chunk-login-vendors.css')
self.assertEqual('css/chunk-login-vendors.css', normalized)
def test_remove_webpack_suffixes_when_no_extension(self):
normalized = remove_webpack_suffixes('data/some_file')
self.assertEqual('data/some_file', normalized)
class LoginResourcesTest(TestCase):
@parameterized.expand([
('/favicon.ico'),
('login.html'),
('/js/login.be16f278.js'),
('/js/login.be16f278.js.map'),
('/js/chunk-login-vendors.18e22e7f.js'),
('/js/chunk-login-vendors.18e22e7f.js.map'),
('/img/titleBackground_login.a6c36d4c.jpg'),
('/css/login.8e74be0f.css'),
('/fonts/roboto-latin-400.60fa3c06.woff'),
('/fonts/roboto-latin-400.479970ff.woff2'),
('/fonts/roboto-latin-500.020c97dc.woff2'),
('/fonts/roboto-latin-500.87284894.woff')
])
def test_is_allowed_during_login_when_allowed(self, resource):
request_handler = mock_request_handler(method='GET')
allowed = is_allowed_during_login(resource, 'login.html', request_handler)
self.assertTrue(allowed, 'Resource ' + resource + ' should be allowed, but was not')
def test_is_allowed_during_login_when_prohibited(self):
request_handler = mock_request_handler(method='GET')
resource = 'admin.html'
allowed = is_allowed_during_login(resource, 'login.html', request_handler)
self.assertFalse(allowed, 'Resource ' + resource + ' should NOT be allowed, but WAS')
|
src/adafruit_blinka/microcontroller/amlogic/s905x3/pin.py | Jcc99/Adafruit_Blinka | 294 | 37117 | """AmLogic s905x3 pin names"""
# pylint: disable=wildcard-import,unused-wildcard-import
from adafruit_blinka.microcontroller.amlogic.meson_g12_common.pin import *
|
system-test/testnet-automation-json-parser.py | Flawm/solana | 7,843 | 37125 | #!/usr/bin/env python3
import sys, json, argparse
parser = argparse.ArgumentParser()
parser.add_argument("--empty_error", action="store_true", help="If present, do not print error message")
args = parser.parse_args()
data=json.load(sys.stdin)
if 'results' in data:
for result in data['results']:
if 'series' in result:
print(result['series'][0]['columns'][1] + ': ' + str(result['series'][0]['values'][0][1]))
elif not args.empty_error:
print("An expected result from CURL request is missing")
elif not args.empty_error:
print("No results returned from CURL request")
|
Hackerrank/sherlockAndCost.py | nandani99/Hacktoberfest-1 | 255 | 37166 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the cost function below.
def cost(b):
n=len(b)
l, h = 0, 0
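    # DP over positions: l is the best total when A[i] is pinned to 1, h is the
    # best total when A[i] is pinned to B[i]; each step extends both choices
    # from the previous element's low/high assignment.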
for i in range(1, n):
l, h = (max(l, h + b[i - 1] - 1),
max(l + b[i] - 1, h + abs(b[i] - b[i - 1])))
return max(l, h)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
B = list(map(int, input().rstrip().split()))
result = cost(B)
fptr.write(str(result) + '\n')
fptr.close()
|
test_python_toolbox/test_cheat_hashing.py | hboshnak/python_toolbox | 119 | 37187 | <reponame>hboshnak/python_toolbox
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
'''Testing module for `python_toolbox.abc_tools.AbstractStaticMethod`.'''
import copy
from python_toolbox.cheat_hashing import cheat_hash
def test_cheat_hash():
'''Test `cheat_hash` on various objects.'''
things = [
1,
7,
4.5,
[1, 2, 3.4],
(1, 2, 3.4),
{1: 2, 3: 4.5},
{1, 2, 3.4},
[1, [1, 2], 3],
[1, {frozenset((1, 2)): 'meow'}, 3],
sum,
None,
(None, {None: None})
]
things_copy = copy.deepcopy(things)
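    # The cheat hash must depend only on value, so a deep copy has to hash identically to the original.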
for thing, thing_copy in zip(things, things_copy):
assert cheat_hash(thing) == cheat_hash(thing) == \
cheat_hash(thing_copy) == cheat_hash(thing_copy)
|
pysnmp/hlapi/v1arch/asyncore/ntforg.py | RKinsey/pysnmp | 492 | 37204 | <reponame>RKinsey/pysnmp
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp.hlapi.v1arch.auth import *
from pysnmp.hlapi.v1arch.asyncore import *
from pysnmp.hlapi.varbinds import *
from pysnmp.smi.rfc1902 import *
from pysnmp.proto.api import v2c
from pysnmp.proto.proxy import rfc2576
from pysnmp import error
__all__ = ['sendNotification']
VB_PROCESSOR = NotificationOriginatorVarBinds()
def sendNotification(snmpDispatcher, authData, transportTarget,
notifyType, *varBinds, **options):
"""Send SNMP notification.
Based on passed parameters, prepares SNMP TRAP or INFORM
notification (:RFC:`1905#section-4.2.6`) and schedules its
transmission by I/O framework at a later point of time.
Parameters
----------
snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
Class instance representing asyncore-based asynchronous event loop and
associated state information.
authData: :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget: :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or
:py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
notifyType: str
Indicates type of notification to be sent. Recognized literal
values are *trap* or *inform*.
\*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType`
One or more objects representing MIB variables to place
into SNMP notification. It could be tuples of OID-values
or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
of :py:class:`~pysnmp.smi.rfc1902.NotificationType` objects.
Besides user variable-bindings, SNMP Notification PDU requires at
least two variable-bindings to be present:
0. SNMPv2-MIB::sysUpTime.0 = <agent uptime>
1. SNMPv2-SMI::snmpTrapOID.0 = <notification ID>
When sending SNMPv1 TRAP, more variable-bindings could be present:
2. SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP>
3. SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name>
4. SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID>
If user does not supply some or any of the above variable-bindings or
if they are at the wrong positions, the system will add/reorder the
missing ones automatically.
On top of that, some notification types imply including some additional
variable-bindings providing additional details on the event being
reported. Therefore it is generally easier to use
:py:class:`~pysnmp.smi.rfc1902.NotificationType` object which will
help adding relevant variable-bindings.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `False`.
* `cbFun` (callable) - user-supplied callable that is invoked
to pass SNMP response data or error to user at a later point
of time. Default is `None`.
* `cbCtx` (object) - user-supplied object passing additional
parameters to/from `cbFun`. Default is `None`.
Note
----
The `SnmpDispatcher` object may be expensive to create, therefore it is
advised to maintain it for the lifecycle of the application/thread for
as long as possible.
Returns
-------
sendRequestHandle: int
Unique request identifier. Can be used for matching received
responses with ongoing *INFORM* requests. Returns `None` for
*TRAP* notifications.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Examples
--------
>>> from pysnmp.hlapi.v1arch.asyncore import *
>>>
>>> snmpDispatcher = SnmpDispatcher()
>>>
>>> sendNotification(
>>> snmpDispatcher,
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 162)),
>>> 'trap',
>>> NotificationType(ObjectIdentity('SNMPv2-MIB', 'coldStart')),
>>> lookupMib=True
>>> )
>>> snmpDispatcher.transportDispatcher.runDispatcher()
"""
sysUpTime = v2c.apiTrapPDU.sysUpTime
snmpTrapOID = v2c.apiTrapPDU.snmpTrapOID
def _ensureVarBinds(varBinds):
# Add sysUpTime if not present already
if not varBinds or varBinds[0][0] != sysUpTime:
varBinds.insert(0, (v2c.ObjectIdentifier(sysUpTime), v2c.TimeTicks(0)))
# Search for and reposition sysUpTime if it's elsewhere
for idx, varBind in enumerate(varBinds[1:]):
if varBind[0] == sysUpTime:
varBinds[0] = varBind
del varBinds[idx + 1]
break
if len(varBinds) < 2:
raise error.PySnmpError('SNMP notification PDU requires '
'SNMPv2-MIB::snmpTrapOID.0 to be present')
# Search for and reposition snmpTrapOID if it's elsewhere
for idx, varBind in enumerate(varBinds[2:]):
if varBind[0] == snmpTrapOID:
del varBinds[idx + 2]
if varBinds[1][0] == snmpTrapOID:
varBinds[1] = varBind
else:
varBinds.insert(1, varBind)
break
# Fail on missing snmpTrapOID
if varBinds[1][0] != snmpTrapOID:
raise error.PySnmpError('SNMP notification PDU requires '
'SNMPv2-MIB::snmpTrapOID.0 to be present')
return varBinds
def _cbFun(snmpDispatcher, stateHandle, errorIndication, rspPdu, _cbCtx):
if not cbFun:
return
if errorIndication:
cbFun(errorIndication, v2c.Integer(0), v2c.Integer(0), None,
cbCtx=cbCtx, snmpDispatcher=snmpDispatcher, stateHandle=stateHandle)
return
errorStatus = v2c.apiTrapPDU.getErrorStatus(rspPdu)
errorIndex = v2c.apiTrapPDU.getErrorIndex(rspPdu)
varBinds = v2c.apiTrapPDU.getVarBinds(rspPdu)
if lookupMib:
varBinds = VB_PROCESSOR.unmakeVarBinds(snmpDispatcher.cache, varBinds)
nextStateHandle = v2c.getNextRequestID()
nextVarBinds = cbFun(errorIndication, errorStatus, errorIndex, varBinds,
cbCtx=cbCtx,
snmpDispatcher=snmpDispatcher,
stateHandle=stateHandle,
nextStateHandle=nextStateHandle)
if not nextVarBinds:
return
v2c.apiTrapPDU.setRequestID(reqPdu, nextStateHandle)
v2c.apiTrapPDU.setVarBinds(reqPdu, _ensureVarBinds(nextVarBinds))
return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)
lookupMib, cbFun, cbCtx = [options.get(x) for x in ('lookupMib', 'cbFun', 'cbCtx')]
if lookupMib:
varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
if notifyType == 'trap':
reqPdu = v2c.TrapPDU()
else:
reqPdu = v2c.InformRequestPDU()
v2c.apiTrapPDU.setDefaults(reqPdu)
v2c.apiTrapPDU.setVarBinds(reqPdu, varBinds)
varBinds = v2c.apiTrapPDU.getVarBinds(reqPdu)
v2c.apiTrapPDU.setVarBinds(reqPdu, _ensureVarBinds(varBinds))
if authData.mpModel == 0:
reqPdu = rfc2576.v2ToV1(reqPdu)
return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)
|
src/embedding/utilslib/baidu_spider_threads.py | mykiscool/DeepCamera | 914 | 37246 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
import re
import urllib
import json
import socket
import time
import multiprocessing
from multiprocessing.dummy import Pool
from multiprocessing import Queue
import requests
timeout = 5
socket.setdefaulttimeout(timeout)
class Image(object):
"""图片类,保存图片信息"""
def __init__(self, url, save_path, referer):
super(Image, self).__init__()
self.url = url
self.save_path = save_path
self.referer = referer
class Crawler:
    # sleep interval between requests
__time_sleep = 0.1
__amount = 0
__start_amount = 0
__counter = 0
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu '
'Chromium/58.0.3029.110 Chrome/58.0.3029.110 Safari/537.36'}
    # fetch image URLs and related page content
    # t: time interval between image downloads
def __init__(self, t=0.1):
self.dirpath = dirpath
self.time_sleep = t
self.pool = Pool(30)
self.session = requests.Session()
self.session.headers = Crawler.headers
self.queue = Queue()
        self.delay = 1.5  # requests that are too frequent will get the client banned
self.__down_counter = 1
    # get the file name suffix (extension)
@staticmethod
def __get_suffix(name):
m = re.search(r'\.[^\.]*$', name)
if m.group(0) and len(m.group(0)) <= 5:
return m.group(0)
else:
return '.jpeg'
    # get the file name prefix
@staticmethod
def __get_prefix(name):
return name[:name.find('.')]
    # collect image URLs from the JSON response and queue them for download
def __resolve_img_url(self, rsp_data, referer):
imgs = []
for image_info in rsp_data['imgs']:
fix = self.__get_suffix(image_info['objURL'])
local_path = os.path.join(self.__work_path, str(self.__counter) + str(fix))
image = Image(image_info['objURL'], local_path, referer)
imgs.append(image)
print("图片+1,已有" + str(self.__down_counter) + "张")
self.__down_counter += 1
self.__counter += 1
self.queue.put(imgs)
return
    # start fetching result pages
def __resolve_json(self, word=''):
search = urllib.quote(word)
        # pn: image offset of the current page
pn = self.__start_amount
while pn < self.__amount:
url = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=' + search + '&cg=girl&pn=' + str(
pn) + '&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e'
            # reuse the session to avoid getting banned
try:
time.sleep(self.delay)
req = self.session.get(url=url, timeout=15)
rsp = req.text
except UnicodeDecodeError as e:
print(e)
                print('-----UnicodeDecodeError url:', url)
except requests.exceptions.RequestException as e:
print(e)
print("-----Error:", url)
except socket.timeout as e:
print(e)
print("-----socket timout:", url)
else:
                # parse the JSON response
try:
rsp_data = json.loads(rsp)
self.__resolve_img_url(rsp_data, url)
except ValueError:
pass
            # move on to the next page
            print("Fetching next page of JSON")
pn += 60
print("解析json完成")
return
def __downImg(self, img):
"""下载单张图片,传入的是Image对象"""
# try:
# time.sleep(self.delay)
# urllib.urlretrieve(img.url, img.save_path)
# except requests.exceptions.HTTPError as e:
# print(e)
# except Exception as err:
# time.sleep(1)
# print(err)
# print("产生未知错误,放弃保存")
imgUrl = img.url
# self.messageQueue.put("线程 %s 正在下载 %s " %
# (threading.current_thread().name, imgUrl))
try:
time.sleep(self.delay)
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu '
'Chromium/58.0.3029.110 Chrome/58.0.3029.110 Safari/537.36'}
headers['Referer'] = img.referer
res = requests.get(imgUrl, headers=headers, timeout=15)
with open(img.save_path, "wb") as f:
f.write(res.content)
except Exception as e:
message = "抛出异常: %s%s" % (imgUrl, str(e))
print(message)
def start(self, index, word, spider_page_num=1, start_page=1):
"""
        Crawler entry point.
        :param word: keyword to crawl images for
        :param spider_page_num: number of result pages to crawl; total images fetched is pages x 60
        :param start_page: page number to start from
:return:
"""
self.__work_path = os.path.join(self.dirpath, index)
if not os.path.exists(self.__work_path):
os.mkdir(self.__work_path)
        self.__counter = len(os.listdir(self.__work_path)) + 1  # count existing images so local file names do not collide
self.__start_amount = (start_page - 1) * 60
self.__amount = spider_page_num * 60 + self.__start_amount
self.__resolve_json(word)
while self.queue.qsize():
imgs = self.queue.get()
self.pool.map_async(self.__downImg, imgs)
self.pool.close()
self.pool.join()
        print('Finished saving')
if __name__ == '__main__':
dirpath = os.path.join(sys.path[0], 'results')
if not os.path.exists(dirpath):
os.mkdir(dirpath)
with open('name.json') as f:
json_data = json.load(f)
# word = str(input("请输入图片关键字: \n"))
sort_data = sorted([(int(k), v) for k, v in json_data.items()])
    print('Starting')
for index, name in sort_data:
folder = str(index)
person = name.encode('utf-8')
        print('Start crawling {}:{}'.format(folder, person))
if folder in os.listdir('./results'):
            print('Already exists, skipping')
continue
crawler = Crawler(0.05)
crawler.dirpath = dirpath
crawler.start(folder, person, 2, 1)
|
libvis/scripts/LMOptimizer SE3Optimization Test Jacobian derivation.py | zimengjiang/badslam | 541 | 37266 | from sympy import *
# Implementation of QuaternionBase<Derived>::toRotationMatrix(void).
# The quaternion q is given as a list [qw, qx, qy, qz].
def QuaternionToRotationMatrix(q):
tx = 2 * q[1]
ty = 2 * q[2]
tz = 2 * q[3]
twx = tx * q[0]
twy = ty * q[0]
twz = tz * q[0]
txx = tx * q[1]
txy = ty * q[1]
txz = tz * q[1]
tyy = ty * q[2]
tyz = tz * q[2]
tzz = tz * q[3]
return Matrix([[1 - (tyy + tzz), txy - twz, txz + twy],
[txy + twz, 1 - (txx + tzz), tyz - twx],
[txz - twy, tyz + twx, 1 - (txx + tyy)]])
# Implementation of SO3Group<Scalar> expAndTheta().
# Only implementing the first case (of very small rotation) since we take the Jacobian at zero.
def SO3exp(omega):
theta = omega.norm()
theta_sq = theta**2
half_theta = theta / 2
theta_po4 = theta_sq * theta_sq
    imag_factor = Rational(1, 2) - Rational(1, 48) * theta_sq + Rational(1, 3840) * theta_po4
    real_factor = 1 - Rational(1, 2) * theta_sq + Rational(1, 384) * theta_po4
# return SO3Group<Scalar>(Eigen::Quaternion<Scalar>(
# real_factor, imag_factor * omega.x(), imag_factor * omega.y(),
# imag_factor * omega.z()));
qw = real_factor
qx = imag_factor * omega[0]
qy = imag_factor * omega[1]
qz = imag_factor * omega[2]
return QuaternionToRotationMatrix([qw, qx, qy, qz])
# Implementation of SE3Group<Scalar> exp().
# Only implementing the first case (of small rotation) since we take the Jacobian at zero.
def SE3exp(tangent):
omega = Matrix(tangent[3:6])
V = SO3exp(omega)
rotation = V
translation = V * Matrix(tangent[0:3])
return rotation.row_join(translation)
# Main
init_printing(use_unicode=True)
print('Variant 1')
print('')
# Define the tangent vector with symbolic elements T_0 to T_5.
# (For a matrix, use: Matrix(3, 1, lambda i,j:var('S_%d%d' % (i,j))) )
T = Matrix(6, 1, lambda i,j:var('T_%d' % (i)))
# Compute transformation matrix from tangent vector.
T_matrix = SE3exp(T)
# Define the vector current_T * src:
S = Matrix(3, 1, lambda i,j:var('S_%d' % (i)))
# Matrix-vector multiplication with homogeneous vector:
result = T_matrix * S.col_join(Matrix([1]))
# Compute Jacobian:
# (Note: The transpose is needed for stacking the matrix columns (instead of rows) into a vector.)
jac = result.transpose().reshape(result.rows * result.cols, 1).jacobian(T)
# Take Jacobian at zero:
jac_subs = jac.subs([(T[0], 0), (T[1], 0), (T[2], 0), (T[3], 0), (T[4], 0), (T[5], 0)])
# Simplify and output:
jac_subs_simple = simplify(jac_subs)
pprint(jac_subs_simple)
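# (For reference: this simplifies to the familiar [ I_3 | -[S]_x ] block, i.e. the
#  identity with respect to the translation part and the negative skew-symmetric
#  matrix of S with respect to the rotation part.)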
print('')
print('')
print('Variant 2')
print('')
# Treat the function of which we want to determine the derivative as a list of nested functions.
# This makes it easier to compute the derivative of each part, simplify it, and concatenate the results
# using the chain rule.
### Define the function of which the Jacobian shall be taken ###
# Matrix-vector multiplication with homogeneous vector:
def MatrixVectorMultiplyHomogeneous(matrix, vector):
return matrix * vector.col_join(Matrix([1]))
# Define the vector current_T * src:
S = Matrix(3, 1, lambda i,j:var('S_%d' % (i)))
# The list of nested functions. They will be evaluated from right to left
# (this is to match the way they would be written in math: f(g(x)).)
functions = [lambda matrix : MatrixVectorMultiplyHomogeneous(matrix, S), SE3exp]
### Define the variables wrt. to take the Jacobian, and the position for evaluation ###
# Chain rule:
# d(f(g(x))) / dx = (df/dy)(g(x)) * dg/dx
# Define the parameter with respect to take the Jacobian, y in the formula above:
parameters = Matrix(6, 1, lambda i,j:var('T_%d' % (i)))
# Set the position at which to take the Jacobian, g(x) in the formula above:
parameter_values = zeros(6, 1)
### Automatic Jacobian calculation, no need to modify anything beyond this point ###
# Jacobian from previous step, dg/dx in the formula above:
previous_jacobian = 1
# TODO: Test whether this works with non-matrix functions.
def ComputeValueAndJacobian(function, parameters, parameter_values):
# Evaluate the function.
values = function(parameter_values)
# Compute the Jacobian.
symbolic_values = function(parameters)
symbolic_values_vector = symbolic_values.transpose().reshape(symbolic_values.rows * symbolic_values.cols, 1)
parameters_vector = parameters.transpose().reshape(parameters.rows * parameters.cols, 1)
jacobian = symbolic_values_vector.jacobian(parameters_vector)
# Set in the evaluation point.
for row in range(0, parameters.rows):
for col in range(0, parameters.cols):
jacobian = jacobian.subs(parameters[row, col], parameter_values[row, col])
# Simplify the jacobian.
jacobian = simplify(jacobian)
return (values, jacobian)
# Print info about initial state.
print('Taking the Jacobian of these functions (sorted from inner to outer):')
for i in range(len(functions) - 1, -1, -1):
print(str(functions[i]))
print('with respect to:')
pprint(parameters)
print('at position:')
pprint(parameter_values)
print('')
# Loop over all functions:
for i in range(len(functions) - 1, -1, -1):
# Compute value and Jacobian of this function.
(values, jacobian) = ComputeValueAndJacobian(functions[i], parameters, parameter_values)
# Update parameter_values
parameter_values = values
# Update parameters (create a new symbolic vector of the same size as parameter_values)
parameters = Matrix(values.rows, values.cols, lambda i,j:var('T_%d%d' % (i,j)))
# Concatenate this Jacobian with the previous one according to the chain rule:
previous_jacobian = jacobian * previous_jacobian
# Print intermediate result
print('Intermediate step ' + str(len(functions) - i) + ', for ' + str(functions[i]))
print('Position after function evaluation (function value):')
pprint(parameter_values)
print('Jacobian of this function wrt. its input only:')
pprint(jacobian)
print('Cumulative Jacobian wrt. the innermost parameter:')
pprint(previous_jacobian)
print('')
# Print final result
print('Final result:')
pprint(previous_jacobian)
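# --- Illustrative consistency check (an editor's sketch, not part of the original script) ---
# Both variants compute the Jacobian of SE3exp(T) * [S; 1] with respect to T at T = 0 and share
# the symbols S_0..S_2, so their results are expected to agree.
print('')
print('Consistency check (Variant 2 minus Variant 1; expected to be the zero matrix):')
pprint(simplify(previous_jacobian - jac_subs_simple))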
|
tests/tests_basic.py | mehrdad-shokri/fluxcapacitor | 648 | 37323 | import os
import tests
from tests import at_most, compile, savefile
import subprocess
node_present = True
erlang_present = True
if os.system("node -v >/dev/null 2>/dev/null") != 0:
print " [!] ignoring nodejs tests"
node_present = False
if (os.system("erl -version >/dev/null 2>/dev/null") != 0 or
os.system("which escript >/dev/null 2>/dev/null") != 0):
print " [!] ignoring erlang tests"
erlang_present = False
sleep_sort_script='''\
#!/bin/bash
echo "Unsorted: $*"
function f() {
sleep "$1"
echo -n "$1 "
}
while [ -n "$1" ]; do
f "$1" &
shift
done
wait
echo
'''
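# The script above implements "sleep sort": each value is printed after sleeping that many
# seconds, so smaller values come out first. Under fluxcapacitor the sleeps are expected to be
# virtualised, which is what makes the few-second time limits on the tests below realistic.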
class SingleProcess(tests.TestCase):
@at_most(seconds=2)
def test_bash_sleep(self):
self.system("sleep 10")
@at_most(seconds=2)
def test_bash_bash_sleep(self):
self.system("bash -c 'sleep 120;'")
@at_most(seconds=2)
def test_python2_sleep(self):
self.system('python2 -c "import time; time.sleep(10)"')
@at_most(seconds=2)
def test_python2_select(self):
self.system('python2 -c "import select; select.select([],[],[], 10)"')
@at_most(seconds=2)
def test_python2_poll(self):
self.system('python2 -c "import select; select.poll().poll(10000)"')
@at_most(seconds=2)
def test_python2_epoll(self):
self.system('python2 -c "import select; select.epoll().poll(10000)"')
@at_most(seconds=2)
def test_node_epoll(self):
if node_present:
self.system('node -e "setTimeout(function(){},10000);"')
def test_bad_command(self):
self.system('command_that_doesnt exist',
returncode=127, ignore_stderr=True)
def test_return_status(self):
self.system('python2 -c "import sys; sys.exit(188)"', returncode=188)
self.system('python2 -c "import sys; sys.exit(-1)"', returncode=255)
@at_most(seconds=2)
@compile(code='''
#include <unistd.h>
int main() {
sleep(10);
return(0);
}''')
def test_c_sleep(self, compiled=None):
self.system(compiled)
@at_most(seconds=2)
@compile(code='''
#include <time.h>
int main() {
struct timespec ts = {1, 0};
nanosleep(&ts, NULL);
return(0);
}''')
def test_c_nanosleep(self, compiled=None):
self.system(compiled)
@at_most(seconds=5)
@savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp disable +A1 +K true -noinput
-export([main/1]).
main(_) ->
timer:sleep(10*1000),
halt(0).
''')
def test_erlang_sleep(self, filename=None):
if erlang_present:
self.system("escript %s" % (filename,))
@at_most(seconds=5)
@savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp enable +A30 +K true -noinput
-export([main/1]).
main(_) ->
timer:sleep(10*1000),
halt(0).
''')
def test_erlang_sleep_smp(self, filename=None):
if erlang_present:
self.system("escript %s" % (filename,))
@at_most(seconds=5)
@savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp enable +A30 +K false -noinput
-export([main/1]).
main(_) ->
timer:sleep(10*1000),
halt(0).
''')
def test_erlang_sleep_smp_no_epoll(self, filename=None):
if erlang_present:
self.system("escript %s" % (filename,))
@at_most(seconds=5)
@savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp disable +A1 +K true -noinput
-export([main/1]).
main(_) ->
self() ! msg,
proc(10),
receive
_ -> ok
end.
proc(0) ->
receive
_ -> halt(0)
end;
proc(N) ->
Pid = spawn(fun () -> proc(N-1) end),
receive
_ -> timer:sleep(1000),
Pid ! msg
end.
''')
def test_erlang_process_staircase(self, filename=None):
if erlang_present:
self.system("escript %s" % (filename,))
@at_most(seconds=2)
def test_perl_sleep(self):
self.system("perl -e 'sleep 10'")
@at_most(seconds=5)
@savefile(suffix="sh", text=sleep_sort_script)
def test_sleep_sort(self, filename=None):
self.system("bash %s 1 12 1231 123213 13212 > /dev/null" % (filename,))
@at_most(seconds=5)
@savefile(suffix="sh", text=sleep_sort_script)
    def test_sleep_sort_2(self, filename=None):
self.system("bash %s 5 3 6 3 6 3 1 4 7 > /dev/null" % (filename,))
@at_most(seconds=10)
def test_parallel_sleeps(self):
for i in range(10):
stdout = self.system(' -- '.join(['bash -c "date +%s"',
'bash -c "sleep 60; date +%s"',
'bash -c "sleep 120; date +%s"']),
capture_stdout=True)
a, b, c = [int(l) for l in stdout.split()]
assert 55 < (b - a) < 65, str(b-a)
assert 55 < (c - b) < 65, str(c-b)
assert 110 < (c - a) < 130, str(c-a)
@at_most(seconds=3)
def test_file_descriptor_leak(self):
out = subprocess.check_output("ls /proc/self/fd", shell=True)
normal_fds = len(out.split('\n'))
stdout = self.system(' -- '.join(['sleep 1',
'sleep 60',
'sleep 120',
'bash -c "sleep 180; ls /proc/self/fd"']),
capture_stdout=True)
after_fork_fds = len(stdout.split('\n'))
assert normal_fds == after_fork_fds
@at_most(seconds=4)
def test_2546_wraparound(self):
if os.uname()[4] == "x86_64":
stdout = self.system("bash -c 'for i in `seq 1 55`; do sleep 315360000; done; date +%Y'",
capture_stdout=True)
assert int(stdout) > 2500
if __name__ == '__main__':
import unittest
unittest.main()
|
examples/perf/rnn/simple_rnn.py | yuhonghong66/minpy | 1,271 | 37347 | <filename>examples/perf/rnn/simple_rnn.py
import sys
sys.path.insert(0, "../../python/")
import mxnet as mx
import numpy as np
from collections import namedtuple
import time
import math
RNNState = namedtuple("RNNState", ["h"])
RNNParam = namedtuple("RNNParam", ["i2h_weight", "i2h_bias",
"h2h_weight", "h2h_bias"])
RNNModel = namedtuple("RNNModel", ["rnn_exec", "symbol",
"init_states", "last_states",
"seq_data", "seq_labels", "seq_outputs",
"param_blocks"])
def rnn(num_hidden, in_data, prev_state, param, seqidx, layeridx):
i2h = mx.sym.FullyConnected(data=in_data,
weight=param.i2h_weight,
bias=param.i2h_bias,
num_hidden=num_hidden,
name="t%d_l%d_i2h" % (seqidx, layeridx))
if seqidx > 0:
h2h = mx.sym.FullyConnected(data=prev_state,
weight=param.h2h_weight,
bias=param.h2h_bias,
num_hidden=num_hidden,
name="t%d_l%d_h2h" % (seqidx, layeridx))
hidden = i2h + h2h
else:
hidden = i2h
hidden = mx.sym.Activation(data=hidden, act_type="tanh")
return RNNState(h=hidden)
def rnn_unroll(num_rnn_layer, seq_len, input_size,
num_hidden, num_label):
cls_weight = mx.sym.Variable("cls_weight")
cls_bias = mx.sym.Variable("cls_bias")
param_cells = []
for i in range(num_rnn_layer):
param_cells.append(RNNParam(i2h_weight = mx.sym.Variable("l%d_i2h_weight" % i),
i2h_bias = mx.sym.Variable("l%d_i2h_bias" % i),
h2h_weight = mx.sym.Variable("l%d_h2h_weight" % i),
h2h_bias = mx.sym.Variable("l%d_h2h_bias" % i)))
loss_all = []
ori_data = mx.sym.Variable('data')
label = mx.sym.Variable('softmax_label')
data_timestamp = mx.sym.SliceChannel(data=ori_data, num_outputs=seq_len, squeeze_axis=1)
hidden = None
for seqidx in range(seq_len):
in_data = data_timestamp[seqidx]
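        # NOTE: `i` below is the loop variable left over from the parameter-construction loop
        # above, so every timestep uses the last entry of param_cells; this effectively assumes
        # num_rnn_layer == 1.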
next_state = rnn(num_hidden, in_data=in_data,
prev_state=hidden,
param=param_cells[i],
seqidx=seqidx, layeridx=i)
hidden = next_state.h
fc = mx.sym.FullyConnected(data=hidden, weight=cls_weight, bias=cls_bias, num_hidden=num_label)
reg = mx.sym.LinearRegressionOutput(data=fc, label=label)
return reg
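# Minimal driver sketch (added for illustration; the sizes below are arbitrary assumptions and
# the original file defines no entry point):
if __name__ == '__main__':
    sym = rnn_unroll(num_rnn_layer=1, seq_len=10, input_size=4,
                     num_hidden=8, num_label=1)
    print(sym.list_arguments())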
|
openfda/nsde/pipeline.py | FDA/openfda | 388 | 37368 | #!/usr/local/bin/python
'''
Pipeline for converting CSV nsde data to JSON and importing into Elasticsearch.
'''
import glob
import os
from os.path import join, dirname
import luigi
from openfda import common, config, parallel, index_util
from openfda.common import newest_file_timestamp
NSDE_DOWNLOAD = \
'https://download.open.fda.gov/Comprehensive_NDC_SPL_Data_Elements_File.zip'
NSDE_EXTRACT_DB = 'nsde/nsde.db'
NSDE_RAW_DIR = config.data_dir('nsde/raw')
class DownloadNSDE(luigi.Task):
def output(self):
return luigi.LocalTarget(join(NSDE_RAW_DIR, 'nsde.csv'))
def run(self):
output_dir = dirname(self.output().path)
zip_filename = join(output_dir, 'nsde.zip')
common.download(NSDE_DOWNLOAD, zip_filename)
os.system('unzip -o %(zip_filename)s -d %(output_dir)s' % locals())
os.rename(glob.glob(join(output_dir, '*.csv'))[0], self.output().path)
class NSDE2JSONMapper(parallel.Mapper):
rename_map = {
"Item Code": "package_ndc",
"NDC11": "package_ndc11",
"Marketing Category": "marketing_category",
"Marketing Start Date": "marketing_start_date",
"Marketing End Date": "marketing_end_date",
"Billing Unit": "billing_unit",
"Proprietary Name": "proprietary_name",
"Dosage Form": "dosage_form",
"Application Number or Citation": "application_number_or_citation",
"Product Type": "product_type",
"Inactivation Date": "inactivation_date",
"Reactivation Date": "reactivation_date"
}
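    # Example of the resulting shape (illustrative values only): a CSV row such as
    #   {"Item Code": "12345-6789-01", "Marketing Start Date": 20200101.0, "Unmapped Column": "x"}
    # would be emitted as
    #   {"package_ndc": "12345-6789-01", "marketing_start_date": "20200101"}
    # (keys not present in rename_map are dropped by _cleaner below).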
def map(self, key, value, output):
def _cleaner(k, v):
''' Helper function to rename keys and purge any keys that are not in
the map.
'''
if k in self.rename_map and v is not None and v != '':
if "Date" in k:
return (self.rename_map[k], str(int(v)))
if "Proprietary Name" in k:
return (self.rename_map[k], str(v).title())
else:
return (self.rename_map[k], v)
new_value = common.transform_dict(value, _cleaner)
output.add(key, new_value)
class NSDE2JSON(luigi.Task):
def requires(self):
return DownloadNSDE()
def output(self):
return luigi.LocalTarget(config.data_dir(NSDE_EXTRACT_DB))
def run(self):
parallel.mapreduce(
parallel.Collection.from_glob(
self.input().path, parallel.CSVDictLineInput()),
mapper=NSDE2JSONMapper(),
reducer=parallel.IdentityReducer(),
output_prefix=self.output().path)
class LoadJSON(index_util.LoadJSONBase):
index_name = 'othernsde'
type_name = 'othernsde'
mapping_file = './schemas/othernsde_mapping.json'
data_source = NSDE2JSON()
use_checksum = False
optimize_index = True
last_update_date = lambda _: newest_file_timestamp(NSDE_RAW_DIR)
if __name__ == '__main__':
luigi.run()
|
examples/c/cdecl.py | rakati/ppci-mirror | 161 | 37406 | """ Implement logic similar to that of www.cdecl.org
Try for example:
$ cdecl.py 'char **a;'
"""
import argparse
import io
from ppci.api import get_current_arch
from ppci.lang.c import CLexer, CParser, COptions, CContext, CSemantics
from ppci.lang.c.nodes import types, declarations
from ppci.lang.c.preprocessor import prepare_for_parsing
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('source', type=str)
args = parser.parse_args()
# print('Source:', args.source)
# Parse into ast:
arch = get_current_arch()
coptions = COptions()
ccontext = CContext(coptions, arch.info)
semantics = CSemantics(ccontext)
cparser = CParser(coptions, semantics)
clexer = CLexer(COptions())
f = io.StringIO(args.source)
tokens = clexer.lex(f, '<snippet>')
tokens = prepare_for_parsing(tokens, cparser.keywords)
cparser.init_lexer(tokens)
semantics.begin()
decl = cparser.parse_declarations()[0]
# Explain:
def explain(x):
if isinstance(x, declarations.VariableDeclaration):
return '{} is {}'.format(x.name, explain(x.typ))
elif isinstance(x, types.PointerType):
return 'a pointer to {}'.format(explain(x.element_type))
elif isinstance(x, types.ArrayType):
return 'an array of {}'.format(explain(x.element_type))
elif isinstance(x, types.BasicType):
return '{}'.format(x.type_id)
else:
        print('???', x)
        return str(x)
print(explain(decl))
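# Expected behaviour (illustrative): for the example from the docstring,
#   $ python cdecl.py 'char **a;'
# the script should print roughly: a is a pointer to a pointer to char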
|
src/curt/curt/modules/vision/vision_processor_service.py | sanyaade-teachings/cep | 108 | 37418 | """
Copyright (C) Cortic Technology Corp. - All Rights Reserved
Written by <NAME> <<EMAIL>>, 2021
"""
# need to advertise different processor type, eg CPU, GPU, TPU
import traceback
import logging
from curt.base_service import BaseService
class VisionProcessorService(BaseService):
def __init__(self):
super().__init__("VisionProcessor")
def execute_function(self, worker, data):
config_worker = data[-1]
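        # Expected payload layout (as used below): `data` is a sequence whose last element is the
        # config flag; data[0] is either a plain parameter list, or a dict whose "ready_data"
        # entries are placed first, followed by the remaining guid-keyed values, before being
        # handed to the worker's run_inference().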
try:
if config_worker:
return worker.config_worker(data[0])
else:
if isinstance(data[0], list):
return worker.run_inference(data[0])
elif isinstance(data[0], dict):
data_list = []
for param in data[0]["ready_data"]:
data_list.append(param)
for guid in data[0].keys():
if guid != "ready_data":
data_list.append(data[0][guid])
return worker.run_inference(data_list)
except Exception as e:
logging.error(traceback.format_exc())
|
training/train_nav.py | catalina17/EmbodiedQA | 289 | 37429 | import time
import argparse
from datetime import datetime
import logging
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel
from data import EqaDataLoader
from metrics import NavMetric
from models import MaskedNLLCriterion
from models import get_state, ensure_shared_grads
from data import load_vocab
from torch.autograd import Variable
from tqdm import tqdm
torch.backends.cudnn.enabled = False
################################################################################################
#make models trained in pytorch 4 compatible with earlier pytorch versions
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
################################################################################################
def eval(rank, args, shared_model):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
eval_loader_kwargs = {
'questions_h5': getattr(args, args.eval_split + '_h5'),
'data_json': args.data_json,
'vocab': args.vocab_json,
'target_obj_conn_map_dir': args.target_obj_conn_map_dir,
'map_resolution': args.map_resolution,
'batch_size': 1,
'input_type': args.model_type,
'num_frames': 5,
'split': args.eval_split,
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': False,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
}
eval_loader = EqaDataLoader(**eval_loader_kwargs)
print('eval_loader has %d samples' % len(eval_loader.dataset))
logging.info("EVAL: eval_loader has {} samples".format(len(eval_loader.dataset)))
args.output_log_path = os.path.join(args.log_dir,
'eval_' + str(rank) + '.json')
t, epoch, best_eval_acc = 0, 0, 0.0
max_epochs = args.max_epochs
if args.mode == 'eval':
max_epochs = 1
while epoch < int(max_epochs):
invalids = []
model.load_state_dict(shared_model.state_dict())
model.eval()
# that's a lot of numbers
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
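        # Metric naming: the suffix 10/30/50 is how many actions before the end of the
        # ground-truth trajectory the agent is spawned. d_0/d_T/d_min = distance to target at
        # spawn / at termination / minimum along the episode, d_D = d_0 - d_T, r_T / r_e =
        # agent ends in / ever enters the target room, stop = agent issued the stop action,
        # ep_len = episode length.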
if 'cnn' in args.model_type:
done = False
while done == False:
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_length = batch
metrics_slug = {}
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if action_length[0] + 1 - i - 5 < 0:
invalids.append(idx[0])
continue
ep_inds = [
x for x in range(action_length[0] + 1 - i - 5,
action_length[0] + 1 - i)
]
sub_img_feats = torch.index_select(
img_feats, 1, torch.LongTensor(ep_inds))
init_pos = eval_loader.dataset.episode_pos_queue[
ep_inds[-1]]
h3d = eval_loader.dataset.episode_house
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append(idx[0])
continue
sub_img_feats_var = Variable(sub_img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
# sample actions till max steps or <stop>
# max no. of actions = 100
episode_length = 0
episode_done = True
dists_to_target, pos_queue, actions = [
init_dist_to_target
], [init_pos], []
for step in range(args.max_episode_length):
episode_length += 1
if '+q' in args.model_type:
scores = model(sub_img_feats_var,
questions_var)
else:
scores = model(sub_img_feats_var)
prob = F.softmax(scores, dim=1)
action = int(prob.max(1)[1].data.cpu().numpy()[0])
actions.append(action)
img, _, episode_done = h3d.step(action)
episode_done = episode_done or episode_length >= args.max_episode_length
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224)
.cuda())).view(1, 1, 3200)
sub_img_feats_var = torch.cat(
[sub_img_feats_var, img_feat_var], dim=1)
sub_img_feats_var = sub_img_feats_var[:, -5:, :]
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done == True:
break
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
print(metrics.get_stat_string(mode=0))
print('invalids', len(invalids))
logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
logging.info("EVAL: invalids: {}".format(len(invalids)))
# del h3d
eval_loader.dataset._load_envs()
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
elif 'lstm' in args.model_type:
done = False
while done == False:
if args.overfit:
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, questions, answer, _, actions_in, actions_out, action_lengths, _ = batch
question_var = Variable(questions.cuda())
metrics_slug = {}
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if action_lengths[0] - 1 - i < 0:
invalids.append([idx[0], i])
continue
h3d = eval_loader.dataset.episode_house
# forward through lstm till spawn
if len(eval_loader.dataset.episode_pos_queue[:-i]
) > 0:
images = eval_loader.dataset.get_frames(
h3d,
eval_loader.dataset.episode_pos_queue[:-i],
preprocess=True)
raw_img_feats = eval_loader.dataset.cnn(
Variable(torch.FloatTensor(images).cuda()))
actions_in_pruned = actions_in[:, :
action_lengths[0] -
i]
actions_in_var = Variable(actions_in_pruned.cuda())
action_lengths_pruned = action_lengths.clone(
).fill_(action_lengths[0] - i)
img_feats_var = raw_img_feats.view(1, -1, 3200)
if '+q' in args.model_type:
scores, hidden = model(
img_feats_var, question_var,
actions_in_var,
action_lengths_pruned.cpu().numpy())
else:
scores, hidden = model(
img_feats_var, False, actions_in_var,
action_lengths_pruned.cpu().numpy())
try:
init_pos = eval_loader.dataset.episode_pos_queue[
-i]
except:
invalids.append([idx[0], i])
continue
action_in = torch.LongTensor(1, 1).fill_(
actions_in[0,
action_lengths[0] - i]).cuda()
else:
init_pos = eval_loader.dataset.episode_pos_queue[
-i]
hidden = model.nav_rnn.init_hidden(1)
action_in = torch.LongTensor(1, 1).fill_(0).cuda()
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append([idx[0], i])
continue
img = h3d.env.render()
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224).cuda())).view(
1, 1, 3200)
episode_length = 0
episode_done = True
dists_to_target, pos_queue, actions = [
init_dist_to_target
], [init_pos], []
actual_pos_queue = [(h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw)]
for step in range(args.max_episode_length):
episode_length += 1
if '+q' in args.model_type:
scores, hidden = model(
img_feat_var,
question_var,
Variable(action_in),
False,
hidden=hidden,
step=True)
else:
scores, hidden = model(
img_feat_var,
False,
Variable(action_in),
False,
hidden=hidden,
step=True)
prob = F.softmax(scores, dim=1)
action = int(prob.max(1)[1].data.cpu().numpy()[0])
actions.append(action)
img, _, episode_done = h3d.step(action)
episode_done = episode_done or episode_length >= args.max_episode_length
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224)
.cuda())).view(1, 1, 3200)
action_in = torch.LongTensor(
1, 1).fill_(action + 1).cuda()
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done == True:
break
actual_pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw])
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
print(metrics.get_stat_string(mode=0))
print('invalids', len(invalids))
logging.info("EVAL: init_steps: {} metrics: {}".format(i, metrics.get_stat_string(mode=0)))
logging.info("EVAL: init_steps: {} invalids: {}".format(i, len(invalids)))
# del h3d
eval_loader.dataset._load_envs()
print("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
logging.info("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
assert len(eval_loader.dataset.pruned_env_set) > 0
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
elif 'pacman' in args.model_type:
done = False
while done == False:
if args.overfit:
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, question, answer, actions, action_length = batch
metrics_slug = {}
h3d = eval_loader.dataset.episode_house
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if i > action_length[0]:
invalids.append([idx[0], i])
continue
question_var = Variable(question.cuda())
controller_step = False
planner_hidden = model.planner_nav_rnn.init_hidden(1)
# get hierarchical action history
(
planner_actions_in, planner_img_feats,
controller_step, controller_action_in,
controller_img_feats, init_pos,
controller_action_counter
) = eval_loader.dataset.get_hierarchical_features_till_spawn(
actions[0, :action_length[0] + 1].numpy(), i, args.max_controller_actions
)
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_img_feats_var = Variable(
planner_img_feats.cuda())
# forward planner till spawn to update hidden state
for step in range(planner_actions_in.size(0)):
planner_scores, planner_hidden = model.planner_step(
question_var, planner_img_feats_var[step]
.unsqueeze(0).unsqueeze(0),
planner_actions_in_var[step].view(1, 1),
planner_hidden
)
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append([idx[0], i])
continue
dists_to_target, pos_queue, pred_actions = [
init_dist_to_target
], [init_pos], []
planner_actions, controller_actions = [], []
episode_length = 0
if args.max_controller_actions > 1:
controller_action_counter = controller_action_counter % args.max_controller_actions
controller_action_counter = max(controller_action_counter - 1, 0)
else:
controller_action_counter = 0
first_step = True
first_step_is_controller = controller_step
planner_step = True
action = int(controller_action_in)
for step in range(args.max_episode_length):
if not first_step:
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224,
224).cuda())).view(
1, 1, 3200)
else:
img_feat_var = Variable(controller_img_feats.cuda()).view(1, 1, 3200)
if not first_step or first_step_is_controller:
# query controller to continue or not
controller_action_in = Variable(
torch.LongTensor(1, 1).fill_(action).cuda())
controller_scores = model.controller_step(
img_feat_var, controller_action_in,
planner_hidden[0])
prob = F.softmax(controller_scores, dim=1)
controller_action = int(
prob.max(1)[1].data.cpu().numpy()[0])
if controller_action == 1 and controller_action_counter < args.max_controller_actions - 1:
controller_action_counter += 1
planner_step = False
else:
controller_action_counter = 0
planner_step = True
controller_action = 0
controller_actions.append(controller_action)
first_step = False
if planner_step:
if not first_step:
action_in = torch.LongTensor(
1, 1).fill_(action + 1).cuda()
planner_scores, planner_hidden = model.planner_step(
question_var, img_feat_var,
Variable(action_in), planner_hidden)
prob = F.softmax(planner_scores, dim=1)
action = int(
prob.max(1)[1].data.cpu().numpy()[0])
planner_actions.append(action)
episode_done = action == 3 or episode_length >= args.max_episode_length
episode_length += 1
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done:
break
img, _, _ = h3d.step(action)
first_step = False
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
try:
print(metrics.get_stat_string(mode=0))
logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
except:
pass
print('epoch', epoch)
print('invalids', len(invalids))
logging.info("EVAL: epoch {}".format(epoch))
logging.info("EVAL: invalids {}".format(invalids))
# del h3d
eval_loader.dataset._load_envs()
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
epoch += 1
# checkpoint if best val loss
if metrics.metrics[8][0] > best_eval_acc: # d_D_50
best_eval_acc = metrics.metrics[8][0]
if epoch % args.eval_every == 0 and args.log == True:
metrics.dump_log()
model_state = get_state(model)
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}
checkpoint_path = '%s/epoch_%d_d_D_50_%.04f.pt' % (
args.checkpoint_dir, epoch, best_eval_acc)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("EVAL: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
print('[best_eval_d_D_50:%.04f]' % best_eval_acc)
logging.info("EVAL: [best_eval_d_D_50:{:.04f}]".format(best_eval_acc))
eval_loader.dataset._load_envs(start_idx=0, in_order=True)
def train(rank, args, shared_model):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
lossFn = torch.nn.CrossEntropyLoss().cuda()
optim = torch.optim.Adamax(
filter(lambda p: p.requires_grad, shared_model.parameters()),
lr=args.learning_rate)
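    # Training shares parameters Hogwild-style across processes: the optimizer holds the *shared*
    # model's parameters, each worker computes gradients on its local replica, and
    # ensure_shared_grads() copies them onto the shared model before optim.step() (see the loops below).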
train_loader_kwargs = {
'questions_h5': args.train_h5,
'data_json': args.data_json,
'vocab': args.vocab_json,
'batch_size': args.batch_size,
'input_type': args.model_type,
'num_frames': 5,
'map_resolution': args.map_resolution,
'split': 'train',
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': args.cache,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
'max_actions': args.max_actions
}
args.output_log_path = os.path.join(args.log_dir,
'train_' + str(rank) + '.json')
if 'pacman' in args.model_type:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['planner_loss', 'controller_loss'],
log_json=args.output_log_path)
else:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['loss'],
log_json=args.output_log_path)
train_loader = EqaDataLoader(**train_loader_kwargs)
print('train_loader has %d samples' % len(train_loader.dataset))
logging.info('TRAIN: train loader has {} samples'.format(len(train_loader.dataset)))
t, epoch = 0, 0
while epoch < int(args.max_epochs):
if 'cnn' in args.model_type:
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
while done == False:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, _, actions_out, _ = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_out_var = Variable(actions_out.cuda())
if '+q' in args.model_type:
scores = model(img_feats_var, questions_var)
else:
scores = model(img_feats_var)
loss = lossFn(scores, actions_out_var)
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.log == True:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'lstm' in args.model_type:
lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
total_times = []
while done == False:
start_time = time.time()
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_lengths, masks = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_in_var = Variable(actions_in.cuda())
actions_out_var = Variable(actions_out.cuda())
action_lengths = action_lengths.cuda()
masks_var = Variable(masks.cuda())
action_lengths, perm_idx = action_lengths.sort(
0, descending=True)
img_feats_var = img_feats_var[perm_idx]
if '+q' in args.model_type:
questions_var = questions_var[perm_idx]
actions_in_var = actions_in_var[perm_idx]
actions_out_var = actions_out_var[perm_idx]
masks_var = masks_var[perm_idx]
if '+q' in args.model_type:
scores, hidden = model(img_feats_var, questions_var,
actions_in_var,
action_lengths.cpu().numpy())
else:
scores, hidden = model(img_feats_var, False,
actions_in_var,
action_lengths.cpu().numpy())
#block out masks
if args.curriculum:
curriculum_length = (epoch+1)*5
for i, action_length in enumerate(action_lengths):
if action_length - curriculum_length > 0:
masks_var[i, :action_length-curriculum_length] = 0
logprob = F.log_softmax(scores, dim=1)
loss = lossFn(
logprob, actions_out_var[:, :action_lengths.max()]
.contiguous().view(-1, 1),
masks_var[:, :action_lengths.max()].contiguous().view(
-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
logging.info("TRAIN LSTM loss: {:.6f}".format(loss.data[0]))
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.log == True:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'pacman' in args.model_type:
planner_lossFn = MaskedNLLCriterion().cuda()
controller_lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
while done == False:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, planner_img_feats, planner_actions_in, \
planner_actions_out, planner_action_lengths, planner_masks, \
controller_img_feats, controller_actions_in, planner_hidden_idx, \
controller_outs, controller_action_lengths, controller_masks = batch
questions_var = Variable(questions.cuda())
planner_img_feats_var = Variable(planner_img_feats.cuda())
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_actions_out_var = Variable(
planner_actions_out.cuda())
planner_action_lengths = planner_action_lengths.cuda()
planner_masks_var = Variable(planner_masks.cuda())
controller_img_feats_var = Variable(
controller_img_feats.cuda())
controller_actions_in_var = Variable(
controller_actions_in.cuda())
planner_hidden_idx_var = Variable(
planner_hidden_idx.cuda())
controller_outs_var = Variable(controller_outs.cuda())
controller_action_lengths = controller_action_lengths.cuda(
)
controller_masks_var = Variable(controller_masks.cuda())
planner_action_lengths, perm_idx = planner_action_lengths.sort(
0, descending=True)
questions_var = questions_var[perm_idx]
planner_img_feats_var = planner_img_feats_var[perm_idx]
planner_actions_in_var = planner_actions_in_var[perm_idx]
planner_actions_out_var = planner_actions_out_var[perm_idx]
planner_masks_var = planner_masks_var[perm_idx]
controller_img_feats_var = controller_img_feats_var[
perm_idx]
controller_actions_in_var = controller_actions_in_var[
perm_idx]
controller_outs_var = controller_outs_var[perm_idx]
planner_hidden_idx_var = planner_hidden_idx_var[perm_idx]
controller_action_lengths = controller_action_lengths[
perm_idx]
controller_masks_var = controller_masks_var[perm_idx]
planner_scores, controller_scores, planner_hidden = model(
questions_var, planner_img_feats_var,
planner_actions_in_var,
planner_action_lengths.cpu().numpy(),
planner_hidden_idx_var, controller_img_feats_var,
controller_actions_in_var, controller_action_lengths)
planner_logprob = F.log_softmax(planner_scores, dim=1)
controller_logprob = F.log_softmax(
controller_scores, dim=1)
planner_loss = planner_lossFn(
planner_logprob,
planner_actions_out_var[:, :planner_action_lengths.max(
)].contiguous().view(-1, 1),
planner_masks_var[:, :planner_action_lengths.max()]
.contiguous().view(-1, 1))
controller_loss = controller_lossFn(
controller_logprob,
controller_outs_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1),
controller_masks_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update(
[planner_loss.data[0], controller_loss.data[0]])
logging.info("TRAINING PACMAN planner-loss: {:.6f} controller-loss: {:.6f}".format(
planner_loss.data[0], controller_loss.data[0]))
# backprop and update
if args.max_controller_actions == 1:
(planner_loss).backward()
else:
(planner_loss + controller_loss).backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.log == True:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
epoch += 1
if epoch % args.save_every == 0:
model_state = get_state(model)
optimizer_state = optim.state_dict()
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
checkpoint = {'args': ad,
'state': model_state,
'epoch': epoch,
'optimizer': optimizer_state}
checkpoint_path = '%s/epoch_%d_thread_%d.pt' % (
args.checkpoint_dir, epoch, rank)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("TRAIN: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data params
parser.add_argument('-train_h5', default='data/train.h5')
parser.add_argument('-val_h5', default='data/val.h5')
parser.add_argument('-test_h5', default='data/test.h5')
parser.add_argument('-data_json', default='data/data.json')
parser.add_argument('-vocab_json', default='data/vocab.json')
parser.add_argument(
'-target_obj_conn_map_dir',
default='data/target-obj-conn-maps/500')
parser.add_argument('-map_resolution', default=500, type=int)
parser.add_argument(
'-mode',
default='train+eval',
type=str,
choices=['train', 'eval', 'train+eval'])
parser.add_argument('-eval_split', default='val', type=str)
# model details
parser.add_argument(
'-model_type',
default='cnn',
choices=['cnn', 'cnn+q', 'lstm', 'lstm+q', 'lstm-mult+q', 'pacman'])
parser.add_argument('-max_episode_length', default=100, type=int)
parser.add_argument('-curriculum', default=0, type=int)
# optim params
parser.add_argument('-batch_size', default=20, type=int)
parser.add_argument('-learning_rate', default=1e-3, type=float)
parser.add_argument('-max_epochs', default=1000, type=int)
parser.add_argument('-overfit', default=False, action='store_true')
# bookkeeping
parser.add_argument('-print_every', default=5, type=int)
parser.add_argument('-eval_every', default=1, type=int)
parser.add_argument('-save_every', default=1000, type=int) #optional if you would like to save specific epochs as opposed to relying on the eval thread
parser.add_argument('-identifier', default='cnn')
parser.add_argument('-num_processes', default=1, type=int)
parser.add_argument('-max_threads_per_gpu', default=10, type=int)
# checkpointing
parser.add_argument('-checkpoint_path', default=False)
parser.add_argument('-checkpoint_dir', default='checkpoints/nav/')
parser.add_argument('-log_dir', default='logs/nav/')
parser.add_argument('-log', default=False, action='store_true')
parser.add_argument('-cache', default=False, action='store_true')
parser.add_argument('-max_controller_actions', type=int, default=5)
parser.add_argument('-max_actions', type=int)
args = parser.parse_args()
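    # Example invocation (illustrative; flag values are arbitrary):
    #   CUDA_VISIBLE_DEVICES=0 python train_nav.py -model_type pacman -identifier pacman \
    #       -mode train+eval -num_processes 1 -log
    # CUDA_VISIBLE_DEVICES must be set, since the CPU-only path exits below.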
args.time_id = time.strftime("%m_%d_%H:%M")
#MAX_CONTROLLER_ACTIONS = args.max_controller_actions
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
if args.curriculum:
assert 'lstm' in args.model_type #TODO: Finish implementing curriculum for other model types
logging.basicConfig(filename=os.path.join(args.log_dir, "run_{}.log".format(
str(datetime.now()).replace(' ', '_'))),
level=logging.INFO,
format='%(asctime)-15s %(message)s')
try:
args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
args.gpus = [int(x) for x in args.gpus]
except KeyError:
print("CPU not supported")
logging.info("CPU not supported")
exit()
if args.checkpoint_path != False:
print('Loading checkpoint from %s' % args.checkpoint_path)
logging.info("Loading checkpoint from {}".format(args.checkpoint_path))
args_to_keep = ['model_type']
checkpoint = torch.load(args.checkpoint_path, map_location={
'cuda:0': 'cpu'
})
for i in args.__dict__:
if i not in args_to_keep:
checkpoint['args'][i] = args.__dict__[i]
args = type('new_dict', (object, ), checkpoint['args'])
args.checkpoint_dir = os.path.join(args.checkpoint_dir,
args.time_id + '_' + args.identifier)
args.log_dir = os.path.join(args.log_dir,
args.time_id + '_' + args.identifier)
# if set to overfit; set eval_split to train
if args.overfit == True:
args.eval_split = 'train'
print(args.__dict__)
logging.info(args.__dict__)
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
os.makedirs(args.log_dir)
if args.model_type == 'cnn':
model_kwargs = {}
shared_model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
shared_model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
shared_model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
shared_model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
shared_model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
shared_model.share_memory()
if args.checkpoint_path != False:
print('Loading params from checkpoint: %s' % args.checkpoint_path)
logging.info("Loading params from checkpoint: {}".format(args.checkpoint_path))
shared_model.load_state_dict(checkpoint['state'])
if args.mode == 'eval':
eval(0, args, shared_model)
elif args.mode == 'train':
if args.num_processes > 1:
processes = []
for rank in range(0, args.num_processes):
# for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
train(0, args, shared_model)
else:
processes = []
# Start the eval thread
p = mp.Process(target=eval, args=(0, args, shared_model))
p.start()
processes.append(p)
# Start the training thread(s)
for rank in range(1, args.num_processes + 1):
# for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model))
p.start()
processes.append(p)
for p in processes:
p.join()
|
Chapter05/examine_tar_file_content.py | add54/ADMIN_SYS_PYTHON | 116 | 37442 | <filename>Chapter05/examine_tar_file_content.py
import tarfile
tar_file = tarfile.open("work.tar.gz", "r:gz")
print(tar_file.getnames())
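# Release the file handle once the member names have been printed.
tar_file.close()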
|
src/hg/makeDb/scripts/cd8Escape/process_epitopes.py | andypohl/kent | 171 | 37447 | import os
import re
import gzip
import argparse
import pandas as pd
import numpy as np
from collections import defaultdict
def get_args():
"""
Parse command line arguments
"""
parser = argparse.ArgumentParser(description="Method to create track for escape mutations")
parser.add_argument("-xlsx", help="file containing all the data")
parser.add_argument("-pid", help="pep to number", default="prot_names_pids_8.txt")
parser.add_argument("-gb_tools", help="path to gb_tools", default="./")
args = parser.parse_args()
return args
def read_pid(args):
inputfilehandler = open(args.pid, 'r')
pid = {}
aaid = {}
nucid = {}
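    # Each line of the -pid file is assumed to hold four whitespace-separated columns:
    #   <8-mer peptide> <protein id> <nucleotide start> <amino-acid start>
    # (inferred from how the fields are consumed below; column meanings beyond that are a guess).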
for line in inputfilehandler:
line = line.strip()
fields = line.split()
peptide = fields[0]
pid[peptide] = fields[1]
nucid[peptide] = fields[2]
aaid[peptide] = fields[3]
inputfilehandler.close()
return (pid, aaid, nucid)
def get_start_pos(peptide, pid, aaid, nucid):
first_eight = ''.join(list(peptide)[0:8])
if first_eight in pid:
return nucid[first_eight]
return -1
def main(args):
(pid, aaid, nucid) = read_pid(args)
cd8_epitopes = pd.read_excel(args.xlsx,
skiprows=0,
header=0,
index_col=None)
print (cd8_epitopes.columns)
outfiletag = 'escape_mutations'
beddetailfilename = outfiletag+'.beddetail'
bedfilename = outfiletag+'.bed'
bbfilename = outfiletag+'.bb'
#print (cd8_epitopes['Probable Infection Location'])
#print (cd8_epitopes['Gene'])
#print (cd8_epitopes['Position of Mutation'])
#print (cd8_epitopes['AA Change'])
#print (cd8_epitopes['Codon Change'])
#print (cd8_epitopes['Wildtype Sequence'])
#print (cd8_epitopes['Mutant Sequence 1'])
#print (cd8_epitopes['Mutant Sequence 2'])
wt_mt = defaultdict(list)
mutations = []
beddetailfilehandler = open(beddetailfilename, 'w')
for i in range(0, len(cd8_epitopes['Position of Mutation'])):
chrom = "NC_045512v2"
reserved = 0
score = 1000
strand = '+'
pom = cd8_epitopes['Position of Mutation'][i]
gene = cd8_epitopes['Gene'][i]
pil = cd8_epitopes['Probable Infection Location'][i]
aa_change = cd8_epitopes['AA Change'][i]
c_change = cd8_epitopes['Codon Change'][i]
if gene+'_'+c_change+'_'+aa_change not in mutations:
mutations.append(gene+'_'+c_change+'_'+aa_change)
if ';' not in cd8_epitopes['Wildtype Sequence'][i]:
chromStart = get_start_pos(cd8_epitopes['Wildtype Sequence'][i], pid, aaid, nucid)
if chromStart != -1:
chromEnd = str(len(list(cd8_epitopes['Wildtype Sequence'][i]))*3+int(chromStart))
thickStart = str(chromStart)
thickEnd = str(chromEnd)
wt_pep = cd8_epitopes['Wildtype Sequence'][i]
mt_pep = cd8_epitopes['Mutant Sequence 1'][i]
if wt_pep not in wt_mt:
wt_mt[wt_pep].append(mt_pep)
else:
if mt_pep in wt_mt[wt_pep]:
continue
beddetailfilehandler.write(chrom+'\t'+
str(chromStart)+'\t'+
str(chromEnd)+'\t'+
wt_pep+'\t'+
str(score)+'\t'+
strand+'\t'+
thickStart+'\t'+
thickEnd+'\t'+
str(pom)+'\t'+
str(gene)+'\t'+
str(pil)+'\t'+
aa_change+'\t'+
c_change+'\t'+
mt_pep+"\n")
else:
wt_pep = cd8_epitopes['Wildtype Sequence'][i]
wt1_pep = wt_pep.split(';')[0]
wt2_pep = wt_pep.split(';')[1]
mt1_pep = cd8_epitopes['Mutant Sequence 1'][i]
mt2_pep = cd8_epitopes['Mutant Sequence 2'][i]
chromStart = get_start_pos(wt1_pep, pid, aaid, nucid)
if chromStart != -1:
chromEnd = str(len(list(wt1_pep))*3+int(chromStart))
thickStart = chromStart
thickEnd = chromEnd
if wt1_pep not in wt_mt:
                        wt_mt[wt1_pep].append(mt1_pep)
else:
if mt1_pep in wt_mt[wt1_pep]:
continue
beddetailfilehandler.write(chrom+'\t'+
str(chromStart)+'\t'+
str(chromEnd)+'\t'+
wt1_pep+'\t'+
str(score)+'\t'+
strand+'\t'+
thickStart+'\t'+
thickEnd+'\t'+
str(pom)+'\t'+
str(gene)+'\t'+
str(pil)+'\t'+
aa_change+'\t'+
c_change+'\t'+
mt1_pep+"\n")
chromStart = get_start_pos(wt2_pep, pid, aaid, nucid)
if chromStart != -1:
chromEnd = str(len(list(wt2_pep))*3+int(chromStart))
thickStart = chromStart
thickEnd = chromEnd
if wt2_pep not in wt_mt:
                        wt_mt[wt2_pep].append(mt2_pep)
else:
if mt2_pep in wt_mt[wt2_pep]:
continue
beddetailfilehandler.write(chrom+'\t'+
str(chromStart)+'\t'+
str(chromEnd)+'\t'+
wt2_pep+'\t'+
str(score)+'\t'+
strand+'\t'+
thickStart+'\t'+
thickEnd+'\t'+
str(pom)+'\t'+
str(gene)+'\t'+
str(pil)+'\t'+
aa_change+'\t'+
c_change+'\t'+
mt2_pep+"\n")
beddetailfilehandler.close()
print (len(mutations))
# use gbtools to convert from beddetail to bed and bigbed
os.system(f"bedSort {beddetailfilename} {bedfilename}")
os.system(f"bedToBigBed {bedfilename} wuhCor1.sizes {bbfilename} -tab -type=bed9+ -as=escape_mutants.as")
if __name__ == "__main__":
main(get_args())
|
rest_framework_social_oauth2/settings.py | hrahmadi71/django-rest-framework-social-oauth2 | 613 | 37452 | from django.conf import settings
DRFSO2_PROPRIETARY_BACKEND_NAME = getattr(settings, 'DRFSO2_PROPRIETARY_BACKEND_NAME', "Django")
DRFSO2_URL_NAMESPACE = getattr(settings, 'DRFSO2_URL_NAMESPACE', "")
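# Both values can be overridden from the Django project's settings module, e.g. (illustrative):
#   DRFSO2_PROPRIETARY_BACKEND_NAME = "MyBackend"
#   DRFSO2_URL_NAMESPACE = "drfso2"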
|
src/mcedit2/widgets/propertylist.py | elcarrion06/mcedit2 | 673 | 37509 | """
propertylist
"""
from __future__ import absolute_import, division, print_function
from collections import namedtuple
import logging
from PySide.QtCore import Qt
from mceditlib import nbt
from PySide import QtGui, QtCore
from mcedit2.util.load_ui import registerCustomWidget
log = logging.getLogger(__name__)
class PropertyListItemDelegate(QtGui.QStyledItemDelegate):
def __init__(self, *args, **kwargs):
super(PropertyListItemDelegate, self).__init__(*args, **kwargs)
def createEditor(self, parent, option, index):
model = index.model()
tagName, displayName, valueType, min, max = model.properties[index.row()]
if valueType is int:
valueWidget = QtGui.QSpinBox()
valueWidget.setMinimum(min)
valueWidget.setMaximum(max)
elif valueType is float:
valueWidget = QtGui.QDoubleSpinBox()
valueWidget.setMinimum(min)
valueWidget.setMaximum(max)
elif valueType is bool:
valueWidget = QtGui.QCheckBox()
elif isinstance(valueType, list): # Choice list
valueWidget = QtGui.QComboBox()
for value, name in valueType:
valueWidget.addItem(name, value)
elif valueType is unicode:
valueWidget = QtGui.QPlainTextEdit()
else:
raise TypeError("Can't create attribute widgets for %s yet" % valueType)
valueWidget.setParent(parent)
return valueWidget
def setEditorData(self, editor, index):
model = index.model()
rootTag = model.rootTag
tagName, displayName, valueType, min, max = model.properties[index.row()]
if valueType is int:
editor.setValue(rootTag[tagName].value)
elif valueType is float:
editor.setValue(rootTag[tagName].value)
elif valueType is bool:
editor.setChecked(rootTag[tagName].value)
elif isinstance(valueType, list): # Choice list
currentValue = rootTag[tagName].value
try:
currentIndex = [v for v, n in valueType].index(currentValue)
editor.setCurrentIndex(currentIndex)
except ValueError:
editor.addItem("Unknown value %s" % currentValue, currentValue)
elif valueType is unicode:
editor.setPlainText(rootTag[tagName].value)
else:
raise TypeError("Unknown valueType in setEditorData (check this in addNBTProperty, dummy)")
def setModelData(self, editor, model, index):
tagName, displayName, valueType, min, max = model.properties[index.row()]
rootTag = model.rootTag
if valueType is int:
value = int(editor.value())
elif valueType is float:
value = float(editor.value())
elif valueType is bool:
value = editor.isChecked()
elif isinstance(valueType, list): # Choice list
value = valueType[editor.currentIndex()][0]
elif valueType is unicode:
            value = editor.toPlainText()
else:
raise TypeError("Unknown valueType in setModelData (check this in addNBTProperty, dummy)")
model.setData(index, value)
class PropertyListEntry(namedtuple('PropertyListEntry', 'tagName displayName valueType min max')):
pass
class PropertyListModel(QtCore.QAbstractItemModel):
propertyChanged = QtCore.Signal(unicode, object)
def __init__(self, rootTag):
super(PropertyListModel, self).__init__()
self.rootTag = rootTag
self.properties = []
def addNBTProperty(self, tagName, valueType=None, min=None, max=None, displayName=None):
if displayName is None:
displayName = tagName
if valueType is None:
valueType = int
if tagName not in self.rootTag:
return
tag = self.rootTag[tagName]
if tag.tagID == nbt.ID_BYTE:
tagMin = -(1 << 7)
tagMax = (1 << 7) - 1
elif tag.tagID == nbt.ID_SHORT:
tagMin = -(1 << 15)
tagMax = (1 << 15) - 1
elif tag.tagID == nbt.ID_INT:
tagMin = -(1 << 31)
tagMax = (1 << 31) - 1
else: # tag.tagID == nbt.ID_LONG, ID_FLOAT, ID_DOUBLE
# tagMin = -(1 << 63) # xxxx 64-bit spinbox
# tagMax = (1 << 63) - 1
tagMin = -(1 << 31)
tagMax = (1 << 31) - 1
if min is None:
min = tagMin
if max is None:
max = tagMax
self.properties.append(PropertyListEntry(tagName, displayName, valueType, min, max))
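        # Illustrative usage (tag names are hypothetical and depend on the NBT compound passed in):
        #   model = PropertyListModel(entityTag)
        #   model.addNBTProperty("Health", valueType=float, min=0, max=20)
        #   model.addNBTProperty("Invulnerable", valueType=bool)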
def columnCount(self, index):
return 2
def data(self, index, role=Qt.DisplayRole):
if not index.isValid():
return None
entry = self.properties[index.row()]
if role in (Qt.DisplayRole, Qt.EditRole):
if index.column() == 0:
return entry.displayName
else:
value = self.rootTag[entry.tagName].value
if isinstance(entry.valueType, (list, tuple)):
try:
return entry.valueType[value][1]
except IndexError:
return "Unknown value %s" % value
else:
return value
# if role == Qt.CheckStateRole:
# if entry.valueType is not bool:
# return -1
# value = self.rootTag[entry.tagName].value
# return bool(value)
def flags(self, index):
if not index.isValid():
return 0
flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable
if index.column() == 1:
flags |= Qt.ItemIsEditable
entry = self.properties[index.row()]
#if entry.valueType is bool:
# flags |= Qt.ItemIsUserCheckable
return flags
def headerData(self, section, orientation, role=Qt.DisplayRole):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return ("Name", "Value")[section]
return None
def index(self, row, column, parent=QtCore.QModelIndex()):
if parent.isValid():
return QtCore.QModelIndex()
return self.createIndex(row, column, None)
def parent(self, index):
return QtCore.QModelIndex()
def rowCount(self, parent=QtCore.QModelIndex()):
if parent.isValid():
return 0
return len(self.properties)
def setData(self, index, value, role=Qt.EditRole):
row = index.row()
entry = self.properties[row]
if self.rootTag[entry.tagName].value != value:
self.rootTag[entry.tagName].value = value
self.propertyChanged.emit(entry.tagName, value)
self.dataChanged.emit(index, index)
@registerCustomWidget
class PropertyListWidget(QtGui.QTreeView):
def __init__(self, *args, **kwargs):
super(PropertyListWidget, self).__init__(*args, **kwargs)
delegate = PropertyListItemDelegate()
self.setItemDelegate(delegate)
self.setEditTriggers(self.CurrentChanged | self.editTriggers())
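# Added usage sketch (not part of the original module): one way the pieces above
# fit together. The tag names "Health" and "Invulnerable" and the entityTag
# argument are illustrative assumptions only.
def examplePropertyList(entityTag):
    model = PropertyListModel(entityTag)
    model.addNBTProperty("Health", valueType=float, displayName="Health")
    model.addNBTProperty("Invulnerable", valueType=bool)
    model.propertyChanged.connect(lambda tagName, value: None)  # react to edits here
    widget = PropertyListWidget()
    widget.setModel(model)
    return widget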
|
tests/torch_api/test_multi_models.py | mmathys/bagua | 635 | 37522 | <reponame>mmathys/bagua<gh_stars>100-1000
import torch
import torch.nn as nn
import torch.nn.functional as F
from tests.internal.common_utils import find_free_port
import unittest
import multiprocessing
import os
from bagua.torch_api.utils import flatten
import bagua.torch_api as bagua
from tests import skip_if_cuda_not_available
N_EPOCHS = 10
class Net1(nn.Module):
def __init__(self):
super(Net1, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=True)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class Net2(nn.Module):
def __init__(self):
super(Net2, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 30, bias=True)
self.fc3 = nn.Linear(30, 20, bias=True)
self.fc4 = nn.Linear(20, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.relu(self.fc3(x))
x = self.fc4(x)
return F.softmax(x, dim=1)
def _init_bagua_env(rank, env):
# set deterministic
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(rank)
# initialize subprocess env
os.environ["WORLD_SIZE"] = env["WORLD_SIZE"]
os.environ["LOCAL_WORLD_SIZE"] = env["LOCAL_WORLD_SIZE"]
os.environ["MASTER_ADDR"] = env["MASTER_ADDR"]
os.environ["MASTER_PORT"] = env["MASTER_PORT"]
os.environ["BAGUA_SERVICE_PORT"] = env["BAGUA_SERVICE_PORT"]
os.environ["RANK"] = str(rank)
os.environ["LOCAL_RANK"] = str(rank)
# init bagua distributed process group
torch.cuda.set_device(rank)
bagua.init_process_group()
def _init_torch_env(rank, nprocs, backend):
# set deterministic
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(rank)
# init torch distributed process group
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
world_size=nprocs,
rank=rank,
backend=backend,
init_method="file:///tmp/.bagua.test.filestore",
)
def run_model(
rank,
results,
env,
):
_init_bagua_env(rank, env)
# construct model and optimizer, etc.
model_1 = Net1().cuda()
optimizer_1 = torch.optim.SGD(model_1.parameters(), lr=0.01)
loss_fn_1 = nn.MSELoss()
model_2 = Net2().cuda()
optimizer_2 = torch.optim.SGD(model_2.parameters(), lr=0.01)
loss_fn_2 = nn.MSELoss()
# wrap model
from bagua.torch_api.algorithms import gradient_allreduce
algorithm = gradient_allreduce.GradientAllReduceAlgorithm()
model_1 = model_1.with_bagua([optimizer_1], algorithm)
model_2 = model_2.with_bagua([optimizer_2], algorithm)
ret = results[rank]
ret.init_weight_1.copy_(flatten([param.data for param in model_1.parameters()]))
ret.init_weight_2.copy_(flatten([param.data for param in model_2.parameters()]))
for epoch in range(N_EPOCHS):
data_1 = torch.randn(8, 2).cuda()
target_1 = torch.randn(8, 4).cuda()
optimizer_1.zero_grad()
output_1 = model_1(data_1)
loss_1 = loss_fn_1(output_1, target_1)
loss_1.backward()
optimizer_1.step()
data_2 = torch.randn(8, 2).cuda()
target_2 = torch.randn(8, 4).cuda()
optimizer_2.zero_grad()
output_2 = model_2(data_2)
loss_2 = loss_fn_2(output_2, target_2)
loss_2.backward()
optimizer_2.step()
ret.end_weight_1.copy_(flatten([param.data for param in model_1.parameters()]))
ret.end_weight_2.copy_(flatten([param.data for param in model_2.parameters()]))
def run_torch_model(
rank,
nprocs,
results,
backend,
env,
):
_init_torch_env(rank, nprocs, backend)
# construct model and optimizer, etc.
model_1 = Net1().cuda()
optimizer_1 = torch.optim.SGD(model_1.parameters(), lr=0.01)
loss_fn_1 = nn.MSELoss()
model_2 = Net2().cuda()
optimizer_2 = torch.optim.SGD(model_2.parameters(), lr=0.01)
loss_fn_2 = nn.MSELoss()
# wrap model
model_1 = torch.nn.parallel.DistributedDataParallel(model_1, device_ids=[rank])
model_2 = torch.nn.parallel.DistributedDataParallel(model_2, device_ids=[rank])
ret = results[rank]
ret.init_weight_1.copy_(flatten([param.data for param in model_1.parameters()]))
ret.init_weight_2.copy_(flatten([param.data for param in model_2.parameters()]))
for epoch in range(N_EPOCHS):
data_1 = torch.randn(8, 2).cuda()
target_1 = torch.randn(8, 4).cuda()
optimizer_1.zero_grad()
output_1 = model_1(data_1)
loss_1 = loss_fn_1(output_1, target_1)
loss_1.backward()
optimizer_1.step()
data_2 = torch.randn(8, 2).cuda()
target_2 = torch.randn(8, 4).cuda()
optimizer_2.zero_grad()
output_2 = model_2(data_2)
loss_2 = loss_fn_2(output_2, target_2)
loss_2.backward()
optimizer_2.step()
ret.end_weight_1.copy_(flatten([param.data for param in model_1.parameters()]))
ret.end_weight_2.copy_(flatten([param.data for param in model_2.parameters()]))
class Result(object):
def __init__(self):
model_1 = Net1()
model_2 = Net2()
self.init_weight_1 = flatten(
[torch.zeros_like(param.data) for param in model_1.parameters()]
)
self.end_weight_1 = flatten(
[torch.zeros_like(param.data) for param in model_1.parameters()]
)
self.init_weight_2 = flatten(
[torch.zeros_like(param.data) for param in model_2.parameters()]
)
self.end_weight_2 = flatten(
[torch.zeros_like(param.data) for param in model_2.parameters()]
)
class TestMultiModels(unittest.TestCase):
@skip_if_cuda_not_available()
def test_multi_models(self):
nprocs = torch.cuda.device_count()
env = {}
mp = multiprocessing.get_context("spawn")
torch_results = [Result() for _ in range(nprocs)]
processes = []
backend = "gloo"
for i in range(nprocs):
p = mp.Process(
target=run_torch_model,
args=(
i,
nprocs,
torch_results,
backend,
env,
),
)
p.start()
processes.append(p)
for p in processes:
p.join(timeout=60)
self.assertTrue(p.exitcode == 0)
env = {
"WORLD_SIZE": str(nprocs),
"LOCAL_WORLD_SIZE": str(nprocs),
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(find_free_port(8000, 8100)),
"BAGUA_SERVICE_PORT": str(find_free_port(9000, 9100)),
}
bagua_results = [Result() for _ in range(nprocs)]
processes = []
for i in range(nprocs):
p = mp.Process(
target=run_model,
args=(
i,
bagua_results,
env,
),
)
p.start()
processes.append(p)
for p in processes:
p.join(timeout=60)
self.assertTrue(p.exitcode == 0)
for rank in range(nprocs):
self.assertTrue(
torch.all(
torch.isclose(
bagua_results[rank].init_weight_1,
torch_results[rank].init_weight_1,
)
).item()
)
self.assertTrue(
torch.all(
torch.isclose(
bagua_results[rank].end_weight_1,
torch_results[rank].end_weight_1,
)
).item()
)
self.assertTrue(
torch.all(
torch.isclose(
bagua_results[rank].init_weight_2,
torch_results[rank].init_weight_2,
)
).item()
)
self.assertTrue(
torch.all(
torch.isclose(
bagua_results[rank].end_weight_2,
torch_results[rank].end_weight_2,
)
).item()
)
if __name__ == "__main__":
unittest.main()
|
peregrinearb/utils/single_exchange.py | kecheon/peregrine | 954 | 37526 | import asyncio
import math
import networkx as nx
import ccxt.async_support as ccxt
import datetime
import logging
from .logging_utils import FormatForLogAdapter
__all__ = [
'FeesNotAvailable',
'create_exchange_graph',
'load_exchange_graph',
]
adapter = FormatForLogAdapter(logging.getLogger('peregrinearb.utils.single_exchange'))
class FeesNotAvailable(Exception):
pass
def create_exchange_graph(exchange: ccxt.Exchange):
"""
Returns a simple graph representing exchange. Each edge represents a market.
exchange.load_markets() must have been called. Will throw a ccxt error if it has not.
"""
graph = nx.Graph()
for market_name in exchange.symbols:
try:
base_currency, quote_currency = market_name.split('/')
# if ccxt returns a market in incorrect format (e.g FX_BTC_JPY on BitFlyer)
except ValueError:
continue
graph.add_edge(base_currency, quote_currency, market_name=market_name)
return graph
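# Added usage sketch (not part of the original API); the default exchange id
# below is an assumption -- any ccxt exchange id works the same way.
async def _example_create_graph(exchange_id='kraken'):
    exchange = getattr(ccxt, exchange_id)()
    await exchange.load_markets()
    graph = create_exchange_graph(exchange)
    await exchange.close()
    return graph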
async def load_exchange_graph(exchange, name=True, fees=True, suppress=None, depth=False, tickers=None) -> nx.DiGraph:
"""
Returns a networkx DiGraph populated with the current ask and bid prices for each market in graph (represented by
edges). If depth, also adds an attribute 'depth' to each edge which represents the current volume of orders
available at the price represented by the 'weight' attribute of each edge.
"""
if suppress is None:
suppress = ['markets']
if name:
exchange = getattr(ccxt, exchange)()
if tickers is None:
adapter.info('Fetching tickers')
tickers = await exchange.fetch_tickers()
adapter.info('Fetched tickers')
market_count = len(tickers)
adapter.info('Loading exchange graph', marketCount=market_count)
adapter.debug('Initializing empty graph with exchange_name and timestamp attributes')
graph = nx.DiGraph()
# todo: get exchange's server time?
graph.graph['exchange_name'] = exchange.id
graph.graph['datetime'] = datetime.datetime.now(tz=datetime.timezone.utc)
adapter.debug('Initialized empty graph with exchange_name and timestamp attributes')
async def add_edges():
tasks = [_add_weighted_edge_to_graph(exchange, market_name, graph, log=True, fees=fees,
suppress=suppress, ticker=ticker, depth=depth, )
for market_name, ticker in tickers.items()]
await asyncio.wait(tasks)
if fees:
for i in range(20):
try:
adapter.info('Loading fees', iteration=i)
# must load markets to get fees
await exchange.load_markets()
except (ccxt.DDoSProtection, ccxt.RequestTimeout) as e:
if i == 19:
adapter.warning('Rate limited on final iteration, raising error', iteration=i)
raise e
adapter.warning('Rate limited when loading markets', iteration=i)
await asyncio.sleep(0.1)
except ccxt.ExchangeNotAvailable as e:
if i == 19:
adapter.warning('Cannot load markets due to ExchangeNotAvailable error, '
'graph will not be loaded.', iteration=i)
raise e
adapter.warning('Received ExchangeNotAvailable error when loading markets', iteration=i)
else:
break
adapter.info('Loaded fees', iteration=i, marketCount=market_count)
currency_count = len(exchange.currencies)
adapter.info('Adding data to graph', marketCount=market_count, currencyCount=currency_count)
await add_edges()
adapter.info('Added data to graph', marketCount=market_count, currencyCount=currency_count)
else:
adapter.info('Adding data to graph', marketCount=market_count)
await add_edges()
adapter.info('Added data to graph', marketCount=market_count)
adapter.debug('Closing connection')
await exchange.close()
adapter.debug('Closed connection')
adapter.info('Loaded exchange graph')
return graph
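# Added usage sketch (not part of the original API): load_exchange_graph is a
# coroutine, so it is driven from an event loop; the exchange id 'kraken' is an
# assumption.
#
#   import asyncio
#   graph = asyncio.get_event_loop().run_until_complete(
#       load_exchange_graph('kraken', fees=True, depth=False))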
async def _add_weighted_edge_to_graph(exchange: ccxt.Exchange, market_name: str, graph: nx.DiGraph, log=True,
fees=False, suppress=None, ticker=None, depth=False, ):
"""
todo: add global variable to bid_volume/ ask_volume to see if all tickers (for a given exchange) have value == None
Adds a pair of directed edges for market_name (one per trade direction) to graph, weighted by the market's
current bid and ask prices.
:param exchange: A ccxt Exchange object
:param market_name: A string representing a cryptocurrency market formatted like so:
'{base_currency}/{quote_currency}'
:param graph: A Networkx DiGraph upon which the edges for this market will be added
:param log: If the edge weights given to the graph should be the negative logarithm of the ask and bid prices. This
is necessary to calculate arbitrage opportunities.
:param fees: If fees should be taken into account for prices.
:param suppress: A list or set which tells which types of warnings to not throw. Accepted elements are 'markets'.
:param ticker: A dictionary representing a market as returned by ccxt's Exchange's fetch_ticker method
:param depth: If True, also adds an attribute 'depth' to each edge which represents the current volume of orders
available at the price represented by the 'weight' attribute of each edge.
"""
adapter.debug('Adding edge to graph', market=market_name)
if ticker is None:
try:
adapter.info('Fetching ticker', market=market_name)
ticker = await exchange.fetch_ticker(market_name)
adapter.info('Fetched ticker', market=market_name)
# any error is solely because of fetch_ticker
except:
if 'markets' not in suppress:
adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
market=market_name)
return
if fees:
if 'taker' in exchange.markets[market_name]:
# we always take the taker side because arbitrage depends on filling orders
# sell_fee_dict = exchange.calculate_fee(market_name, 'limit', 'sell', 0, 0, 'taker')
# buy_fee_dict = exchange.calculate_fee(market_name, 'limit', 'buy', 0, 0, 'taker')
fee = exchange.markets[market_name]['taker']
else:
if 'fees' not in suppress:
adapter.warning("The fees for {} have not yet been implemented into ccxt's uniform API."
.format(exchange))
raise FeesNotAvailable('Fees are not available for {} on {}'.format(market_name, exchange.id))
else:
fee = 0.002
else:
fee = 0
fee_scalar = 1 - fee
try:
bid_rate = ticker['bid']
ask_rate = ticker['ask']
if depth:
bid_volume = ticker['bidVolume']
ask_volume = ticker['askVolume']
if bid_volume is None:
adapter.warning('Market is unavailable because its bid volume was given as None. '
'It will not be included in the graph.', market=market_name)
return
if ask_volume is None:
adapter.warning('Market is unavailable because its ask volume was given as None. '
'It will not be included in the graph.', market=market_name)
return
# ask and bid == None if this market is non existent.
except TypeError:
adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
market=market_name)
return
# Exchanges give asks and bids as either 0 or None when they do not exist.
# todo: should we account for exchanges upon which an ask exists but a bid does not (and vice versa)? Would this
# cause bugs?
if ask_rate == 0 or bid_rate == 0 or ask_rate is None or bid_rate is None:
adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
market=market_name)
return
try:
base_currency, quote_currency = market_name.split('/')
# if ccxt returns a market in incorrect format (e.g FX_BTC_JPY on BitFlyer)
except ValueError:
if 'markets' not in suppress:
adapter.warning('Market is unavailable at this time due to incorrect formatting. '
'It will not be included in the graph.', market=market_name)
return
if log:
if depth:
graph.add_edge(base_currency, quote_currency, weight=-math.log(fee_scalar * bid_rate),
depth=-math.log(bid_volume), market_name=market_name, trade_type='SELL',
fee=fee, volume=bid_volume, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=-math.log(fee_scalar * 1 / ask_rate),
depth=-math.log(ask_volume * ask_rate), market_name=market_name, trade_type='BUY',
fee=fee, volume=ask_volume, no_fee_rate=ask_rate)
else:
graph.add_edge(base_currency, quote_currency, weight=-math.log(fee_scalar * bid_rate),
market_name=market_name, trade_type='SELL', fee=fee, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=-math.log(fee_scalar * 1 / ask_rate),
market_name=market_name, trade_type='BUY', fee=fee, no_fee_rate=ask_rate)
else:
if depth:
graph.add_edge(base_currency, quote_currency, weight=fee_scalar * bid_rate, depth=bid_volume,
market_name=market_name, trade_type='SELL', fee=fee, volume=bid_volume, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=fee_scalar * 1 / ask_rate, depth=ask_volume,
market_name=market_name, trade_type='BUY', fee=fee, volume=ask_volume, no_fee_rate=ask_rate)
else:
graph.add_edge(base_currency, quote_currency, weight=fee_scalar * bid_rate,
market_name=market_name, trade_type='SELL', fee=fee, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=fee_scalar * 1 / ask_rate,
market_name=market_name, trade_type='BUY', fee=fee, no_fee_rate=ask_rate)
adapter.debug('Added edge to graph', market=market_name)
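# Added follow-up sketch (not part of the original module): because the edge
# weights are -log(rate), an arbitrage opportunity in a loaded graph can be
# detected as a negative cycle, for example with networkx directly:
#
#   import networkx as nx
#   has_arbitrage = nx.negative_edge_cycle(graph, weight='weight')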
|
src/python/web/handler/status.py | AlekLT/seedsync | 255 | 37537 | # Copyright 2017, <NAME>, All rights reserved.
from bottle import HTTPResponse
from common import Status, overrides
from ..web_app import IHandler, WebApp
from ..serialize import SerializeStatusJson
class StatusHandler(IHandler):
def __init__(self, status: Status):
self.__status = status
@overrides(IHandler)
def add_routes(self, web_app: WebApp):
web_app.add_handler("/server/status", self.__handle_get_status)
def __handle_get_status(self):
out_json = SerializeStatusJson.status(self.__status)
return HTTPResponse(body=out_json)
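# Added wiring sketch (not part of the original module; construction of the
# Status and WebApp objects happens elsewhere in the app and is assumed here):
#
#   handler = StatusHandler(status)
#   handler.add_routes(web_app)  # serves GET /server/status as JSON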
|
datasets/ett/ett.py | leondz/datasets | 3,395 | 37539 | # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Electricity Transformer Temperature (ETT) dataset."""
from dataclasses import dataclass
import pandas as pd
import datasets
_CITATION = """\
@inproceedings{haoyietal-informer-2021,
author = {<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME>},
title = {Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting},
booktitle = {The Thirty-Fifth {AAAI} Conference on Artificial Intelligence, {AAAI} 2021, Virtual Conference},
volume = {35},
number = {12},
pages = {11106--11115},
publisher = {{AAAI} Press},
year = {2021},
}
"""
_DESCRIPTION = """\
The data of Electricity Transformers from two separate counties
in China collected for two years at hourly and 15-min frequencies.
Each data point consists of the target value "oil temperature" and
6 power load features. The train/val/test is 12/4/4 months.
"""
_HOMEPAGE = "https://github.com/zhouhaoyi/ETDataset"
_LICENSE = "The Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/"
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
"h1": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTh1.csv",
"h2": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTh2.csv",
"m1": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTm1.csv",
"m2": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTm2.csv",
}
@dataclass
class ETTBuilderConfig(datasets.BuilderConfig):
"""ETT builder config."""
prediction_length: int = 24
multivariate: bool = False
class ETT(datasets.GeneratorBasedBuilder):
"""Electricity Transformer Temperature (ETT) dataset"""
VERSION = datasets.Version("1.0.0")
# You will be able to load one or the other configurations in the following list with
# data = datasets.load_dataset('ett', 'h1')
# data = datasets.load_dataset('ett', 'm2')
BUILDER_CONFIGS = [
ETTBuilderConfig(
name="h1",
version=VERSION,
description="Time series from first county at hourly frequency.",
),
ETTBuilderConfig(
name="h2",
version=VERSION,
description="Time series from second county at hourly frequency.",
),
ETTBuilderConfig(
name="m1",
version=VERSION,
description="Time series from first county at 15-min frequency.",
),
ETTBuilderConfig(
name="m2",
version=VERSION,
description="Time series from second county at 15-min frequency.",
),
]
DEFAULT_CONFIG_NAME = "h1" # It's not mandatory to have a default configuration. Just use one if it make sense.
def _info(self):
if self.config.multivariate:
features = datasets.Features(
{
"start": datasets.Value("timestamp[s]"),
"target": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
"feat_static_cat": datasets.Sequence(datasets.Value("uint64")),
"item_id": datasets.Value("string"),
}
)
else:
features = datasets.Features(
{
"start": datasets.Value("timestamp[s]"),
"target": datasets.Sequence(datasets.Value("float32")),
"feat_static_cat": datasets.Sequence(datasets.Value("uint64")),
"feat_dynamic_real": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
"item_id": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls = _URLS[self.config.name]
filepath = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepath,
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepath,
"split": "test",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepath,
"split": "dev",
},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath, split):
data = pd.read_csv(filepath, parse_dates=True, index_col=0)
start_date = data.index.min()
if self.config.name in ["m1", "m2"]:
factor = 4 # 15-min frequency
else:
factor = 1 # hourly frequency
train_end_date_index = 12 * 30 * 24 * factor # 1 year
if split == "dev":
end_date_index = (12 * 30 * 24 + 4 * 30 * 24) * factor # 1 year + 4 months
else:
end_date_index = (12 * 30 * 24 + 8 * 30 * 24) * factor # 1 year + 8 months
if self.config.multivariate:
if split in ["test", "dev"]:
# rolling windows of prediction_length for dev and test
for i, index in enumerate(
range(
train_end_date_index,
end_date_index,
self.config.prediction_length,
)
):
yield i, {
"start": start_date,
"target": data[: index + self.config.prediction_length].values.astype("float32").T,
"feat_static_cat": [0],
"item_id": "0",
}
else:
yield 0, {
"start": start_date,
"target": data[:train_end_date_index].values.astype("float32").T,
"feat_static_cat": [0],
"item_id": "0",
}
else:
if split in ["test", "dev"]:
# rolling windows of prediction_length for dev and test
for i, index in enumerate(
range(
train_end_date_index,
end_date_index,
self.config.prediction_length,
)
):
target = data["OT"][: index + self.config.prediction_length].values.astype("float32")
feat_dynamic_real = data[["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL"]][
: index + self.config.prediction_length
].values.T.astype("float32")
yield i, {
"start": start_date,
"target": target,
"feat_dynamic_real": feat_dynamic_real,
"feat_static_cat": [0],
"item_id": "OT",
}
else:
target = data["OT"][:train_end_date_index].values.astype("float32")
feat_dynamic_real = data[["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL"]][
:train_end_date_index
].values.T.astype("float32")
yield 0, {
"start": start_date,
"target": target,
"feat_dynamic_real": feat_dynamic_real,
"feat_static_cat": [0],
"item_id": "OT",
}
|
code/vendor/node_js2c.py | thorium-cfx/fivem | 5,411 | 37562 | <filename>code/vendor/node_js2c.py
import os
import subprocess
import sys
inputs = [
'lib/assert/strict.js',
'lib/assert.js',
'lib/async_hooks.js',
'lib/buffer.js',
'lib/child_process.js',
'lib/cluster.js',
'lib/console.js',
'lib/constants.js',
'lib/crypto.js',
'lib/dgram.js',
'lib/diagnostics_channel.js',
'lib/dns/promises.js',
'lib/dns.js',
'lib/domain.js',
'lib/events.js',
'lib/fs/promises.js',
'lib/fs.js',
'lib/http.js',
'lib/http2.js',
'lib/https.js',
'lib/inspector.js',
'lib/internal/abort_controller.js',
'lib/internal/assert/assertion_error.js',
'lib/internal/assert/calltracker.js',
'lib/internal/assert.js',
'lib/internal/async_hooks.js',
'lib/internal/blob.js',
'lib/internal/blocklist.js',
'lib/internal/bootstrap/environment.js',
'lib/internal/bootstrap/loaders.js',
'lib/internal/bootstrap/node.js',
'lib/internal/bootstrap/pre_execution.js',
'lib/internal/bootstrap/switches/does_not_own_process_state.js',
'lib/internal/bootstrap/switches/does_own_process_state.js',
'lib/internal/bootstrap/switches/is_main_thread.js',
'lib/internal/bootstrap/switches/is_not_main_thread.js',
'lib/internal/buffer.js',
'lib/internal/child_process/serialization.js',
'lib/internal/child_process.js',
'lib/internal/cli_table.js',
'lib/internal/cluster/child.js',
'lib/internal/cluster/primary.js',
'lib/internal/cluster/round_robin_handle.js',
'lib/internal/cluster/shared_handle.js',
'lib/internal/cluster/utils.js',
'lib/internal/cluster/worker.js',
'lib/internal/console/constructor.js',
'lib/internal/console/global.js',
'lib/internal/constants.js',
'lib/internal/crypto/aes.js',
'lib/internal/crypto/certificate.js',
'lib/internal/crypto/cipher.js',
'lib/internal/crypto/diffiehellman.js',
'lib/internal/crypto/dsa.js',
'lib/internal/crypto/ec.js',
'lib/internal/crypto/hash.js',
'lib/internal/crypto/hashnames.js',
'lib/internal/crypto/hkdf.js',
'lib/internal/crypto/keygen.js',
'lib/internal/crypto/keys.js',
'lib/internal/crypto/mac.js',
'lib/internal/crypto/pbkdf2.js',
'lib/internal/crypto/random.js',
'lib/internal/crypto/rsa.js',
'lib/internal/crypto/scrypt.js',
'lib/internal/crypto/sig.js',
'lib/internal/crypto/util.js',
'lib/internal/crypto/webcrypto.js',
'lib/internal/crypto/x509.js',
'lib/internal/debugger/inspect.js',
'lib/internal/debugger/inspect_client.js',
'lib/internal/debugger/inspect_repl.js',
'lib/internal/dgram.js',
'lib/internal/dns/promises.js',
'lib/internal/dns/utils.js',
'lib/internal/dtrace.js',
'lib/internal/encoding.js',
'lib/internal/errors.js',
'lib/internal/error_serdes.js',
'lib/internal/event_target.js',
'lib/internal/fixed_queue.js',
'lib/internal/freelist.js',
'lib/internal/freeze_intrinsics.js',
'lib/internal/fs/cp/cp-sync.js',
'lib/internal/fs/cp/cp.js',
'lib/internal/fs/dir.js',
'lib/internal/fs/promises.js',
'lib/internal/fs/read_file_context.js',
'lib/internal/fs/rimraf.js',
'lib/internal/fs/streams.js',
'lib/internal/fs/sync_write_stream.js',
'lib/internal/fs/utils.js',
'lib/internal/fs/watchers.js',
'lib/internal/heap_utils.js',
'lib/internal/histogram.js',
'lib/internal/http.js',
'lib/internal/http2/compat.js',
'lib/internal/http2/core.js',
'lib/internal/http2/util.js',
'lib/internal/idna.js',
'lib/internal/inspector_async_hook.js',
'lib/internal/js_stream_socket.js',
'lib/internal/legacy/processbinding.js',
'lib/internal/linkedlist.js',
'lib/internal/main/check_syntax.js',
'lib/internal/main/eval_stdin.js',
'lib/internal/main/eval_string.js',
'lib/internal/main/inspect.js',
'lib/internal/main/print_help.js',
'lib/internal/main/prof_process.js',
'lib/internal/main/repl.js',
'lib/internal/main/run_main_module.js',
'lib/internal/main/worker_thread.js',
'lib/internal/modules/cjs/helpers.js',
'lib/internal/modules/cjs/loader.js',
'lib/internal/modules/esm/create_dynamic_module.js',
'lib/internal/modules/esm/get_format.js',
'lib/internal/modules/esm/get_source.js',
'lib/internal/modules/esm/loader.js',
'lib/internal/modules/esm/module_job.js',
'lib/internal/modules/esm/module_map.js',
'lib/internal/modules/esm/resolve.js',
'lib/internal/modules/esm/transform_source.js',
'lib/internal/modules/esm/translators.js',
'lib/internal/modules/package_json_reader.js',
'lib/internal/modules/run_main.js',
'lib/internal/net.js',
'lib/internal/options.js',
'lib/internal/perf/event_loop_delay.js',
'lib/internal/perf/event_loop_utilization.js',
'lib/internal/perf/nodetiming.js',
'lib/internal/perf/observe.js',
'lib/internal/perf/performance.js',
'lib/internal/perf/performance_entry.js',
'lib/internal/perf/timerify.js',
'lib/internal/perf/usertiming.js',
'lib/internal/perf/utils.js',
'lib/internal/per_context/domexception.js',
'lib/internal/per_context/messageport.js',
'lib/internal/per_context/primordials.js',
'lib/internal/policy/manifest.js',
'lib/internal/policy/sri.js',
'lib/internal/priority_queue.js',
'lib/internal/process/esm_loader.js',
'lib/internal/process/execution.js',
'lib/internal/process/per_thread.js',
'lib/internal/process/policy.js',
'lib/internal/process/promises.js',
'lib/internal/process/report.js',
'lib/internal/process/signal.js',
'lib/internal/process/task_queues.js',
'lib/internal/process/warning.js',
'lib/internal/process/worker_thread_only.js',
'lib/internal/querystring.js',
'lib/internal/readline/callbacks.js',
'lib/internal/readline/emitKeypressEvents.js',
'lib/internal/readline/utils.js',
'lib/internal/repl/await.js',
'lib/internal/repl/history.js',
'lib/internal/repl/utils.js',
'lib/internal/repl.js',
'lib/internal/socketaddress.js',
'lib/internal/socket_list.js',
'lib/internal/source_map/prepare_stack_trace.js',
'lib/internal/source_map/source_map.js',
'lib/internal/source_map/source_map_cache.js',
'lib/internal/streams/add-abort-signal.js',
'lib/internal/streams/buffer_list.js',
'lib/internal/streams/compose.js',
'lib/internal/streams/destroy.js',
'lib/internal/streams/duplex.js',
'lib/internal/streams/duplexify.js',
'lib/internal/streams/end-of-stream.js',
'lib/internal/streams/from.js',
'lib/internal/streams/lazy_transform.js',
'lib/internal/streams/legacy.js',
'lib/internal/streams/passthrough.js',
'lib/internal/streams/pipeline.js',
'lib/internal/streams/readable.js',
'lib/internal/streams/state.js',
'lib/internal/streams/transform.js',
'lib/internal/streams/utils.js',
'lib/internal/streams/writable.js',
'lib/internal/stream_base_commons.js',
'lib/internal/test/binding.js',
'lib/internal/test/transfer.js',
'lib/internal/timers.js',
'lib/internal/tls/parse-cert-string.js',
'lib/internal/tls/secure-context.js',
'lib/internal/tls/secure-pair.js',
'lib/internal/trace_events_async_hooks.js',
'lib/internal/tty.js',
'lib/internal/url.js',
'lib/internal/util/comparisons.js',
'lib/internal/util/debuglog.js',
'lib/internal/util/inspect.js',
'lib/internal/util/inspector.js',
'lib/internal/util/iterable_weak_map.js',
'lib/internal/util/types.js',
'lib/internal/util.js',
'lib/internal/v8_prof_polyfill.js',
'lib/internal/v8_prof_processor.js',
'lib/internal/validators.js',
'lib/internal/vm/module.js',
'lib/internal/watchdog.js',
'lib/internal/webstreams/encoding.js',
'lib/internal/webstreams/queuingstrategies.js',
'lib/internal/webstreams/readablestream.js',
'lib/internal/webstreams/transfer.js',
'lib/internal/webstreams/transformstream.js',
'lib/internal/webstreams/util.js',
'lib/internal/webstreams/writablestream.js',
'lib/internal/worker/io.js',
'lib/internal/worker/js_transferable.js',
'lib/internal/worker.js',
'lib/module.js',
'lib/net.js',
'lib/os.js',
'lib/path/posix.js',
'lib/path/win32.js',
'lib/path.js',
'lib/perf_hooks.js',
'lib/process.js',
'lib/punycode.js',
'lib/querystring.js',
'lib/readline.js',
'lib/repl.js',
'lib/stream/consumers.js',
'lib/stream/promises.js',
'lib/stream/web.js',
'lib/stream.js',
'lib/string_decoder.js',
'lib/sys.js',
'lib/timers/promises.js',
'lib/timers.js',
'lib/tls.js',
'lib/trace_events.js',
'lib/tty.js',
'lib/url.js',
'lib/util/types.js',
'lib/util.js',
'lib/v8.js',
'lib/vm.js',
'lib/wasi.js',
'lib/worker_threads.js',
'lib/zlib.js',
'lib/_http_agent.js',
'lib/_http_client.js',
'lib/_http_common.js',
'lib/_http_incoming.js',
'lib/_http_outgoing.js',
'lib/_http_server.js',
'lib/_stream_duplex.js',
'lib/_stream_passthrough.js',
'lib/_stream_readable.js',
'lib/_stream_transform.js',
'lib/_stream_wrap.js',
'lib/_stream_writable.js',
'lib/_tls_common.js',
'lib/_tls_wrap.js',
'deps/v8/tools/splaytree.mjs',
'deps/v8/tools/codemap.mjs',
'deps/v8/tools/consarray.mjs',
'deps/v8/tools/csvparser.mjs',
'deps/v8/tools/profile.mjs',
'deps/v8/tools/profile_view.mjs',
'deps/v8/tools/logreader.mjs',
'deps/v8/tools/arguments.mjs',
'deps/v8/tools/tickprocessor.mjs',
'deps/v8/tools/sourcemap.mjs',
'deps/v8/tools/tickprocessor-driver.mjs',
'deps/acorn/acorn/dist/acorn.js',
'deps/acorn/acorn-walk/dist/walk.js',
'deps/cjs-module-lexer/lexer.js',
'deps/cjs-module-lexer/dist/lexer.js',
'lib/_third_party_main.js',
'config.gypi',
]
deps = [
'deps/v8/tools/splaytree.mjs',
'deps/v8/tools/codemap.mjs',
'deps/v8/tools/consarray.mjs',
'deps/v8/tools/csvparser.mjs',
'deps/v8/tools/profile.mjs',
'deps/v8/tools/profile_view.mjs',
'deps/v8/tools/logreader.mjs',
'deps/v8/tools/arguments.mjs',
'deps/v8/tools/tickprocessor.mjs',
'deps/v8/tools/sourcemap.mjs',
'deps/v8/tools/tickprocessor-driver.mjs',
'deps/acorn/acorn/dist/acorn.js',
'deps/acorn/acorn-walk/dist/walk.js',
'deps/cjs-module-lexer/lexer.js',
'deps/cjs-module-lexer/dist/lexer.js',
]
noderoot = sys.argv[1]
mtimes = []
for inFile in deps:
mtimes = mtimes + [ os.path.getmtime(os.path.join(noderoot, inFile)) ]
mtimes = mtimes + [ os.path.getmtime(sys.argv[0]) ]
mtimes.sort()
mtimes.reverse()
minputs = []
for inFile in deps:
minputs = minputs + [ inFile.replace('/', os.path.sep) ]
outFile = os.path.join(noderoot, 'src/node_javascript.cc')
if not os.path.exists(outFile) or os.path.getmtime(outFile) < mtimes[0]:
subprocess.check_call([sys.executable, 'tools/js2c.py', '--directory', 'lib', '--target', 'src/node_javascript.cc', 'config.gypi'] + deps, cwd = noderoot)
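# Added invocation sketch (not part of the original script); the path below is
# an assumption -- pass the root of the bundled node source tree:
#
#   python node_js2c.py vendor/node
#
# src/node_javascript.cc is regenerated only when one of the tracked deps (or
# this script itself) is newer than the existing output.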
|
ipython/attachments/Weave/iterators_example.py | cassiasamp/scipy-cookbook | 408 | 37569 | #!/usr/bin/env python
import sys
import numpy as npy
import pylab as P
from scipy.weave import inline, converters, blitz
from scipy.testing import measure
# Blitz conversion is terrific, but sometimes you don't have fixed array sizes
# in your problem. Fortunately numpy iterators still make writing inline
# weave code very, very simple.
def multi_iter_example():
# This is a very simple example of multi dimensional iterators, and
# their power to "broadcast" arrays of compatible shapes. It shows that
# the very same code that is entirely ignorant of dimensionality can
# achieve completely different computations based on the rules of
# broadcasting.
# it is important to know that the weave array conversion of "a"
# gives you access in C++ to:
# py_a -- PyObject *
# a_array -- PyArrayObject *
# a -- py_array->data
a = npy.ones((4,4), npy.float64)
# for the sake of driving home the "dynamic code" approach...
dtype2ctype = {
npy.dtype(npy.float64): 'double',
npy.dtype(npy.float32): 'float',
npy.dtype(npy.int32): 'int',
npy.dtype(npy.int16): 'short',
}
dt = dtype2ctype.get(a.dtype)
# this code does a = a*b inplace, broadcasting b to fit the shape of a
code = \
"""
%s *p1, *p2;
PyObject *itr;
itr = PyArray_MultiIterNew(2, a_array, b_array);
while(PyArray_MultiIter_NOTDONE(itr)) {
p1 = (%s *) PyArray_MultiIter_DATA(itr, 0);
p2 = (%s *) PyArray_MultiIter_DATA(itr, 1);
*p1 = (*p1) * (*p2);
PyArray_MultiIter_NEXT(itr);
}
""" % (dt, dt, dt)
b = npy.arange(4, dtype=a.dtype)
print '\n A B '
print a, b
# this reshaping is redundant, it would be the default broadcast
b.shape = (1,4)
inline(code, ['a', 'b'])
print "\ninline version of a*b[None,:],"
print a
a = npy.ones((4,4), npy.float64)
b = npy.arange(4, dtype=a.dtype)
b.shape = (4,1)
inline(code, ['a', 'b'])
print "\ninline version of a*b[:,None],"
print a
def data_casting_test():
# In my MR application, raw data is stored as a file with one or more
# (block-hdr, block-data) pairs. Block data is one or more
# rows of Npt complex samples in big-endian integer pairs (real, imag).
#
# At the block level, I encounter three different raw data layouts--
# 1) one plane, or slice: Y rows by 2*Npt samples
# 2) one volume: Z slices * Y rows by 2*Npt samples
# 3) one row sliced across the z-axis: Z slices by 2*Npt samples
#
# The task is to tease out one volume at a time from any given layout,
# and cast the integer precision data into a complex64 array.
# Given that contiguity is not guaranteed, and the number of dimensions
# can vary, Numpy iterators are useful to provide a single code that can
# carry out the conversion.
#
# Other solutions include:
# 1) working entirely with the string data from file.read() with string
# manipulations (simulated below).
# 2) letting numpy handle automatic byteorder/dtype conversion
nsl, nline, npt = (20,64,64)
hdr_dt = npy.dtype('>V28')
# example 1: a block is one slice of complex samples in short integer pairs
blk_dt1 = npy.dtype(('>i2', nline*npt*2))
dat_dt = npy.dtype({'names': ['hdr', 'data'], 'formats': [hdr_dt, blk_dt1]})
# create an empty volume-- nsl contiguous blocks
vol = npy.empty((nsl,), dat_dt)
t = time_casting(vol[:]['data'])
P.plot(100*t/t.max(), 'b--', label='vol=20 contiguous blocks')
P.plot(100*t/t.max(), 'bo')
# example 2: a block is one entire volume
blk_dt2 = npy.dtype(('>i2', nsl*nline*npt*2))
dat_dt = npy.dtype({'names': ['hdr', 'data'], 'formats': [hdr_dt, blk_dt2]})
# create an empty volume-- 1 block
vol = npy.empty((1,), dat_dt)
t = time_casting(vol[0]['data'])
P.plot(100*t/t.max(), 'g--', label='vol=1 contiguous block')
P.plot(100*t/t.max(), 'go')
# example 3: a block slices across the z dimension, long integer precision
# ALSO--a given volume is sliced discontiguously
blk_dt3 = npy.dtype(('>i4', nsl*npt*2))
dat_dt = npy.dtype({'names': ['hdr', 'data'], 'formats': [hdr_dt, blk_dt3]})
# a real data set has volumes interleaved, so create two volumes here
vols = npy.empty((2*nline,), dat_dt)
# and work on casting the first volume
t = time_casting(vols[0::2]['data'])
P.plot(100*t/t.max(), 'r--', label='vol=64 discontiguous blocks')
P.plot(100*t/t.max(), 'ro')
P.xticks([0,1,2], ('strings', 'numpy auto', 'inline'))
P.gca().set_xlim((-0.25, 2.25))
P.gca().set_ylim((0, 110))
P.gca().set_ylabel(r"% of slowest time")
P.legend(loc=8)
P.title('Casting raw file data to an MR volume')
P.show()
def time_casting(int_data):
nblk = 1 if len(int_data.shape) < 2 else int_data.shape[0]
bias = (npy.random.rand(nblk) + \
1j*npy.random.rand(nblk)).astype(npy.complex64)
dstr = int_data.tostring()
dt = npy.int16 if int_data.dtype.itemsize == 2 else npy.int32
fshape = list(int_data.shape)
fshape[-1] = fshape[-1]/2
float_data = npy.empty(fshape, npy.complex64)
# method 1: string conversion
float_data.shape = (npy.product(fshape),)
tstr = measure("float_data[:] = complex_fromstring(dstr, dt)", times=25)
float_data.shape = fshape
print "to-/from- string: ", tstr, "shape=",float_data.shape
# method 2: numpy dtype magic
sl = [None, slice(None)] if len(fshape)<2 else [slice(None)]*len(fshape)
# need to loop since int_data need not be contiguous
tnpy = measure("""
for fline, iline, b in zip(float_data[sl], int_data[sl], bias):
cast_to_complex_npy(fline, iline, bias=b)""", times=25)
print"numpy automagic: ", tnpy
# method 3: plain inline brute force!
twv = measure("cast_to_complex(float_data, int_data, bias=bias)",
times=25)
print"inline casting: ", twv
return npy.array([tstr, tnpy, twv], npy.float64)
def complex_fromstring(data, numtype):
if sys.byteorder == "little":
return npy.fromstring(
npy.fromstring(data,numtype).byteswap().astype(npy.float32).tostring(),
npy.complex64)
else:
return npy.fromstring(
npy.fromstring(data,numtype).astype(npy.float32).tostring(),
npy.complex64)
def cast_to_complex(cplx_float, cplx_integer, bias=None):
if cplx_integer.dtype.itemsize == 4:
replacements = tuple(["l", "long", "SWAPLONG", "l"]*2)
else:
replacements = tuple(["s", "short", "SWAPSHORT", "s"]*2)
if sys.byteorder == "big":
replacements[-2] = replacements[-6] = "NOP"
cast_code = """
#define SWAPSHORT(x) ((short) ((x >> 8) | (x << 8)) )
#define SWAPLONG(x) ((long) ((x >> 24) | (x << 24) | ((x & 0x00ff0000) >> 8) | ((x & 0x0000ff00) << 8)) )
#define NOP(x) x
unsigned short *s;
unsigned long *l;
float repart, impart;
PyObject *itr;
itr = PyArray_IterNew(py_cplx_integer);
while(PyArray_ITER_NOTDONE(itr)) {
// get real part
%s = (unsigned %s *) PyArray_ITER_DATA(itr);
repart = %s(*%s);
PyArray_ITER_NEXT(itr);
// get imag part
%s = (unsigned %s *) PyArray_ITER_DATA(itr);
impart = %s(*%s);
PyArray_ITER_NEXT(itr);
*(cplx_float++) = std::complex<float>(repart, impart);
}
""" % replacements
inline(cast_code, ['cplx_float', 'cplx_integer'])
if bias is not None:
if len(cplx_float.shape) > 1:
bsl = [slice(None)]*(len(cplx_float.shape)-1) + [None]
else:
bsl = slice(None)
npy.subtract(cplx_float, bias[bsl], cplx_float)
def cast_to_complex_npy(cplx_float, cplx_integer, bias=None):
cplx_float.real[:] = cplx_integer[0::2]
cplx_float.imag[:] = cplx_integer[1::2]
if bias is not None:
npy.subtract(cplx_float, bias, cplx_float)
if __name__=="__main__":
data_casting_test()
multi_iter_example()
|
src/scenic/simulators/gta/img_modf.py | cahartsell/Scenic | 141 | 37596 | <gh_stars>100-1000
'''
This file has basic image modification functions
'''
from PIL import Image
import cv2
from scipy.spatial import Voronoi
from itertools import product
import numpy as np
def convert_black_white(img_data=None, img_file=None, threshold=100):
assert img_data is not None or img_file is not None
if img_data is None:
img_data = Image.open(img_file)
img_copy = img_data.copy()
pixels = img_copy.load()
for j,k in product(range(img_copy.size[0]), range(img_copy.size[1])):
if (np.array(pixels[j, k][0:3]) > threshold).any():
pixels[j, k] = (255, 255, 255, 255)
else:
pixels[j,k] = (0, 0, 0, 255)
return img_copy
def get_edges(img_data=None, img_file=None, threshold=100, kernelsize=1):
assert img_data is not None or img_file is not None
if img_data is None:
img_data = Image.open(img_file)
img_copy = img_data.copy()
# Get the black and white image
img_bw = convert_black_white(img_data=img_copy, img_file=img_file,
threshold=threshold)
cv_bw = cv2.cvtColor(np.array(img_bw), cv2.COLOR_RGB2BGR)
# Detect edges using Laplacian
laplacian = cv2.Laplacian(cv_bw, cv2.CV_8U, ksize=kernelsize)
# Convert back to Pillow image
pil_lap = Image.fromarray(laplacian)
# For computing Voronoi images, we need to squeeze the RGB data to 0s and 1s
pil_squeezed = pil_lap.convert('L')
pil_squeezed_01 = pil_squeezed.point(lambda x: 0 if x < 128 else 255, '1')
return pil_squeezed_01
def voronoi_edge(img_data=None, img_file=None, threshold=100, kernelsize=1):
assert img_data is not None or img_file is not None
if img_data is None:
img_data = Image.open(img_file)
img_copy = img_data.copy()
# Get 0s and 1s of the edges
pil_squeezed_01 = get_edges(img_data=img_copy, img_file=img_file,
threshold=threshold, kernelsize=kernelsize)
# Collecting point for Voronoi edge computation
nz_elements = np.nonzero(np.asarray(pil_squeezed_01))
points = np.fliplr(np.array(nz_elements).T)
vor = Voronoi(points)
vor_x = vor.vertices.T[0]
vor_y = -vor.vertices.T[1] + img_data.size[1]
# Convert the black and white image to 0s and 1s
img_bw = convert_black_white(img_data=img_copy,
img_file=img_file, threshold=threshold)
img_bw_squeezed = img_bw.convert('L')
img_bw_01 = img_bw_squeezed.point(lambda x:0 if x< 128 else 255, '1')
pixels = img_bw_01.load()
center_x = []
center_y = []
for x, y in zip(vor_x, vor_y):
if 0 < x and x < img_data.size[0] and 0 < y and y < img_data.size[1] \
and pixels[int(x), img_data.size[1]-1 -int(y)] == 0:
center_x.append(int(x))
center_y.append(int(y))
return {'edge_image':pil_squeezed_01, 'vor_center_x': center_x,
'vor_center_y': center_y}
def plot_voronoi_plot(img_data=None, img_file=None, threshold=100, kernelsize=3,
plot_name=None):
import matplotlib.pyplot as plt
assert img_data is not None or img_file is not None
vor_results = voronoi_edge(img_data=img_data, img_file=img_file,
threshold=threshold, kernelsize=kernelsize)
xlim = vor_results['edge_image'].size[0]
ylim = vor_results['edge_image'].size[1]
x_data = vor_results['vor_center_x']
y_data = vor_results['vor_center_y']
plt.figure()
plt.scatter(x_data, y_data, s=0.5)
plt.xlim(0, xlim)
plt.ylim(0, ylim)
if plot_name is None:
plt.savefig('voronoi_fig.png')
else:
plt.savefig(plot_name+'.png')
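# Added usage sketch (not part of the original module; the file name is an
# assumption): compute Voronoi centers that fall inside the dark road regions.
#
#   result = voronoi_edge(img_file='gta_map.png', threshold=100, kernelsize=3)
#   print(len(result['vor_center_x']), 'road-center points found')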
|
distributed.py | SagaFav/etlpy | 448 | 37607 | import sys;
from queue import Queue
from multiprocessing.managers import BaseManager
import etl;
import json
import extends;
import time;
authkey= "etlpy".encode('utf-8')
timeout=1;
rpc_port=8888
class ETLJob:
def __init__(self,project,jobname,config,id):
self.project= project;
self.jobname=jobname;
self.config=config;
self.id= id;
class JobResult:
def __init__(self,name,count,id):
self.name=name;
self.count=count;
self.id=id;
class Master:
def __init__(self,project,jobname):
# 派发出去的作业队列
self.dispatched_job_queue = Queue()
# 完成的作业队列
self.finished_job_queue = Queue()
self.project= project;
self.jobname=jobname;
self.maxprocess= 10;
def get_dispatched_job_queue(self):
return self.dispatched_job_queue
def get_finished_job_queue(self):
return self.finished_job_queue
def start(self,skip=0):
# register the dispatched-job and finished-job queues on the network
BaseManager.register('get_dispatched_job_queue', callable=self.get_dispatched_job_queue)
BaseManager.register('get_finished_job_queue', callable=self.get_finished_job_queue)
# listen on the port and start the service
manager = BaseManager(address=('0.0.0.0', rpc_port), authkey=authkey)
manager.start()
# fetch the queues through the methods registered above
dispatched_jobs = manager.get_dispatched_job_queue()
finished_jobs = manager.get_finished_job_queue()
job_id = 0
module= self.project.modules[self.jobname];
proj=json.loads(json.dumps(etl.convert_dict(self.project,self.project.__defaultdict__), ensure_ascii=False))
while True:
for task in etl.parallel_map(module):
job_id = job_id + 1
if job_id<skip:
continue
job = ETLJob(proj, self.jobname, task, job_id);
print('Dispatch job: %s' % job.id)
dispatched_jobs.put(job)
while not dispatched_jobs.empty():
job = finished_jobs.get(60)
print('Finished Job: %s, Count: %s' % (job.id, job.count))
key=input('press any key to repeat, c to cancel')
if key=='c':
manager.shutdown()
break
#manager.shutdown()
class Slave:
def __init__(self):
# queue of jobs dispatched to workers
self.dispatched_job_queue = Queue()
# queue of finished jobs
self.finished_job_queue = Queue()
def start(self,execute= True,serverip='127.0.0.1',port=8888):
# register the dispatched-job and finished-job queues on the network
BaseManager.register('get_dispatched_job_queue')
BaseManager.register('get_finished_job_queue')
server = serverip;
print('Connect to server %s...' % server)
manager = BaseManager(address=(server, port), authkey=authkey)
manager.connect()
# fetch the queues through the methods registered above
dispatched_jobs = manager.get_dispatched_job_queue()
finished_jobs = manager.get_finished_job_queue()
# run each received job and report the result back to the master
while True:
if dispatched_jobs.empty():
time.sleep(1)
print('queue is empty,wait 1 sec...')
continue;
job = dispatched_jobs.get(timeout=timeout)
print('Run job: %s ' % job.id)
project=job.project;
project= etl.LoadProject_dict(project);
module= project.modules[job.jobname];
count=0
try:
generator= etl.parallel_reduce(module,[ job.config],execute)
for r in generator:
count+=1;
except Exception as e:
print(e)
print('finish job,id %s, count %s'%(job.id,count))
resultjob= JobResult(job.jobname,count,job.id)
finished_jobs.put(resultjob)
if __name__ == '__main__':
ip='127.0.0.1'
port=8888;
argv=sys.argv;
if len(argv)>1:
ip=argv[1];
if len(argv)>2:
port=int(argv[2]);
slave= Slave();
slave.start(True,ip,port);
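# Added master-side sketch (not part of the original script): on the
# coordinating machine, an etl project loaded elsewhere is dispatched with
#
#   master = Master(project, 'myjob')
#   master.start()
#
# while one or more worker machines run this script to start slaves as above.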
|
tests/r/test_labour.py | hajime9652/observations | 199 | 37675 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.labour import labour
def test_labour():
"""Test module labour.py by downloading
labour.csv and checking that the shape of the
extracted data is 569 rows by 4 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = labour(test_path)
try:
assert x_train.shape == (569, 4)
except:
shutil.rmtree(test_path)
raise
|
tests/test_layers/test_2p5d/checks_2p5d/common.py | RichardoLuo/ColossalAI | 1,630 | 37745 | <gh_stars>1000+
import torch
TESSERACT_DIM = 2
TESSERACT_DEP = 2
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
VOCAB_SIZE = 16
IMG_SIZE = 16
def check_equal(A, B):
assert torch.allclose(A, B, rtol=1e-5, atol=1e-2) |
graphgallery/gallery/linkpred/pyg/__init__.py | EdisonLeeeee/GraphGallery | 300 | 37761 | <gh_stars>100-1000
from .gae import GAE
from .vgae import VGAE
|
self_paced_ensemble/canonical_resampling/__init__.py | thulio/self-paced-ensemble | 203 | 37766 | """
--------------------------------------------------------------------------
The `self_paced_ensemble.canonical_resampling` module implement a
resampling-based classifier for imbalanced classification.
15 resampling algorithms are included:
'RUS', 'CNN', 'ENN', 'NCR', 'Tomek', 'ALLKNN', 'OSS',
'NM', 'CC', 'SMOTE', 'ADASYN', 'BorderSMOTE', 'SMOTEENN',
'SMOTETomek', 'ORG'.
Note: the implementation of these resampling algorithms is based on
imblearn python package.
See https://github.com/scikit-learn-contrib/imbalanced-learn.
--------------------------------------------------------------------------
"""
from .canonical_resampling import ResampleClassifier
__all__ = [
"ResampleClassifier",
]
|
emlearn/distance.py | Brax94/emlearn | 161 | 37772 |
import os.path
import os
import numpy
from . import common, cgen
"""
References
https://github.com/scikit-learn/scikit-learn/blob/15a949460dbf19e5e196b8ef48f9712b72a3b3c3/sklearn/covariance/_empirical_covariance.py#L297
https://github.com/scikit-learn/scikit-learn/blob/15a949460dbf19e5e196b8ef48f9712b72a3b3c3/sklearn/covariance/_elliptic_envelope.py#L149
"""
from sklearn.mixture._gaussian_mixture import _compute_log_det_cholesky
from sklearn.utils.extmath import row_norms
np = numpy
def squared_mahalanobis_distance(x1, x2, precision):
"""
@precision is the inverted covariance matrix
computes (x1 - x2).T * VI * (x1 - x2)
where VI is the precision matrix, the inverse of the covariance matrix
Loosely based on the scikit-learn implementation,
https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/neighbors/_dist_metrics.pyx
"""
distance = 0.0
size = x1.shape[0]
temp = numpy.zeros(shape=size)
assert x1.shape == x2.shape
assert precision.shape[0] == precision.shape[1]
assert size == precision.shape[0]
for i in range(size):
accumulate = 0
for j in range(size):
accumulate += precision[i, j] * (x1[j] - x2[j])
distance += accumulate * (x1[i] - x2[i])
return distance
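# Added self-check sketch (not part of the original module): the explicit loop
# above is equivalent to the matrix form (x1 - x2).T @ precision @ (x1 - x2).
def _check_squared_mahalanobis(x1, x2, precision):
    expected = (x1 - x2) @ precision @ (x1 - x2)
    return numpy.isclose(squared_mahalanobis_distance(x1, x2, precision), expected)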
def generate_code(means, precision, offset, name='my_elliptic', modifiers='static const'):
n_features = means.shape[0]
decision_boundary = offset # FIXME, check
classifier_name = f'{name}_classifier'
means_name = f'{name}_means'
precisions_name = f'{name}_precisions'
predict_function_name = f'{name}_predict'
includes = '''
// This code is generated by emlearn
#include <eml_distance.h>
'''
pre = '\n\n'.join([
includes,
cgen.array_declare(means_name, n_features, modifiers=modifiers, values=means),
cgen.array_declare(precisions_name, n_features*n_features,
modifiers=modifiers,
values=precision.flatten(order='C'),
),
])
main = f'''
#include <stdio.h>
// Data definitions
{modifiers} EmlEllipticEnvelope {classifier_name} = {{
{n_features},
{decision_boundary},
{means_name},
{precisions_name}
}};
// Prediction function
float {predict_function_name}(const float *features, int n_features) {{
float dist = 0.0;
const int class = eml_elliptic_envelope_predict(&{classifier_name},
features, n_features, &dist);
return dist;
}}
'''
code = pre + main
return code
class Wrapper:
def __init__(self, estimator, classifier='inline', dtype='float'):
self.dtype = dtype
precision = estimator.get_precision()
self._means = estimator.location_.copy()
self._precision = precision
self._offset = estimator.offset_
if classifier == 'inline':
name = 'my_inline_elliptic'
func = '{}_predict(values, length)'.format(name)
code = self.save(name=name)
self.classifier_ = common.CompiledClassifier(code, name=name, call=func, out_dtype='float')
else:
raise ValueError("Unsupported classifier method '{}'".format(classifier))
def mahalanobis(self, X):
def dist(x):
return squared_mahalanobis_distance(x, self._means, precision=self._precision)
p = numpy.array([ dist(x) for x in X ])
predictions = self.classifier_.predict(X)
return predictions
def predict(self, X):
def predict_one(d):
dist = -d
dd = dist - self._offset
is_inlier = 1 if dd > 0 else -1
return is_inlier
distances = self.mahalanobis(X)
return numpy.array([predict_one(d) for d in distances])
def save(self, name=None, file=None):
if name is None:
if file is None:
raise ValueError('Either name or file must be provided')
else:
name = os.path.splitext(os.path.basename(file))[0]
code = generate_code(self._means, self._precision, self._offset, name=name)
if file:
with open(file, 'w') as f:
f.write(code)
return code
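# Added usage sketch (not part of the original module): fit scikit-learn's
# EllipticEnvelope, wrap it, and compare against the compiled C version. The
# arrays X_train / X_test and an available C toolchain are assumptions.
#
#   from sklearn.covariance import EllipticEnvelope
#   estimator = EllipticEnvelope().fit(X_train)
#   wrapped = Wrapper(estimator)
#   assert (wrapped.predict(X_test) == estimator.predict(X_test)).all()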
|
Packs/ShiftLeft/Integrations/shiftleft/shiftleft_test.py | diCagri/content | 799 | 37775 | <filename>Packs/ShiftLeft/Integrations/shiftleft/shiftleft_test.py
"""Base Integration for ShiftLeft CORE - Cortex XSOAR Extension
"""
import json
import io
from shiftleft import list_app_findings_command, ShiftLeftClient
def util_load_json(path):
with io.open(path, mode="r", encoding="utf-8") as f:
return json.loads(f.read())
def test_list_app_findings_command(requests_mock):
"""Tests list_app_findings_command function.
Checks the output of the command function with the expected output.
"""
mock_response = util_load_json("test_data/test_list_findings.json")
requests_mock.get(
"https://www.shiftleft.io/orgs/2c089ac1-3378-44d5-94da-9507e84351c3/apps/shiftleft-java-example/findings",
json=mock_response,
)
client = ShiftLeftClient(
base_url="https://www.shiftleft.io", # disable-secrets-detection
verify=False,
)
args = {
"app_name": "shiftleft-java-example",
"severity": "critical",
"type": ["vuln"],
"version": None,
}
response = list_app_findings_command(
client, "2c089ac1-3378-44d5-94da-9507e84351c3", args
)
assert response.outputs
|
src/richie/apps/courses/migrations/0017_auto_20200827_1011.py | leduong/richie | 174 | 37807 | <reponame>leduong/richie
# Generated by Django 2.2.15 on 2020-08-27 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("courses", "0016_auto_20200417_1237"),
]
operations = [
migrations.AlterField(
model_name="courserun",
name="resource_link",
field=models.CharField(
blank=True, max_length=200, null=True, verbose_name="Resource link"
),
),
]
|
proxyclient/experiments/timer_test.py | EricRabil/m1n1 | 1,604 | 37808 | <filename>proxyclient/experiments/timer_test.py
#!/usr/bin/env python3
# SPDX-License-Identifier: MIT
import sys, pathlib
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from m1n1.setup import *
HV_VTMR_CTL = (3, 5, 15, 1, 3)
HV_VTMR_CTL_VMASK = (1 << 0)
HV_VTMR_CTL_PMASK = (1 << 1)
HV_VTMR_LIST = (3, 5, 15, 1, 2)
TGE = (1<<27)
u.msr(CNTHCTL_EL2, 3 << 10) # EL1PTEN | EL1PCTEN
def run_test(ctl, tval):
u.inst(0xd5033fdf) # isb
u.msr(ctl, 0)
u.msr(tval, int(freq * 0.8))
u.msr(ctl, 1)
for i in range(6):
p.nop()
time.sleep(0.2)
#u.inst(0xd5033fdf, call=p.el1_call)
print(" . (ISR_EL1=%d) CTL=%x VTMR_LIST=%x" % (u.mrs(ISR_EL1), u.mrs(ctl), u.mrs(HV_VTMR_LIST)))
u.msr(ctl, 0)
def test_hv_timers():
u.msr(DAIF, 0x3c0)
print("Testing HV timers...")
print(" TGE = 1")
u.msr(HCR_EL2, u.mrs(HCR_EL2) | TGE | (1 << 3) | (1 << 4))
print(" P:")
run_test(CNTP_CTL_EL0, CNTP_TVAL_EL0)
print(" V:")
run_test(CNTV_CTL_EL0, CNTV_TVAL_EL0)
def test_guest_timers():
u.msr(DAIF, 0)
print("Testing guest timers...")
print(" TGE = 1, vGIC mode=0, timers unmasked")
u.msr(HCR_EL2, (u.mrs(HCR_EL2) | TGE) | (1 << 3) | (1 << 4))
u.msr(HACR_EL2, 0)
u.msr(HV_VTMR_CTL, 3)
print(" P:")
#run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
print(" V:")
#run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
print(" TGE = 1, vGIC mode=0, timers masked")
u.msr(HV_VTMR_CTL, 0)
print(" P:")
run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
print(" V:")
run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
print(" TGE = 0, vGIC mode=0, timers unmasked")
u.msr(HCR_EL2, (u.mrs(HCR_EL2) & ~TGE) | (1 << 3) | (1 << 4))
u.msr(HACR_EL2, 0)
u.msr(HV_VTMR_CTL, 3)
print(" P:")
run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
print(" V:")
run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
print(" TGE = 0, vGIC mode=0, timers masked")
u.msr(HV_VTMR_CTL, 0)
print(" P:")
run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
print(" V:")
run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
print(" TGE = 0, vGIC mode=1, timers unmasked")
u.msr(HCR_EL2, (u.mrs(HCR_EL2) & ~TGE) | (1 << 3) | (1 << 4))
u.msr(HACR_EL2, 1<<20)
u.msr(HV_VTMR_CTL, 3)
print(" P:")
run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
print(" V:")
run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
print(" TGE = 0, vGIC mode=1, timers masked")
u.msr(HV_VTMR_CTL, 0)
print(" P:")
run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
print(" V:")
run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
return
freq = u.mrs(CNTFRQ_EL0)
print("Timer freq: %d" % freq)
test_hv_timers()
test_guest_timers()
|
tf_coder/value_search/search_space_from_weight.py | hstrohm/PyTorch-Coder-cheat | 245 | 37883 | <reponame>hstrohm/PyTorch-Coder-cheat<gh_stars>100-1000
# Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Computes the size of value search's search space."""
import collections
import functools
import operator
import os
import sys
from absl import app
from absl import flags
from tf_coder import tf_coder_utils
from tf_coder import tf_functions
from tf_coder.benchmarks import all_benchmarks
from tf_coder.natural_language import description_handler_factory
from tf_coder.value_search import value as value_module
from tf_coder.value_search import value_search
from tf_coder.value_search import value_search_settings as settings_module
FLAGS = flags.FLAGS
flags.DEFINE_string('benchmark_name', 'google_02',
'The name of a benchmark to analyze.')
flags.DEFINE_multi_string('settings',
[],
'Settings to override the defaults.')
# Inspired by https://stackoverflow.com/a/45669280/9589593.
class SuppressPrint(object):
"""A context manager for suppressing print() calls temporarily."""
def __enter__(self):
self._old_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
del exc_type, exc_val, exc_tb
sys.stdout.close()
sys.stdout = self._old_stdout
def compute_search_space_size(benchmark, settings, description_handler):
"""Computes and prints the size of the search space.
This counts the total number of expressions with weight at most max_weight.
The weights come from the benchmark (for constants and inputs) and the
description handler (for determining the op weights). Distinct expressions
will be counted separately even if they evaluate to the same value, unlike in
TF-Coder's value_search algorithm which does value-based pruning.
Args:
benchmark: The Benchmark object defining the problem to analyze.
settings: A Settings object containing settings for value search.
description_handler: The DescriptionHandler used, which can modify weights
of operations.
Returns:
Nothing. All output is printed to stdout.
"""
max_weight = settings.max_weight
print('Computing search space.\n'
'Benchmark name: {}\n'
'Description handler: {}\n'
'Max weight: {}'.format(
benchmark.name, description_handler, max_weight))
# TODO(kshi): Update to load the tensor features model/config.
operations = value_search.get_reweighted_operations(benchmark,
settings,
description_handler,
tensor_model=None,
tensor_config=None)
# These loops are not the most efficient, but it doesn't really matter.
print('\nFound {} operations.'.format(len(operations)))
print()
for weight in range(1, max(op.weight for op in operations) + 1):
print('# operations with weight {}: {}'.format(
weight, sum(1 for op in operations if op.weight == weight)))
print()
for arity in range(1, max(op.num_args for op in operations) + 1):
print('# operations with arity {}: {}'.format(
arity, sum(1 for op in operations if op.num_args == arity)))
output_value = value_module.OutputValue(benchmark.examples[0].output)
values_by_weight = [collections.OrderedDict()
for _ in range(max_weight + 1)]
constant_operation = None
for operation in operations:
if operation.name == tf_functions.CONSTANT_OPERATION_NAME:
constant_operation = operation
break
with SuppressPrint():
value_search._add_constants_and_inputs_and_print( # pylint: disable=protected-access
values_by_weight, benchmark, output_value, constant_operation, settings)
num_expressions_with_weight = [len(values_with_weight)
for values_with_weight in values_by_weight]
print()
max_weight_with_initial_value = max(w for w in range(max_weight + 1)
if num_expressions_with_weight[w])
for weight in range(1, max_weight_with_initial_value + 1):
print('# initial values with weight {}: {}'.format(
weight, num_expressions_with_weight[weight]))
for total_weight in range(2, max_weight + 1):
for operation in operations:
# All operations should have strictly positive weight and num_args.
op_weight = operation.weight
op_arity = operation.num_args
if total_weight - op_weight < op_arity:
continue
# Partition `total_weight - op_weight` into `op_arity` positive pieces.
# Equivalently, partition `total_weight - op_weight - op_arity` into
# `op_arity` nonnegative pieces.
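      # Illustrative example (not from the original source): with
      # total_weight = 4, op_weight = 1 and op_arity = 2, we partition
      # 4 - 1 - 2 = 1 into 2 nonnegative pieces, e.g. (0, 1), giving
      # arg_weights (1, 2); that choice contributes
      # num_expressions_with_weight[1] * num_expressions_with_weight[2]
      # expressions of total weight 4 for this operation.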
for partition in tf_coder_utils.generate_partitions(
total_weight - op_weight - op_arity, op_arity):
arg_weights = [part + 1 for part in partition]
num_expressions_with_weight[total_weight] += functools.reduce(
operator.mul,
(num_expressions_with_weight[w] for w in arg_weights))
print()
for weight in range(1, max_weight + 1):
print('# expressions with weight exactly {}: {}'.format(
weight, num_expressions_with_weight[weight]))
print()
for weight in range(1, max_weight + 1):
print('# expressions with weight up to {}: {}'.format(
weight, sum(num_expressions_with_weight[:weight + 1])))
def main(unused_argv):
settings = settings_module.from_list(FLAGS.settings)
description_handler = description_handler_factory.create_handler(
settings.description_handler_name)
benchmark = all_benchmarks.find_benchmark_with_name(FLAGS.benchmark_name)
if not benchmark:
raise ValueError('Unknown benchmark: {}'.format(FLAGS.benchmark_name))
compute_search_space_size(benchmark=benchmark,
settings=settings,
description_handler=description_handler)
if __name__ == '__main__':
app.run(main)
|
tests.py | mkolar/maya-capture | 118 | 37930 | <filename>tests.py
"""Tests for capture.
Within Maya, setup a scene of moderate range (e.g. 10 frames)
and run the following.
Example:
>>> nose.run(argv=[sys.argv[0], "tests", "-v"])
"""
import capture
from maya import cmds
def test_capture():
"""Plain capture works"""
capture.capture()
def test_camera_options():
"""(Optional) camera options works"""
capture.capture(camera_options={"displayGateMask": False})
def test_display_options():
"""(Optional) display options works"""
capture.capture(display_options={"displayGradient": False})
def test_viewport_options():
"""(Optional) viewport options works"""
capture.capture(viewport_options={"wireframeOnShaded": True})
def test_viewport2_options():
"""(Optional) viewport2 options works"""
capture.capture(viewport2_options={"ssaoEnable": True})
def test_parse_active_view():
"""Parse active view works"""
# Set focus to modelPanel1 (assume it exists)
# Otherwise the panel with focus (temporary panel from capture)
# got deleted and there's no "active panel"
import maya.cmds as cmds
cmds.setFocus("modelPanel1")
options = capture.parse_active_view()
capture.capture(**options)
def test_parse_view():
"""Parse view works"""
options = capture.parse_view("modelPanel1")
capture.capture(**options)
def test_apply_view():
"""Apply view works"""
capture.apply_view("modelPanel1", camera_options={"overscan": 2})
def test_apply_parsed_view():
"""Apply parsed view works"""
options = capture.parse_view("modelPanel1")
capture.apply_view("modelPanel1", **options)
def test_apply_parsed_view_exact():
"""Apply parsed view sanity check works"""
import maya.cmds as cmds
panel = "modelPanel1"
cmds.modelEditor(panel, edit=True, displayAppearance="wireframe")
parsed = capture.parse_view(panel)
display = parsed["viewport_options"]["displayAppearance"]
assert display == "wireframe"
# important to test both, just in case wireframe was already
# set when making the first query, and to make sure this
# actually does something.
cmds.modelEditor(panel, edit=True, displayAppearance="smoothShaded")
parsed = capture.parse_view(panel)
display = parsed["viewport_options"]["displayAppearance"]
assert display == "smoothShaded"
capture.apply_view(panel,
viewport_options={"displayAppearance": "wireframe"})
assert cmds.modelEditor(panel,
query=True,
displayAppearance=True) == "wireframe"
def test_apply_parsed_view_all():
"""Apply parsed view all options works"""
# A set of options all trying to be different from the default
# settings (in `capture.py`) so we can test "changing states"
camera_options = {}
display_options = {}
viewport_options = {}
viewport2_options = {}
for key, value in capture.CameraOptions.items():
if isinstance(value, bool):
value = not value
elif isinstance(value, (int, float)):
value = value + 1
else:
raise Exception("Unexpected value in CameraOptions: %s=%s"
                            % (key, value))
        camera_options[key] = value
for key, value in capture.DisplayOptions.items():
if isinstance(value, bool):
value = not value
elif isinstance(value, tuple):
value = (1, 0, 1)
else:
raise Exception("Unexpected value in DisplayOptions: %s=%s"
                            % (key, value))
        display_options[key] = value
for key, value in capture.ViewportOptions.items():
if isinstance(value, bool):
value = not value
elif isinstance(value, (int, float)):
value = value + 1
elif isinstance(value, tuple):
value = (1, 0, 1)
elif isinstance(value, basestring):
pass # Don't bother, for now
else:
raise Exception("Unexpected value in ViewportOptions: %s=%s"
                            % (key, value))
        viewport_options[key] = value
for key, value in capture.Viewport2Options.items():
if isinstance(value, bool):
value = not value
elif isinstance(value, (int, float)):
value = value + 1
elif isinstance(value, tuple):
value = (1, 0, 1)
elif isinstance(value, basestring):
pass # Don't bother, for now
else:
raise Exception("Unexpected value in Viewport2Options: %s=%s"
                            % (key, value))
        viewport2_options[key] = value
defaults = {
"camera_options": capture.CameraOptions.copy(),
"display_options": capture.DisplayOptions.copy(),
"viewport_options": capture.ViewportOptions.copy(),
"viewport2_options": capture.Viewport2Options.copy(),
}
others = {
"camera_options": camera_options,
"display_options": display_options,
"viewport_options": viewport_options,
"viewport2_options": viewport2_options,
}
panel = "modelPanel1"
def compare(this, other):
"""Compare options for only settings available in `this`
Some color values will be returned with possible floating
point precision errors as such result in a slightly
different number. We'd need to compare whilst keeping
such imprecisions in mind.
"""
precision = 1e-4
for opt in this:
this_option = this[opt]
other_option = other[opt]
for key, value in this_option.iteritems():
other_value = other_option[key]
if isinstance(value, float) or isinstance(other_value, float):
if abs(value - other_value) > precision:
return False
elif isinstance(value, (tuple, list)):
# Assuming for now that any tuple or list contains floats
if not all((abs(a-b) < precision)
for a, b in zip(value, other_value)):
return False
else:
if value != other_value:
return False
return True
# Apply defaults and check
capture.apply_view(panel, **defaults)
parsed_defaults = capture.parse_view(panel)
assert compare(defaults, parsed_defaults)
# Apply others and check
capture.apply_view(panel, **others)
parsed_others = capture.parse_view(panel)
assert compare(others, parsed_others)
def test_preset():
"""Creating and applying presets works"""
preset = {
"width": 320,
"height": 240,
"camera_options": {
"displayGateMask": False
},
"viewport_options": {
"wireframeOnShaded": True
},
"display_options": {
"displayGateMask": False
}
}
capture.capture(**preset)
def test_parse_active_scene():
"""parse_active_scene() works"""
parsed = capture.parse_active_scene()
reference = {
"start_frame": cmds.playbackOptions(minTime=True, query=True),
"end_frame": cmds.playbackOptions(maxTime=True, query=True),
"width": cmds.getAttr("defaultResolution.width"),
"height": cmds.getAttr("defaultResolution.height"),
"compression": cmds.optionVar(query="playblastCompression"),
"filename": (cmds.optionVar(query="playblastFile")
if cmds.optionVar(query="playblastSaveToFile") else None),
"format": cmds.optionVar(query="playblastFormat"),
"off_screen": (True if cmds.optionVar(query="playblastOffscreen")
else False),
"show_ornaments": (True if cmds.optionVar(query="playblastShowOrnaments")
else False),
"quality": cmds.optionVar(query="playblastQuality")
}
for key, value in reference.items():
assert parsed[key] == value
|
pluribus/poker/evaluation/__init__.py | keithlee96/pluribus-poker-AI | 113 | 37983 | from .eval_card import EvaluationCard
from .evaluator import Evaluator
from .lookup import LookupTable
|
vnpy/gateway/sec/__init__.py | funrunskypalace/vnpy | 19,529 | 38018 | <filename>vnpy/gateway/sec/__init__.py
from .sec_gateway import SecGateway
|
test/unit/metrics/utils.py | alliesaizan/fairlearn | 1,142 | 38023 | <filename>test/unit/metrics/utils.py
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
import fairlearn.metrics as metrics
def _get_raw_MetricFrame():
# Gets an uninitialised MetricFrame for testing purposes
return metrics.MetricFrame.__new__(metrics.MetricFrame)
|
tests/client/test_get_balance.py | kanzure/eth-testrpc | 164 | 38027 | def test_get_balance(client, hex_accounts):
pre_balance = client.get_balance(hex_accounts[1])
client.send_transaction(
_from=hex_accounts[0],
to=hex_accounts[1],
value=1234,
)
post_balance = client.get_balance(hex_accounts[1])
assert post_balance - pre_balance == 1234
|
math/tests/testcases.py | fuz-woo/gpython | 520 | 38067 | # Copyright 2018 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Testcases for functions in math.
#
# Each line takes the form:
#
# <testid> <function> <input_value> -> <output_value> <flags>
#
# where:
#
# <testid> is a short name identifying the test,
#
# <function> is the function to be tested (exp, cos, asinh, ...),
#
# <input_value> is a string representing a floating-point value
#
# <output_value> is the expected (ideal) output value, again
# represented as a string.
#
# <flags> is a list of the floating-point flags required by C99
#
# The possible flags are:
#
# divide-by-zero : raised when a finite input gives a
# mathematically infinite result.
#
# overflow : raised when a finite input gives a finite result that
# is too large to fit in the usual range of an IEEE 754 double.
#
# invalid : raised for invalid inputs (e.g., sqrt(-1))
#
# ignore-sign : indicates that the sign of the result is
# unspecified; e.g., if the result is given as inf,
# then both -inf and inf should be accepted as correct.
#
# Flags may appear in any order.
#
# In the original data files, lines beginning with '--' start a comment and
# are ignored. Blank lines, or lines containing only whitespace, are also
# ignored.
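# As an illustrative example, a data line of the form
#
#   erf0002 erf inf -> 1.0
#
# corresponds to the call t("erf0002", math.erf, inf, 1.0) below, while a
# case flagged overflow or invalid maps to a call carrying the expected
# Python exception, e.g. t("gam0104", math.gamma, 172, inf, OverflowError).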
# Many of the values below were computed with the help of
# version 2.4 of the MPFR library for multiple-precision
# floating-point computations with correct rounding. All output
# values in this file are (modulo yet-to-be-discovered bugs)
# correctly rounded, provided that each input and output decimal
# floating-point value below is interpreted as a representation of
# the corresponding nearest IEEE 754 double-precision value. See the
# MPFR homepage at http://www.mpfr.org for more information about the
# MPFR project.
import math
from libtest import *
from libulp import *
doc="testcases"
inf = float("inf")
nan = float("nan")
def tolerance(a, b, e):
"""Return if a-b is within tolerance e"""
d = a - b
if d < 0:
d = -d
if a != 0:
e = e * a
if e < 0:
e = -e
return d <= e
def acc_check(what, want, got, rel_err=2e-15, abs_err = 5e-323):
"""Determine whether non-NaN floats a and b are equal to within a
(small) rounding error. The default values for rel_err and
abs_err are chosen to be suitable for platforms where a float is
represented by an IEEE 754 double. They allow an error of between
9 and 19 ulps."""
# need to special case infinities, since inf - inf gives nan
if math.isinf(want) and got == want:
return
error = got - want
permitted_error = rel_err * abs(want)
if abs_err > permitted_error:
permitted_error = abs_err
if abs(error) < permitted_error:
return
raise AssertionError("%s: want %g, got %g: error = %g; permitted error = %g" % (what, want, got, error, permitted_error))
def t(name, fn, x, want, exc=None):
global doc
doc = name
if exc is None:
got = fn(x)
if math.isnan(want) and math.isnan(got):
return
if want == inf and got == inf:
return
if want == -inf and got == -inf:
return
if fn == math.lgamma:
# we use a weaker accuracy test for lgamma;
# lgamma only achieves an absolute error of
# a few multiples of the machine accuracy, in
# general.
acc_check(doc, want, got, rel_err = 5e-15, abs_err = 5e-15)
elif fn == math.erfc:
# erfc has less-than-ideal accuracy for large
# arguments (x ~ 25 or so), mainly due to the
# error involved in computing exp(-x*x).
#
# XXX Would be better to weaken this test only
# for large x, instead of for all x.
ulps_check(doc, want, got, 2000)
else:
ulps_check(doc, want, got, 20)
else:
try:
got = fn(x)
except exc as e:
pass
else:
assert False, "%s not raised" % exc
#
# erf: error function --
#
t("erf0000", math.erf, 0.0, 0.0)
t("erf0001", math.erf, -0.0, -0.0)
t("erf0002", math.erf, inf, 1.0)
t("erf0003", math.erf, -inf, -1.0)
t("erf0004", math.erf, nan, nan)
# tiny values
t("erf0010", math.erf, 1e-308, 1.1283791670955125e-308)
t("erf0011", math.erf, 5e-324, 4.9406564584124654e-324)
t("erf0012", math.erf, 1e-10, 1.1283791670955126e-10)
# small integers
t("erf0020", math.erf, 1, 0.84270079294971489)
t("erf0021", math.erf, 2, 0.99532226501895271)
t("erf0022", math.erf, 3, 0.99997790950300136)
t("erf0023", math.erf, 4, 0.99999998458274209)
t("erf0024", math.erf, 5, 0.99999999999846256)
t("erf0025", math.erf, 6, 1.0)
t("erf0030", math.erf, -1, -0.84270079294971489)
t("erf0031", math.erf, -2, -0.99532226501895271)
t("erf0032", math.erf, -3, -0.99997790950300136)
t("erf0033", math.erf, -4, -0.99999998458274209)
t("erf0034", math.erf, -5, -0.99999999999846256)
t("erf0035", math.erf, -6, -1.0)
# huge values should all go to +/-1, depending on sign
t("erf0040", math.erf, -40, -1.0)
t("erf0041", math.erf, 1e16, 1.0)
t("erf0042", math.erf, -1e150, -1.0)
t("erf0043", math.erf, 1.7e308, 1.0)
# Issue 8986: inputs x with exp(-x*x) near the underflow threshold
# incorrectly signalled overflow on some platforms.
t("erf0100", math.erf, 26.2, 1.0)
t("erf0101", math.erf, 26.4, 1.0)
t("erf0102", math.erf, 26.6, 1.0)
t("erf0103", math.erf, 26.8, 1.0)
t("erf0104", math.erf, 27.0, 1.0)
t("erf0105", math.erf, 27.2, 1.0)
t("erf0106", math.erf, 27.4, 1.0)
t("erf0107", math.erf, 27.6, 1.0)
t("erf0110", math.erf, -26.2, -1.0)
t("erf0111", math.erf, -26.4, -1.0)
t("erf0112", math.erf, -26.6, -1.0)
t("erf0113", math.erf, -26.8, -1.0)
t("erf0114", math.erf, -27.0, -1.0)
t("erf0115", math.erf, -27.2, -1.0)
t("erf0116", math.erf, -27.4, -1.0)
t("erf0117", math.erf, -27.6, -1.0)
#
# erfc: complementary error function --
#
t("erfc0000", math.erfc, 0.0, 1.0)
t("erfc0001", math.erfc, -0.0, 1.0)
t("erfc0002", math.erfc, inf, 0.0)
t("erfc0003", math.erfc, -inf, 2.0)
t("erfc0004", math.erfc, nan, nan)
# tiny values
t("erfc0010", math.erfc, 1e-308, 1.0)
t("erfc0011", math.erfc, 5e-324, 1.0)
t("erfc0012", math.erfc, 1e-10, 0.99999999988716204)
# small integers
t("erfc0020", math.erfc, 1, 0.15729920705028513)
t("erfc0021", math.erfc, 2, 0.0046777349810472662)
t("erfc0022", math.erfc, 3, 2.2090496998585441e-05)
t("erfc0023", math.erfc, 4, 1.541725790028002e-08)
t("erfc0024", math.erfc, 5, 1.5374597944280349e-12)
t("erfc0025", math.erfc, 6, 2.1519736712498913e-17)
t("erfc0030", math.erfc, -1, 1.8427007929497148)
t("erfc0031", math.erfc, -2, 1.9953222650189528)
t("erfc0032", math.erfc, -3, 1.9999779095030015)
t("erfc0033", math.erfc, -4, 1.9999999845827421)
t("erfc0034", math.erfc, -5, 1.9999999999984626)
t("erfc0035", math.erfc, -6, 2.0)
# as x -> infinity, erfc(x) behaves like exp(-x*x)/x/sqrt(pi)
t("erfc0040", math.erfc, 20, 5.3958656116079012e-176)
t("erfc0041", math.erfc, 25, 8.3001725711965228e-274)
# FIXME(underflows to 0) t("erfc0042", math.erfc, 27, 5.2370464393526292e-319)
t("erfc0043", math.erfc, 28, 0.0)
# huge values
t("erfc0050", math.erfc, -40, 2.0)
t("erfc0051", math.erfc, 1e16, 0.0)
t("erfc0052", math.erfc, -1e150, 2.0)
t("erfc0053", math.erfc, 1.7e308, 0.0)
# Issue 8986: inputs x with exp(-x*x) near the underflow threshold
# incorrectly signalled overflow on some platforms.
t("erfc0100", math.erfc, 26.2, 1.6432507924389461e-300)
t("erfc0101", math.erfc, 26.4, 4.4017768588035426e-305)
t("erfc0102", math.erfc, 26.6, 1.0885125885442269e-309)
# FIXME(underflows to 0) t("erfc0103", math.erfc, 26.8, 2.4849621571966629e-314)
# FIXME(underflows to 0) t("erfc0104", math.erfc, 27.0, 5.2370464393526292e-319)
# FIXME(underflows to 0) t("erfc0105", math.erfc, 27.2, 9.8813129168249309e-324)
t("erfc0106", math.erfc, 27.4, 0.0)
t("erfc0107", math.erfc, 27.6, 0.0)
t("erfc0110", math.erfc, -26.2, 2.0)
t("erfc0111", math.erfc, -26.4, 2.0)
t("erfc0112", math.erfc, -26.6, 2.0)
t("erfc0113", math.erfc, -26.8, 2.0)
t("erfc0114", math.erfc, -27.0, 2.0)
t("erfc0115", math.erfc, -27.2, 2.0)
t("erfc0116", math.erfc, -27.4, 2.0)
t("erfc0117", math.erfc, -27.6, 2.0)
#
# lgamma: log of absolute value of the gamma function --
#
# special values
t("lgam0000", math.lgamma, 0.0, inf, ValueError)
t("lgam0001", math.lgamma, -0.0, inf, ValueError)
t("lgam0002", math.lgamma, inf, inf)
# FIXME(ValueError) t("lgam0003", math.lgamma, -inf, inf)
t("lgam0004", math.lgamma, nan, nan)
# negative integers
t("lgam0010", math.lgamma, -1, inf, ValueError)
t("lgam0011", math.lgamma, -2, inf, ValueError)
t("lgam0012", math.lgamma, -1e16, inf, ValueError)
t("lgam0013", math.lgamma, -1e300, inf, ValueError)
t("lgam0014", math.lgamma, -1.79e308, inf, ValueError)
# small positive integers give factorials
t("lgam0020", math.lgamma, 1, 0.0)
t("lgam0021", math.lgamma, 2, 0.0)
t("lgam0022", math.lgamma, 3, 0.69314718055994529)
t("lgam0023", math.lgamma, 4, 1.791759469228055)
t("lgam0024", math.lgamma, 5, 3.1780538303479458)
t("lgam0025", math.lgamma, 6, 4.7874917427820458)
# half integers
t("lgam0030", math.lgamma, 0.5, 0.57236494292470008)
t("lgam0031", math.lgamma, 1.5, -0.12078223763524522)
t("lgam0032", math.lgamma, 2.5, 0.28468287047291918)
t("lgam0033", math.lgamma, 3.5, 1.2009736023470743)
t("lgam0034", math.lgamma, -0.5, 1.2655121234846454)
t("lgam0035", math.lgamma, -1.5, 0.86004701537648098)
t("lgam0036", math.lgamma, -2.5, -0.056243716497674054)
t("lgam0037", math.lgamma, -3.5, -1.309006684993042)
# values near 0
t("lgam0040", math.lgamma, 0.1, 2.252712651734206)
t("lgam0041", math.lgamma, 0.01, 4.5994798780420219)
t("lgam0042", math.lgamma, 1e-8, 18.420680738180209)
t("lgam0043", math.lgamma, 1e-16, 36.841361487904734)
t("lgam0044", math.lgamma, 1e-30, 69.077552789821368)
t("lgam0045", math.lgamma, 1e-160, 368.41361487904732)
# FIXME(inaccurate) t("lgam0046", math.lgamma, 1e-308, 709.19620864216608)
# FIXME(inaccurate) t("lgam0047", math.lgamma, 5.6e-309, 709.77602713741896)
# FIXME(inaccurate) t("lgam0048", math.lgamma, 5.5e-309, 709.79404564292167)
# FIXME(inaccurate) t("lgam0049", math.lgamma, 1e-309, 711.49879373516012)
# FIXME(inaccurate) t("lgam0050", math.lgamma, 1e-323, 743.74692474082133)
# FIXME(inaccurate) t("lgam0051", math.lgamma, 5e-324, 744.44007192138122)
t("lgam0060", math.lgamma, -0.1, 2.3689613327287886)
t("lgam0061", math.lgamma, -0.01, 4.6110249927528013)
t("lgam0062", math.lgamma, -1e-8, 18.420680749724522)
t("lgam0063", math.lgamma, -1e-16, 36.841361487904734)
t("lgam0064", math.lgamma, -1e-30, 69.077552789821368)
t("lgam0065", math.lgamma, -1e-160, 368.41361487904732)
# FIXME(inaccurate) t("lgam0066", math.lgamma, -1e-308, 709.19620864216608)
# FIXME(inaccurate) t("lgam0067", math.lgamma, -5.6e-309, 709.77602713741896)
# FIXME(inaccurate) t("lgam0068", math.lgamma, -5.5e-309, 709.79404564292167)
# FIXME(inaccurate) t("lgam0069", math.lgamma, -1e-309, 711.49879373516012)
# FIXME(inaccurate) t("lgam0070", math.lgamma, -1e-323, 743.74692474082133)
# FIXME(inaccurate) t("lgam0071", math.lgamma, -5e-324, 744.44007192138122)
# values near negative integers
t("lgam0080", math.lgamma, -0.99999999999999989, 36.736800569677101)
t("lgam0081", math.lgamma, -1.0000000000000002, 36.043653389117154)
t("lgam0082", math.lgamma, -1.9999999999999998, 35.350506208557213)
t("lgam0083", math.lgamma, -2.0000000000000004, 34.657359027997266)
t("lgam0084", math.lgamma, -100.00000000000001, -331.85460524980607)
t("lgam0085", math.lgamma, -99.999999999999986, -331.85460524980596)
# large inputs
t("lgam0100", math.lgamma, 170, 701.43726380873704)
t("lgam0101", math.lgamma, 171, 706.57306224578736)
t("lgam0102", math.lgamma, 171.624, 709.78077443669895)
t("lgam0103", math.lgamma, 171.625, 709.78591682948365)
t("lgam0104", math.lgamma, 172, 711.71472580228999)
t("lgam0105", math.lgamma, 2000, 13198.923448054265)
t("lgam0106", math.lgamma, 2.55998332785163e305, 1.7976931348623099e+308)
t("lgam0107", math.lgamma, 2.55998332785164e305, inf, OverflowError)
t("lgam0108", math.lgamma, 1.7e308, inf, OverflowError)
# inputs for which gamma(x) is tiny
t("lgam0120", math.lgamma, -100.5, -364.90096830942736)
t("lgam0121", math.lgamma, -160.5, -656.88005261126432)
t("lgam0122", math.lgamma, -170.5, -707.99843314507882)
t("lgam0123", math.lgamma, -171.5, -713.14301641168481)
t("lgam0124", math.lgamma, -176.5, -738.95247590846486)
t("lgam0125", math.lgamma, -177.5, -744.13144651738037)
t("lgam0126", math.lgamma, -178.5, -749.3160351186001)
t("lgam0130", math.lgamma, -1000.5, -5914.4377011168517)
t("lgam0131", math.lgamma, -30000.5, -279278.6629959144)
# FIXME t("lgam0132", math.lgamma, -4503599627370495.5, -1.5782258434492883e+17)
# results close to 0: positive argument ...
t("lgam0150", math.lgamma, 0.99999999999999989, 6.4083812134800075e-17)
t("lgam0151", math.lgamma, 1.0000000000000002, -1.2816762426960008e-16)
t("lgam0152", math.lgamma, 1.9999999999999998, -9.3876980655431170e-17)
t("lgam0153", math.lgamma, 2.0000000000000004, 1.8775396131086244e-16)
# ... and negative argument
# these are very inaccurate in python3
t("lgam0160", math.lgamma, -2.7476826467, -5.2477408147689136e-11)
t("lgam0161", math.lgamma, -2.457024738, 3.3464637541912932e-10)
#
# gamma: Gamma function --
#
# special values
t("gam0000", math.gamma, 0.0, inf, ValueError)
t("gam0001", math.gamma, -0.0, -inf, ValueError)
t("gam0002", math.gamma, inf, inf)
t("gam0003", math.gamma, -inf, nan, ValueError)
t("gam0004", math.gamma, nan, nan)
# negative integers inputs are invalid
t("gam0010", math.gamma, -1, nan, ValueError)
t("gam0011", math.gamma, -2, nan, ValueError)
t("gam0012", math.gamma, -1e16, nan, ValueError)
t("gam0013", math.gamma, -1e300, nan, ValueError)
# small positive integers give factorials
t("gam0020", math.gamma, 1, 1)
t("gam0021", math.gamma, 2, 1)
t("gam0022", math.gamma, 3, 2)
t("gam0023", math.gamma, 4, 6)
t("gam0024", math.gamma, 5, 24)
t("gam0025", math.gamma, 6, 120)
# half integers
t("gam0030", math.gamma, 0.5, 1.7724538509055161)
t("gam0031", math.gamma, 1.5, 0.88622692545275805)
t("gam0032", math.gamma, 2.5, 1.3293403881791370)
t("gam0033", math.gamma, 3.5, 3.3233509704478426)
t("gam0034", math.gamma, -0.5, -3.5449077018110322)
t("gam0035", math.gamma, -1.5, 2.3632718012073548)
t("gam0036", math.gamma, -2.5, -0.94530872048294190)
t("gam0037", math.gamma, -3.5, 0.27008820585226911)
# values near 0
t("gam0040", math.gamma, 0.1, 9.5135076986687306)
t("gam0041", math.gamma, 0.01, 99.432585119150602)
t("gam0042", math.gamma, 1e-8, 99999999.422784343)
t("gam0043", math.gamma, 1e-16, 10000000000000000)
t("gam0044", math.gamma, 1e-30, 9.9999999999999988e+29)
t("gam0045", math.gamma, 1e-160, 1.0000000000000000e+160)
t("gam0046", math.gamma, 1e-308, 1.0000000000000000e+308)
t("gam0047", math.gamma, 5.6e-309, 1.7857142857142848e+308)
t("gam0048", math.gamma, 5.5e-309, inf, OverflowError)
t("gam0049", math.gamma, 1e-309, inf, OverflowError)
t("gam0050", math.gamma, 1e-323, inf, OverflowError)
t("gam0051", math.gamma, 5e-324, inf, OverflowError)
t("gam0060", math.gamma, -0.1, -10.686287021193193)
t("gam0061", math.gamma, -0.01, -100.58719796441078)
t("gam0062", math.gamma, -1e-8, -100000000.57721567)
t("gam0063", math.gamma, -1e-16, -10000000000000000)
t("gam0064", math.gamma, -1e-30, -9.9999999999999988e+29)
t("gam0065", math.gamma, -1e-160, -1.0000000000000000e+160)
t("gam0066", math.gamma, -1e-308, -1.0000000000000000e+308)
t("gam0067", math.gamma, -5.6e-309, -1.7857142857142848e+308)
t("gam0068", math.gamma, -5.5e-309, -inf, OverflowError)
t("gam0069", math.gamma, -1e-309, -inf, OverflowError)
t("gam0070", math.gamma, -1e-323, -inf, OverflowError)
t("gam0071", math.gamma, -5e-324, -inf, OverflowError)
# values near negative integers
t("gam0080", math.gamma, -0.99999999999999989, -9007199254740992.0)
t("gam0081", math.gamma, -1.0000000000000002, 4503599627370495.5)
t("gam0082", math.gamma, -1.9999999999999998, 2251799813685248.5)
t("gam0083", math.gamma, -2.0000000000000004, -1125899906842623.5)
t("gam0084", math.gamma, -100.00000000000001, -7.5400833348831090e-145)
t("gam0085", math.gamma, -99.999999999999986, 7.5400833348840962e-145)
# large inputs
t("gam0100", math.gamma, 170, 4.2690680090047051e+304)
t("gam0101", math.gamma, 171, 7.2574156153079990e+306)
# FIXME(overflows) t("gam0102", math.gamma, 171.624, 1.7942117599248104e+308)
t("gam0103", math.gamma, 171.625, inf, OverflowError)
t("gam0104", math.gamma, 172, inf, OverflowError)
t("gam0105", math.gamma, 2000, inf, OverflowError)
t("gam0106", math.gamma, 1.7e308, inf, OverflowError)
# inputs for which gamma(x) is tiny
t("gam0120", math.gamma, -100.5, -3.3536908198076787e-159)
t("gam0121", math.gamma, -160.5, -5.2555464470078293e-286)
t("gam0122", math.gamma, -170.5, -3.3127395215386074e-308)
# Reported as https://github.com/golang/go/issues/11441
# FIXME(overflows) t("gam0123", math.gamma, -171.5, 1.9316265431711902e-310)
# FIXME(overflows) t("gam0124", math.gamma, -176.5, -1.1956388629358166e-321)
# FIXME(overflows) t("gam0125", math.gamma, -177.5, 4.9406564584124654e-324)
# FIXME(overflows) t("gam0126", math.gamma, -178.5, -0.0)
# FIXME(overflows) t("gam0127", math.gamma, -179.5, 0.0)
# FIXME(overflows) t("gam0128", math.gamma, -201.0001, 0.0)
# FIXME(overflows) t("gam0129", math.gamma, -202.9999, -0.0)
# FIXME(overflows) t("gam0130", math.gamma, -1000.5, -0.0)
# FIXME(overflows) t("gam0131", math.gamma, -1000000000.3, -0.0)
# FIXME(overflows) t("gam0132", math.gamma, -4503599627370495.5, 0.0)
# inputs that cause problems for the standard reflection formula,
# thanks to loss of accuracy in 1-x
t("gam0140", math.gamma, -63.349078729022985, 4.1777971677761880e-88)
t("gam0141", math.gamma, -127.45117632943295, 1.1831110896236810e-214)
#
# log1p: log(1 + x), without precision loss for small x --
#
# special values
t("log1p0000", math.log1p, 0.0, 0.0)
t("log1p0001", math.log1p, -0.0, -0.0)
t("log1p0002", math.log1p, inf, inf)
t("log1p0003", math.log1p, -inf, nan, ValueError)
t("log1p0004", math.log1p, nan, nan)
# singularity at -1.0
t("log1p0010", math.log1p, -1.0, -inf, ValueError)
t("log1p0011", math.log1p, -0.9999999999999999, -36.736800569677101)
# finite values < 1.0 are invalid
t("log1p0020", math.log1p, -1.0000000000000002, nan, ValueError)
t("log1p0021", math.log1p, -1.1, nan, ValueError)
t("log1p0022", math.log1p, -2.0, nan, ValueError)
t("log1p0023", math.log1p, -1e300, nan, ValueError)
# tiny x: log1p(x) ~ x
t("log1p0110", math.log1p, 5e-324, 5e-324)
t("log1p0111", math.log1p, 1e-320, 1e-320)
t("log1p0112", math.log1p, 1e-300, 1e-300)
t("log1p0113", math.log1p, 1e-150, 1e-150)
t("log1p0114", math.log1p, 1e-20, 1e-20)
t("log1p0120", math.log1p, -5e-324, -5e-324)
t("log1p0121", math.log1p, -1e-320, -1e-320)
t("log1p0122", math.log1p, -1e-300, -1e-300)
t("log1p0123", math.log1p, -1e-150, -1e-150)
t("log1p0124", math.log1p, -1e-20, -1e-20)
# some (mostly) random small and moderate-sized values
t("log1p0200", math.log1p, -0.89156889782277482, -2.2216403106762863)
t("log1p0201", math.log1p, -0.23858496047770464, -0.27257668276980057)
t("log1p0202", math.log1p, -0.011641726191307515, -0.011710021654495657)
t("log1p0203", math.log1p, -0.0090126398571693817, -0.0090534993825007650)
t("log1p0204", math.log1p, -0.00023442805985712781, -0.00023445554240995693)
t("log1p0205", math.log1p, -1.5672870980936349e-5, -1.5672993801662046e-5)
t("log1p0206", math.log1p, -7.9650013274825295e-6, -7.9650330482740401e-6)
t("log1p0207", math.log1p, -2.5202948343227410e-7, -2.5202951519170971e-7)
t("log1p0208", math.log1p, -8.2446372820745855e-11, -8.2446372824144559e-11)
t("log1p0209", math.log1p, -8.1663670046490789e-12, -8.1663670046824230e-12)
t("log1p0210", math.log1p, 7.0351735084656292e-18, 7.0351735084656292e-18)
t("log1p0211", math.log1p, 5.2732161907375226e-12, 5.2732161907236188e-12)
t("log1p0212", math.log1p, 1.0000000000000000e-10, 9.9999999995000007e-11)
t("log1p0213", math.log1p, 2.1401273266000197e-9, 2.1401273243099470e-9)
t("log1p0214", math.log1p, 1.2668914653979560e-8, 1.2668914573728861e-8)
t("log1p0215", math.log1p, 1.6250007816299069e-6, 1.6249994613175672e-6)
t("log1p0216", math.log1p, 8.3740495645839399e-6, 8.3740145024266269e-6)
t("log1p0217", math.log1p, 3.0000000000000001e-5, 2.9999550008999799e-5)
t("log1p0218", math.log1p, 0.0070000000000000001, 0.0069756137364252423)
t("log1p0219", math.log1p, 0.013026235315053002, 0.012942123564008787)
t("log1p0220", math.log1p, 0.013497160797236184, 0.013406885521915038)
t("log1p0221", math.log1p, 0.027625599078135284, 0.027250897463483054)
t("log1p0222", math.log1p, 0.14179687245544870, 0.13260322540908789)
# large values
t("log1p0300", math.log1p, 1.7976931348623157e+308, 709.78271289338397)
t("log1p0301", math.log1p, 1.0000000000000001e+300, 690.77552789821368)
t("log1p0302", math.log1p, 1.0000000000000001e+70, 161.18095650958321)
t("log1p0303", math.log1p, 10000000000.000000, 23.025850930040455)
# other values transferred from testLog1p in test_math
t("log1p0400", math.log1p, -0.63212055882855767, -1.0000000000000000)
t("log1p0401", math.log1p, 1.7182818284590451, 1.0000000000000000)
t("log1p0402", math.log1p, 1.0000000000000000, 0.69314718055994529)
t("log1p0403", math.log1p, 1.2379400392853803e+27, 62.383246250395075)
#
# expm1: exp(x) - 1, without precision loss for small x --
#
# special values
t("expm10000", math.expm1, 0.0, 0.0)
t("expm10001", math.expm1, -0.0, -0.0)
t("expm10002", math.expm1, inf, inf)
t("expm10003", math.expm1, -inf, -1.0)
t("expm10004", math.expm1, nan, nan)
# expm1(x) ~ x for tiny x
t("expm10010", math.expm1, 5e-324, 5e-324)
t("expm10011", math.expm1, 1e-320, 1e-320)
t("expm10012", math.expm1, 1e-300, 1e-300)
t("expm10013", math.expm1, 1e-150, 1e-150)
t("expm10014", math.expm1, 1e-20, 1e-20)
t("expm10020", math.expm1, -5e-324, -5e-324)
t("expm10021", math.expm1, -1e-320, -1e-320)
t("expm10022", math.expm1, -1e-300, -1e-300)
t("expm10023", math.expm1, -1e-150, -1e-150)
t("expm10024", math.expm1, -1e-20, -1e-20)
# moderate sized values, where direct evaluation runs into trouble
t("expm10100", math.expm1, 1e-10, 1.0000000000500000e-10)
t("expm10101", math.expm1, -9.9999999999999995e-08, -9.9999995000000163e-8)
t("expm10102", math.expm1, 3.0000000000000001e-05, 3.0000450004500034e-5)
t("expm10103", math.expm1, -0.0070000000000000001, -0.0069755570667648951)
t("expm10104", math.expm1, -0.071499208740094633, -0.069002985744820250)
t("expm10105", math.expm1, -0.063296004180116799, -0.061334416373633009)
t("expm10106", math.expm1, 0.02390954035597756, 0.024197665143819942)
t("expm10107", math.expm1, 0.085637352649044901, 0.089411184580357767)
t("expm10108", math.expm1, 0.5966174947411006, 0.81596588596501485)
t("expm10109", math.expm1, 0.30247206212075139, 0.35319987035848677)
t("expm10110", math.expm1, 0.74574727375889516, 1.1080161116737459)
t("expm10111", math.expm1, 0.97767512926555711, 1.6582689207372185)
t("expm10112", math.expm1, 0.8450154566787712, 1.3280137976535897)
t("expm10113", math.expm1, -0.13979260323125264, -0.13046144381396060)
t("expm10114", math.expm1, -0.52899322039643271, -0.41080213643695923)
t("expm10115", math.expm1, -0.74083261478900631, -0.52328317124797097)
t("expm10116", math.expm1, -0.93847766984546055, -0.60877704724085946)
t("expm10117", math.expm1, 10.0, 22025.465794806718)
t("expm10118", math.expm1, 27.0, 532048240600.79865)
t("expm10119", math.expm1, 123, 2.6195173187490626e+53)
t("expm10120", math.expm1, -12.0, -0.99999385578764666)
t("expm10121", math.expm1, -35.100000000000001, -0.99999999999999944)
# extreme negative values
t("expm10201", math.expm1, -37.0, -0.99999999999999989)
t("expm10200", math.expm1, -38.0, -1.0)
# FIXME(overflows) t("expm10210", math.expm1, -710.0, -1.0)
# the formula expm1(x) = 2 * sinh(x/2) * exp(x/2) doesn't work so
# well when exp(x/2) is subnormal or underflows to zero; check we're
# not using it!
# Reported as https://github.com/golang/go/issues/11442
# FIXME(overflows) t("expm10211", math.expm1, -1420.0, -1.0)
# FIXME(overflows) t("expm10212", math.expm1, -1450.0, -1.0)
# FIXME(overflows) t("expm10213", math.expm1, -1500.0, -1.0)
# FIXME(overflows) t("expm10214", math.expm1, -1e50, -1.0)
# FIXME(overflows) t("expm10215", math.expm1, -1.79e308, -1.0)
# extreme positive values
# FIXME(fails on 32 bit) t("expm10300", math.expm1, 300, 1.9424263952412558e+130)
# FIXME(fails on 32 bit) t("expm10301", math.expm1, 700, 1.0142320547350045e+304)
# the next test (expm10302) is disabled because it causes failure on
# OS X 10.4/Intel: apparently all values over 709.78 produce an
# overflow on that platform. See issue #7575.
# expm10302 expm1 709.78271289328393 -> 1.7976931346824240e+308
t("expm10303", math.expm1, 709.78271289348402, inf, OverflowError)
t("expm10304", math.expm1, 1000, inf, OverflowError)
t("expm10305", math.expm1, 1e50, inf, OverflowError)
t("expm10306", math.expm1, 1.79e308, inf, OverflowError)
# weaker version of expm10302
# FIXME(fails on 32 bit) t("expm10307", math.expm1, 709.5, 1.3549863193146328e+308)
#
# log2: log to base 2 --
#
# special values
t("log20000", math.log2, 0.0, -inf, ValueError)
t("log20001", math.log2, -0.0, -inf, ValueError)
t("log20002", math.log2, inf, inf)
t("log20003", math.log2, -inf, nan, ValueError)
t("log20004", math.log2, nan, nan)
# exact value at 1.0
t("log20010", math.log2, 1.0, 0.0)
# negatives
t("log20020", math.log2, -5e-324, nan, ValueError)
t("log20021", math.log2, -1.0, nan, ValueError)
t("log20022", math.log2, -1.7e-308, nan, ValueError)
# exact values at powers of 2
t("log20100", math.log2, 2.0, 1.0)
t("log20101", math.log2, 4.0, 2.0)
t("log20102", math.log2, 8.0, 3.0)
t("log20103", math.log2, 16.0, 4.0)
t("log20104", math.log2, 32.0, 5.0)
t("log20105", math.log2, 64.0, 6.0)
t("log20106", math.log2, 128.0, 7.0)
t("log20107", math.log2, 256.0, 8.0)
t("log20108", math.log2, 512.0, 9.0)
t("log20109", math.log2, 1024.0, 10.0)
t("log20110", math.log2, 2048.0, 11.0)
t("log20200", math.log2, 0.5, -1.0)
t("log20201", math.log2, 0.25, -2.0)
t("log20202", math.log2, 0.125, -3.0)
t("log20203", math.log2, 0.0625, -4.0)
# values close to 1.0
# FIXME(inaccurate) t("log20300", math.log2, 1.0000000000000002, 3.2034265038149171e-16)
# FIXME(inaccurate) t("log20301", math.log2, 1.0000000001, 1.4426951601859516e-10)
# FIXME(inaccurate) t("log20302", math.log2, 1.00001, 1.4426878274712997e-5)
t("log20310", math.log2, 0.9999999999999999, -1.6017132519074588e-16)
t("log20311", math.log2, 0.9999999999, -1.4426951603302210e-10)
t("log20312", math.log2, 0.99999, -1.4427022544056922e-5)
# tiny values
t("log20400", math.log2, 5e-324, -1074.0)
t("log20401", math.log2, 1e-323, -1073.0)
t("log20402", math.log2, 1.5e-323, -1072.4150374992789)
t("log20403", math.log2, 2e-323, -1072.0)
t("log20410", math.log2, 1e-308, -1023.1538532253076)
t("log20411", math.log2, 2.2250738585072014e-308, -1022.0)
t("log20412", math.log2, 4.4501477170144028e-308, -1021.0)
t("log20413", math.log2, 1e-307, -1019.8319251304202)
# huge values
t("log20500", math.log2, 1.7976931348623157e+308, 1024.0)
t("log20501", math.log2, 1.7e+308, 1023.9193879716706)
t("log20502", math.log2, 8.9884656743115795e+307, 1023.0)
# selection of random values
t("log20600", math.log2, -7.2174324841039838e+289, nan, ValueError)
t("log20601", math.log2, -2.861319734089617e+265, nan, ValueError)
t("log20602", math.log2, -4.3507646894008962e+257, nan, ValueError)
t("log20603", math.log2, -6.6717265307520224e+234, nan, ValueError)
t("log20604", math.log2, -3.9118023786619294e+229, nan, ValueError)
t("log20605", math.log2, -1.5478221302505161e+206, nan, ValueError)
t("log20606", math.log2, -1.4380485131364602e+200, nan, ValueError)
t("log20607", math.log2, -3.7235198730382645e+185, nan, ValueError)
t("log20608", math.log2, -1.0472242235095724e+184, nan, ValueError)
t("log20609", math.log2, -5.0141781956163884e+160, nan, ValueError)
t("log20610", math.log2, -2.1157958031160324e+124, nan, ValueError)
t("log20611", math.log2, -7.9677558612567718e+90, nan, ValueError)
t("log20612", math.log2, -5.5553906194063732e+45, nan, ValueError)
t("log20613", math.log2, -16573900952607.953, nan, ValueError)
t("log20614", math.log2, -37198371019.888618, nan, ValueError)
t("log20615", math.log2, -6.0727115121422674e-32, nan, ValueError)
t("log20616", math.log2, -2.5406841656526057e-38, nan, ValueError)
t("log20617", math.log2, -4.9056766703267657e-43, nan, ValueError)
t("log20618", math.log2, -2.1646786075228305e-71, nan, ValueError)
t("log20619", math.log2, -2.470826790488573e-78, nan, ValueError)
t("log20620", math.log2, -3.8661709303489064e-165, nan, ValueError)
t("log20621", math.log2, -1.0516496976649986e-182, nan, ValueError)
t("log20622", math.log2, -1.5935458614317996e-255, nan, ValueError)
t("log20623", math.log2, -2.8750977267336654e-293, nan, ValueError)
t("log20624", math.log2, -7.6079466794732585e-296, nan, ValueError)
t("log20625", math.log2, 3.2073253539988545e-307, -1018.1505544209213)
t("log20626", math.log2, 1.674937885472249e-244, -809.80634755783126)
t("log20627", math.log2, 1.0911259044931283e-214, -710.76679472274213)
t("log20628", math.log2, 2.0275372624809709e-154, -510.55719818383272)
t("log20629", math.log2, 7.3926087369631841e-115, -379.13564735312292)
t("log20630", math.log2, 1.3480198206342423e-86, -285.25497445094436)
t("log20631", math.log2, 8.9927384655719947e-83, -272.55127136401637)
t("log20632", math.log2, 3.1452398713597487e-60, -197.66251564496875)
t("log20633", math.log2, 7.0706573215457351e-55, -179.88420087782217)
t("log20634", math.log2, 3.1258285390731669e-49, -161.13023800505653)
t("log20635", math.log2, 8.2253046627829942e-41, -133.15898277355879)
t("log20636", math.log2, 7.8691367397519897e+49, 165.75068202732419)
t("log20637", math.log2, 2.9920561983925013e+64, 214.18453534573757)
t("log20638", math.log2, 4.7827254553946841e+77, 258.04629628445673)
t("log20639", math.log2, 3.1903566496481868e+105, 350.47616767491166)
t("log20640", math.log2, 5.6195082449502419e+113, 377.86831861008250)
t("log20641", math.log2, 9.9625658250651047e+125, 418.55752921228753)
t("log20642", math.log2, 2.7358945220961532e+145, 483.13158636923413)
t("log20643", math.log2, 2.785842387926931e+174, 579.49360214860280)
t("log20644", math.log2, 2.4169172507252751e+193, 642.40529039289652)
t("log20645", math.log2, 3.1689091206395632e+205, 682.65924573798395)
t("log20646", math.log2, 2.535995592365391e+208, 692.30359597460460)
t("log20647", math.log2, 6.2011236566089916e+233, 776.64177576730913)
t("log20648", math.log2, 2.1843274820677632e+253, 841.57499717289647)
t("log20649", math.log2, 8.7493931063474791e+297, 989.74182713073981)
doc="finished"
|
dlib/tools/python/test/test_vector.py | asm-jaime/facerec | 11,719 | 38077 | <reponame>asm-jaime/facerec
from dlib import vector, vectors, vectorss, dot
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
from pytest import raises
def test_vector_empty_init():
v = vector()
assert len(v) == 0
assert v.shape == (0, 1)
assert str(v) == ""
assert repr(v) == "dlib.vector([])"
def test_vector_init_with_number():
v = vector(3)
assert len(v) == 3
assert v.shape == (3, 1)
assert str(v) == "0\n0\n0"
assert repr(v) == "dlib.vector([0, 0, 0])"
def test_vector_set_size():
v = vector(3)
v.set_size(0)
assert len(v) == 0
assert v.shape == (0, 1)
v.resize(10)
assert len(v) == 10
assert v.shape == (10, 1)
for i in range(10):
assert v[i] == 0
def test_vector_init_with_list():
v = vector([1, 2, 3])
assert len(v) == 3
assert v.shape == (3, 1)
assert str(v) == "1\n2\n3"
assert repr(v) == "dlib.vector([1, 2, 3])"
def test_vector_getitem():
v = vector([1, 2, 3])
assert v[0] == 1
assert v[-1] == 3
assert v[1] == v[-2]
def test_vector_slice():
v = vector([1, 2, 3, 4, 5])
v_slice = v[1:4]
assert len(v_slice) == 3
for idx, val in enumerate([2, 3, 4]):
assert v_slice[idx] == val
v_slice = v[-3:-1]
assert len(v_slice) == 2
for idx, val in enumerate([3, 4]):
assert v_slice[idx] == val
v_slice = v[1:-2]
assert len(v_slice) == 2
for idx, val in enumerate([2, 3]):
assert v_slice[idx] == val
def test_vector_invalid_getitem():
v = vector([1, 2, 3])
with raises(IndexError):
v[-4]
with raises(IndexError):
v[3]
def test_vector_init_with_negative_number():
with raises(Exception):
vector(-3)
def test_dot():
v1 = vector([1, 0])
v2 = vector([0, 1])
v3 = vector([-1, 0])
assert dot(v1, v1) == 1
assert dot(v1, v2) == 0
assert dot(v1, v3) == -1
def test_vector_serialization():
v = vector([1, 2, 3])
ser = pickle.dumps(v, 2)
deser = pickle.loads(ser)
assert str(v) == str(deser)
def generate_test_vectors():
vs = vectors()
vs.append(vector([0, 1, 2]))
vs.append(vector([3, 4, 5]))
vs.append(vector([6, 7, 8]))
assert len(vs) == 3
return vs
def generate_test_vectorss():
vss = vectorss()
vss.append(generate_test_vectors())
vss.append(generate_test_vectors())
vss.append(generate_test_vectors())
assert len(vss) == 3
return vss
def test_vectors_serialization():
vs = generate_test_vectors()
ser = pickle.dumps(vs, 2)
deser = pickle.loads(ser)
assert vs == deser
def test_vectors_clear():
vs = generate_test_vectors()
vs.clear()
assert len(vs) == 0
def test_vectors_resize():
vs = vectors()
vs.resize(100)
assert len(vs) == 100
for i in range(100):
assert len(vs[i]) == 0
def test_vectors_extend():
vs = vectors()
vs.extend([vector([1, 2, 3]), vector([4, 5, 6])])
assert len(vs) == 2
def test_vectorss_serialization():
vss = generate_test_vectorss()
ser = pickle.dumps(vss, 2)
deser = pickle.loads(ser)
assert vss == deser
def test_vectorss_clear():
vss = generate_test_vectorss()
vss.clear()
assert len(vss) == 0
def test_vectorss_resize():
vss = vectorss()
vss.resize(100)
assert len(vss) == 100
for i in range(100):
assert len(vss[i]) == 0
def test_vectorss_extend():
vss = vectorss()
vss.extend([generate_test_vectors(), generate_test_vectors()])
assert len(vss) == 2
|
DPGAnalysis/SiStripTools/python/poolSource_cff.py | ckamtsikis/cmssw | 852 | 38086 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(),
# skipBadFiles = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
)
|
server02.py | timgates42/csdesign | 116 | 38091 | <gh_stars>100-1000
###############################################################################
#
# Copyright (c) 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
"""
TCP Concurrent Server, I/O Multiplexing (select).
Single server process to handle any number of clients.
"""
__author__ = '<NAME> <<EMAIL>>'
import os
import sys
import errno
import select
import socket
import optparse
BACKLOG = 5
def serve_forever(host, port):
# create, bind. listen
lstsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# re-use the port
lstsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# put listening socket into non-blocking mode
lstsock.setblocking(0)
lstsock.bind((host, port))
lstsock.listen(BACKLOG)
print 'Listening on port %d ...' % port
# read, write, exception lists with sockets to poll
rlist, wlist, elist = [lstsock], [], []
while True:
# block in select
readables, writables, exceptions = select.select(rlist, wlist, elist)
for sock in readables:
if sock is lstsock: # new client connection, we can accept now
try:
conn, client_address = lstsock.accept()
except IOError as e:
code, msg = e.args
if code == errno.EINTR:
continue
else:
raise
# add the new connection to the 'read' list to poll
# in the next loop cycle
rlist.append(conn)
else:
# read a line that tells us how many bytes to write
bytes = sock.recv(1024)
if not bytes: # connection closed by client
sock.close()
rlist.remove(sock)
else:
print ('Got request to send %s bytes. '
'Sending them all...' % bytes)
# send them all
# XXX: this is cheating, we should use 'select' and wlist
# to determine whether socket is ready to be written to
data = os.urandom(int(bytes))
sock.sendall(data)
def main():
parser = optparse.OptionParser()
parser.add_option(
'-i', '--host', dest='host', default='0.0.0.0',
help='Hostname or IP address. Default is 0.0.0.0'
)
parser.add_option(
'-p', '--port', dest='port', type='int', default=2000,
help='Port. Default is 2000')
options, args = parser.parse_args()
serve_forever(options.host, options.port)
if __name__ == '__main__':
main()
|
run/gen-explicit-fee-schedules.py | three-Vs/hedera-services | 164 | 38098 | ###
# A script to convert the Services-consumable feeSchedules.json
# into the "typed" format used by the public pricing calculator.
###
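# The output maps each Hedera functionality name to its typed prices; an
# illustrative entry (structure only; the function name and values here are
# made up) looks like:
#   "CryptoTransfer": {"DEFAULT": {"nodedata": {...},
#                                  "networkdata": {...},
#                                  "servicedata": {...}}}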
import json
providers = ['nodedata', 'networkdata', 'servicedata']
typed_schedules = {}
with open('hedera-node/src/main/resources/feeSchedules.json', 'r') as fin:
cur_and_next_schedules = json.load(fin)
schedules = cur_and_next_schedules[0]['currentFeeSchedule']
for tfs in schedules:
if 'expiryTime' in tfs:
break
tfs = tfs['transactionFeeSchedule']
function = tfs['hederaFunctionality']
prices_list = tfs['fees']
prices_by_type = {}
for typed_prices in prices_list:
this_type = typed_prices.get('subType', 'DEFAULT')
this_type_prices = {}
for provider in providers:
this_type_prices[provider] = typed_prices[provider]
prices_by_type[this_type] = this_type_prices
typed_schedules[function] = prices_by_type
with open('typedFeeSchedules.json', 'w') as fout:
json.dump(typed_schedules, fout, indent=2)
|
cblue/data/__init__.py | dfhby0/CBLUE | 293 | 38102 | <gh_stars>100-1000
from .data_process import EEDataProcessor, REDataProcessor, ERDataProcessor, CTCDataProcessor, \
CDNDataProcessor, STSDataProcessor, QQRDataProcessor, QICDataProcessor, QTRDataProcessor
from .dataset import EEDataset, REDataset, ERDataset, CTCDataset, CDNDataset, STSDataset, \
QQRDataset, QICDataset, QTRDataset
__all__ = ['EEDataProcessor', 'EEDataset',
'REDataProcessor', 'REDataset',
'ERDataProcessor', 'ERDataset',
'CDNDataProcessor', 'CDNDataset',
'CTCDataProcessor', 'CTCDataset',
'STSDataProcessor', 'STSDataset',
'QQRDataProcessor', 'QQRDataset',
'QICDataProcessor', 'QICDataset',
'QTRDataProcessor', 'QTRDataset']
|
io_scene_vrm/editor/extension.py | 989onan/VRM_Addon_for_Blender | 344 | 38132 | <reponame>989onan/VRM_Addon_for_Blender
import bpy
from .vrm0.property_group import Vrm0PropertyGroup
class VrmAddonArmatureExtensionPropertyGroup(bpy.types.PropertyGroup): # type: ignore[misc]
addon_version: bpy.props.IntVectorProperty( # type: ignore[valid-type]
size=3 # noqa: F722
)
vrm0: bpy.props.PointerProperty( # type: ignore[valid-type]
name="VRM 0.x", type=Vrm0PropertyGroup # noqa: F722
)
armature_data_name: bpy.props.StringProperty() # type: ignore[valid-type]
|
tests/test_service_catalog/test_views/test_admin/test_settings/test_catalog/test_services/test_create.py | LaudateCorpus1/squest | 112 | 38149 | <gh_stars>100-1000
from copy import copy
from io import BytesIO
from PIL import Image
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.urls import reverse
from service_catalog.models import Service
from tests.test_service_catalog.base import BaseTest
class ServiceCreateTestCase(BaseTest):
def setUp(self):
super(ServiceCreateTestCase, self).setUp()
self.url = reverse('service_catalog:create_service')
def test_create_service(self):
data = {
"name": "new_service",
"description": "a new service",
"job_template": self.job_template_test.id,
"billing": "defined",
"billing_group_id": "",
"billing_group_is_shown": "on"
}
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
number_service_before = copy(Service.objects.all().count())
response = self.client.post(self.url, data=data)
self.assertEqual(302, response.status_code)
self.assertEqual(number_service_before + 1,
Service.objects.all().count())
def test_create_service_with_image(self):
im = Image.new(mode='RGB', size=(200, 200)) # create a new image using PIL
im_io = BytesIO() # a BytesIO object for saving image
im.save(im_io, 'JPEG') # save the image to im_io
im_io.seek(0) # seek to the beginning
image = InMemoryUploadedFile(
im_io, None, 'random-name.jpg', 'image/jpeg', len(im_io.getvalue()), None
)
data = {
"name": "new_service_with_image",
"description": "a new service",
"job_template": self.job_template_test.id,
"billing": "defined",
"billing_group_id": "",
"billing_group_is_shown": "on",
"image": image
}
number_service_before = Service.objects.all().count()
response = self.client.post(self.url, data=data, format="multipart")
self.assertEqual(302, response.status_code)
self.assertEqual(number_service_before + 1,
Service.objects.all().count())
new_service_with_image = Service.objects.get(name="new_service_with_image")
try:
self.assertIsNotNone(new_service_with_image.image.file)
except ValueError:
self.fail("Image not set")
# cleanup image after the test
new_service_with_image.image.delete()
|
tests/test_spiral_spanning_tree_coverage_path_planner.py | duken72/PythonRobotics | 15,431 | 38157 | import conftest # Add root path to sys.path
import os
import matplotlib.pyplot as plt
from PathPlanning.SpiralSpanningTreeCPP \
import spiral_spanning_tree_coverage_path_planner
spiral_spanning_tree_coverage_path_planner.do_animation = True
def spiral_stc_cpp(img, start):
num_free = 0
for i in range(img.shape[0]):
for j in range(img.shape[1]):
num_free += img[i][j]
STC_planner = spiral_spanning_tree_coverage_path_planner.\
SpiralSpanningTreeCoveragePlanner(img)
edge, route, path = STC_planner.plan(start)
covered_nodes = set()
for p, q in edge:
covered_nodes.add(p)
covered_nodes.add(q)
# assert complete coverage
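    # (in Spiral STC each spanning-tree node represents a 2x2 block of free
    # grid cells, which is why the free-cell count is divided by 4; this
    # description of the planner is an assumption, not taken from this test)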
assert len(covered_nodes) == num_free / 4
def test_spiral_stc_cpp_1():
img_dir = os.path.dirname(
os.path.abspath(__file__)) + \
"/../PathPlanning/SpiralSpanningTreeCPP"
img = plt.imread(os.path.join(img_dir, 'map', 'test.png'))
start = (0, 0)
spiral_stc_cpp(img, start)
def test_spiral_stc_cpp_2():
img_dir = os.path.dirname(
os.path.abspath(__file__)) + \
"/../PathPlanning/SpiralSpanningTreeCPP"
img = plt.imread(os.path.join(img_dir, 'map', 'test_2.png'))
start = (10, 0)
spiral_stc_cpp(img, start)
def test_spiral_stc_cpp_3():
img_dir = os.path.dirname(
os.path.abspath(__file__)) + \
"/../PathPlanning/SpiralSpanningTreeCPP"
img = plt.imread(os.path.join(img_dir, 'map', 'test_3.png'))
start = (0, 0)
spiral_stc_cpp(img, start)
if __name__ == '__main__':
conftest.run_this_test(__file__)
|
consumerui/grapher.py | AlexRogalskiy/kubeplus | 396 | 38186 | <filename>consumerui/grapher.py
import sys
import json
import subprocess
import sys
import os
from graphviz import Digraph
from graphviz import Graph
class ConnectionsGraph(object):
def draw(self, connections_json, output_folder, relsToHide):
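        # Each entry of the connections JSON is expected to look roughly like
        # the following (illustrative values; the field names mirror the
        # accesses below):
        #   {"Level": 1, "Kind": "Pod", "Name": "my-pod",
        #    "PeerKind": "Deployment", "PeerName": "my-deployment",
        #    "RelationType": "owner reference", "RelationDetails": ""}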
#print(connections_json)
cmd = "ls -ltr /root/"
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()[0]
#print(out)
fp = open(output_folder + "/" + connections_json, "r")
json_data = fp.read()
json_output = json.loads(json_data)
#print(json_output)
nodemap = {}
for n in json_output:
level = n['Level']
if level in nodemap.keys():
nodelist = nodemap[level]
else:
nodelist = []
nodelist.append(n)
nodemap[level] = nodelist
#print(nodemap)
opformat = 'png'
dot = Graph(comment='Connections Graph', format=opformat)
# dot.node('A', 'King Shivaji')
# dot.node('B', 'Sir Bedevere the Wise')
# dot.node('L', 'Sir Lancelot the Brave')
relsToHideList1 = relsToHide.split(",")
relsToHideList = []
for rel in relsToHideList1:
relsToHideList.append(rel.strip())
#print(relsToHideList)
# Create Nodes
for level, nodelist in nodemap.items():
for n in nodelist:
fqnodename = n['Kind'] + " " + n['Name']
fqpeername = n['PeerKind'] + " " + n['PeerName']
#print(fqnodename + " " + fqpeername)
if n['Kind'] == 'Pod':
dot.node(fqnodename, fqnodename, shape='box', style='filled', color='lightcyan1')
else:
dot.node(fqnodename, fqnodename, shape='box', style='filled', color='snow2')
if level > 0:
color = 'gray0'
relationshipType = n['RelationType']
relationshipDetails = n['RelationDetails']
relationInfo = relationshipType
if relationshipDetails != '' and relationshipType not in relsToHideList:
relationInfo = relationInfo + " (" + relationshipDetails + ")"
if relationshipType == 'specproperty':
color = 'crimson'
if relationshipType == 'label':
color = 'darkgreen'
if relationshipType == 'envvariable':
color = 'gold4'
if relationshipType == 'annotation':
color = 'indigo'
if relationshipType == 'owner reference':
color = 'blue'
dot.edge(fqpeername, fqnodename, color=color, label=relationInfo)
# Create edges
#dot.edges(['AB', 'AL'])
#dot.edge('B', 'L', constraint='false')
#print(dot.source)
filename = connections_json + ".gv"
rendered_file_path = dot.render('/root/' + filename, view=False)
#print("FILENAME:" + filename)
#print("Rendered file path:" + rendered_file_path)
#print("Output available in " + filename + "." + opformat)
#fp1 = open(output_folder + "/abc.txt", "w")
#fp1.write(connections_json)
#fp1.close()
if __name__ == '__main__':
graph = ConnectionsGraph()
#print("Inside connections.py")
connections_json = sys.argv[1]
output_folder = sys.argv[2]
if len(sys.argv) == 4:
relsToHide = sys.argv[3]
else:
relsToHide = ""
#print("Connections_json:"+ connections_json)
#print("Output folder:" + output_folder)
#print(relsToHide)
graph.draw(connections_json, output_folder, relsToHide)
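# --- Usage sketch (editor's note, not part of the original script) ---
# The invocation below follows the argv handling above; the file and folder names
# are illustrative placeholders, and graphviz must be installed for Digraph/Graph.
#   python3 grapher.py connections.json /root "label,specproperty"
# This reads /root/connections.json, renders /root/connections.json.gv.png, and
# omits the relationship details of the listed relation types from the edge labels.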
|
kik_unofficial/datatypes/xmpp/history.py | TriSerpent/kik-bot-api-unofficial | 120 | 38213 | from bs4 import BeautifulSoup
import time
from kik_unofficial.datatypes.xmpp.base_elements import XMPPElement, XMPPResponse
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
class OutgoingAcknowledgement(XMPPElement):
"""
Represents an outgoing acknowledgement for a message ID
"""
def __init__(self, sender_jid, is_receipt, ack_id, group_jid):
super().__init__()
self.sender_jid = sender_jid
self.group_jid = group_jid
self.is_receipt = is_receipt
self.ack_id = ack_id
def serialize(self):
timestamp = str(int(round(time.time() * 1000)))
user_ack_data = (
'<sender jid="{}">'
'<ack-id receipt="{}">{}</ack-id>'
'</sender>'
).format(self.sender_jid, str(self.is_receipt).lower(), self.ack_id)
group_ack_data = (
'<sender jid="{}" g="{}">'
'<ack-id receipt="{}">{}</ack-id>'
'</sender>'
).format(self.sender_jid, self.group_jid, str(self.is_receipt).lower(), self.ack_id)
data = ('<iq type="set" id="{}" cts="{}">'
'<query xmlns="kik:iq:QoS">'
'<msg-acks>'
'{}'
'</msg-acks>'
'<history attach="false" />'
'</query>'
'</iq>'
                ).format(self.message_id, timestamp, group_ack_data if self.group_jid is not None else user_ack_data)
return data.encode()
class OutgoingHistoryRequest(XMPPElement):
"""
Represents an outgoing request for the account's messaging history
"""
def __init__(self):
super().__init__()
def serialize(self):
timestamp = str(int(round(time.time() * 1000)))
data = ('<iq type="set" id="{}" cts="{}">'
'<query xmlns="kik:iq:QoS">'
'<msg-acks />'
'<history attach="true" />'
'</query>'
'</iq>'
).format(self.message_id, timestamp,)
return data.encode()
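# Editor's note: an example of the stanza OutgoingHistoryRequest.serialize() builds,
# with placeholder id/cts values (the real ones come from message_id and the clock):
#   <iq type="set" id="..." cts="...">
#     <query xmlns="kik:iq:QoS"><msg-acks /><history attach="true" /></query>
#   </iq>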
class HistoryResponse(XMPPResponse):
"""
Represents a Kik messaging history response.
"""
def __init__(self, data: BeautifulSoup):
super().__init__(data)
self.id = data["id"]
if data.query.history:
self.more = data.query.history.has_attr("more")
self.from_jid = data["from"]
self.messages = []
for message in data.query.history:
if message["type"] == "receipt":
args = {
'type':'receipt',
'from_jid': message["from"],
'receipt_type':message.receipt["type"],
'id':message.receipt.msgid["id"]
}
self.messages.append(Struct(**args))
elif message["type"] == "chat":
args = {
'type':'chat',
'id':message["id"],
'from_jid':message["from"],
'body': message.body.text if message.body else None,
'preview': message.preview.text if message.preview else None,
'timestamp': message.kik["timestamp"]
}
self.messages.append(Struct(**args))
elif message["type"] == "groupchat":
args = {
'type': 'groupchat',
'id': message["id"],
'from_jid': message["from"],
'body': message.body.text if message.body else None,
'preview': message.preview.text if message.preview else None,
'timestamp': message.kik["timestamp"],
'group_jid': message.g["jid"]
}
self.messages.append(Struct(**args))
|
mmrotate/models/detectors/oriented_rcnn.py | liuyanyi/mmrotate | 449 | 38227 | <gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import ROTATED_DETECTORS
from .two_stage import RotatedTwoStageDetector
@ROTATED_DETECTORS.register_module()
class OrientedRCNN(RotatedTwoStageDetector):
"""Implementation of `Oriented R-CNN for Object Detection.`__
__ https://openaccess.thecvf.com/content/ICCV2021/papers/Xie_Oriented_R-CNN_for_Object_Detection_ICCV_2021_paper.pdf # noqa: E501, E261.
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(OrientedRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmrotate/tools/analysis_tools/get_flops.py`
"""
outs = ()
# backbone
x = self.extract_feat(img)
# rpn
if self.with_rpn:
rpn_outs = self.rpn_head(x)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 6).to(img.device)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposals)
outs = outs + (roi_outs, )
return outs
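# Editor's note: forward_dummy above exists only so the FLOPs counter has something
# to trace. A hedged invocation, assuming a config path of your own:
#   python tools/analysis_tools/get_flops.py configs/oriented_rcnn/<your_config>.py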
|
e2e_tests/tests/fixtures/pytorch_lightning_amp/model_def.py | gh-determined-ai/determined | 1,729 | 38312 | """
This example shows how to interact with the Determined PyTorch Lightning Adapter
interface to build a basic MNIST network. LightningAdapter utilizes the provided
LightningModule with Determined's PyTorch control loop.
"""
from determined.pytorch import PyTorchTrialContext, DataLoader
from determined.pytorch.lightning import LightningAdapter
import data
import mnist
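# Editor's note (not part of the fixture): in a Determined experiment config this trial
# is typically referenced through an entrypoint such as "model_def:MNISTTrial" and
# submitted with the CLI, e.g. `det experiment create <experiment_config>.yaml <context_dir>`;
# the config and context names here are placeholders.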
class MNISTTrial(LightningAdapter):
def __init__(self, context: PyTorchTrialContext, *args, **kwargs) -> None:
lm = mnist.LitMNIST(
hidden_size=context.get_hparam('hidden_size'),
learning_rate=context.get_hparam('learning_rate'),
)
data_dir = f"/tmp/data-rank{context.distributed.get_rank()}"
self.dm = data.MNISTDataModule(
data_url=context.get_data_config()["url"],
data_dir=data_dir,
batch_size=context.get_per_slot_batch_size(),
)
super().__init__(context, lightning_module=lm, *args, **kwargs)
self.dm.prepare_data()
def build_training_data_loader(self) -> DataLoader:
self.dm.setup()
dl = self.dm.train_dataloader()
return DataLoader(dl.dataset, batch_size=dl.batch_size, num_workers=dl.num_workers)
def build_validation_data_loader(self) -> DataLoader:
self.dm.setup()
dl = self.dm.val_dataloader()
return DataLoader(dl.dataset, batch_size=dl.batch_size, num_workers=dl.num_workers)
|
AFLW/fddb_symbol_gen.py | kli-nlpr/FaceDetection-ConvNet-3D | 159 | 38329 | <filename>AFLW/fddb_symbol_gen.py
import mxnet as mx
def get_vgg16_gen():
relu_feature = mx.symbol.Variable(name="relu_feature")
box_predict = mx.symbol.Variable(name="box_predict")
ground_truth = mx.symbol.Variable(name="ground_truth")
bbox_label = mx.symbol.Variable(name="bbox_label")
ell_label = mx.symbol.GenEllLabel(*[box_predict, bbox_label, ground_truth], spatial_scale=0.5, name="ell_label")
# roi warping
roi_warping = mx.symbol.ROIWarping(*[relu_feature, box_predict, ground_truth], warped_shape=(28, 28),
spatial_scale=0.5, name="roi_warping")
roi_warping_pool = mx.symbol.Pooling(
data=roi_warping, pool_type="max", kernel=(4, 4), stride=(4, 4), name="roi_warping_pool"
)
roi_warping_flatten = mx.symbol.Flatten(data=roi_warping_pool)
loss_all = mx.symbol.Group([roi_warping_flatten, ell_label])
return loss_all
|
runtime/module_resolution.py | cheery/lever | 136 | 38332 | <reponame>cheery/lever<filename>runtime/module_resolution.py
from space import *
import base
import bon
import evaluator
import core
import os
import pathobj
import stdlib
import sys
class ModuleScope(Object):
def __init__(self, local, parent=None, frozen=False):
self.cache = {} # maps absolute path -> module cache entry
self.local = local
self.parent = parent
self.frozen = frozen # if frozen, the scope relies on cache.
self.compile_file = null
self.base_module = None
def setcache(self, m_path, module, mtime):
m = ModuleCache(m_path, module, mtime)
self.cache[pathobj.stringify(m_path)] = m
return m
def getcache(self, m_path):
s = pathobj.stringify(m_path)
try:
return self.cache[s]
except KeyError as k:
return None
def getattr(self, name):
if name == u"parent":
return self.parent if self.parent is not None else null
if name == u"local":
return self.local
if name == u"frozen":
return boolean(self.frozen)
if name == u"base_module":
if self.base_module is None:
return null
return self.base_module
if name == u"compile_file":
return self.compile_file
return Object.getattr(self, name)
def setattr(self, name, value):
if name == u"base_module":
if len(self.cache) > 0:
raise unwind(LTypeError(u"Cannot change base_module in active module scope"))
self.base_module = cast_n(value, Module, u"ModuleScope.base_module")
return null
return Object.setattr(self, name, value)
def listattr(self):
listing = Object.listattr(self)
listing.extend([
String(u"parent"),
String(u"local"),
String(u"frozen"),
String(u"base_module"),
String(u"compile_file"),
])
return listing
def getitem(self, item):
if isinstance(item, String):
if item.string in self.cache:
return self.cache[item.string]
raise OldError(u"%s not in module scope" % item.repr())
def iter(self):
return ScopeIterator(self.cache.iterkeys())
#
@ModuleScope.instantiator2(signature(pathobj.Path, ModuleScope, Object, optional=2))
def _(local, parent, options):
scope = ModuleScope(local, parent)
if options:
key = String(u"compile_file")
if options.contains(key):
scope.compile_file = options.getitem(key)
return scope
class ScopeIterator(Object):
_immutable_fields_ = ['iterator']
def __init__(self, iterator):
self.iterator = iterator
def iter(self):
return self
@ScopeIterator.builtin_method
@signature(ScopeIterator)
def next(self):
return String(self.iterator.next())
class ModuleCache(Object):
def __init__(self, path, module, mtime):
self.path = path
self.module = module
self.mtime = mtime
def getattr(self, name):
if name == u"path":
return self.path
if name == u"module":
return self.module
if name == u"mtime":
return Float(self.mtime)
return Object.getattr(self, name)
def listattr(self):
listing = Object.listattr(self)
listing.extend([
String(u"path"),
String(u"module"),
String(u"mtime"),
])
return listing
@ModuleCache.builtin_method
@signature(ModuleCache)
def get_moduleinfo(self):
return moduleinfo(self.path)
root_module = ModuleScope(pathobj.parse(u"builtin:/"), frozen=True)
root_module.base_module = base.module
for py_module in stdlib.import_all_modules():
assert isinstance(py_module.module, Module), "dependency cycle somewhere"
p = pathobj.concat(root_module.local, pathobj.parse(py_module.module.name))
py_module.module.setattr_force(u"doc", pathobj.parse(u"doc:/" + py_module.module.name))
root_module.setcache(p, py_module.module, 0.0)
import naming
naming.breath_first_search(py_module.module, 1.0)
base.module.setattr_force(u"doc", pathobj.parse(u"doc:/base"))
root_module.setcache(pathobj.parse(u"builtin:/" + base.module.name), base.module, 0.0)
# the importer poststage for base module will take place in
# entry generation at runtime/main.py because there are so many
# items added into the base module all around the system.
import main
def start(main_script):
assert isinstance(main_script, String)
lib_scope = ModuleScope(
pathobj.concat(core.get_ec().lever_path, pathobj.parse(u"lib")),
root_module)
lib_scope.compile_file = LazyLoader(lib_scope)
main_path = pathobj.os_parse(resuffix(main_script.string, u".lc", u""))
mi = moduleinfo(pathobj.abspath(main_path))
scope = ModuleScope(mi.directory, lib_scope)
this = Module(mi.name.string, {}, extends=base.module) # base.module
if not (mi.lc_present or mi.cb_present):
raise OldError(u"main module not present")
scope.setcache(main_path, this, max(mi.lc_mtime, mi.cb_mtime))
mi.default_config(this, scope)
mi.loadit(this, scope)
return this
class LazyLoader(Object):
def __init__(self, lib_scope):
self.lib_scope = lib_scope
def call(self, argv):
lib_scope = self.lib_scope
mi = moduleinfo(pathobj.concat(lib_scope.local, pathobj.parse(u"compiler")))
this = Module(mi.name.string, {}, extends=base.module) # base.module
mi.default_config(this, lib_scope)
mi.loadit(this, lib_scope)
lib_scope.compile_file = this.getattr(u"compile_file")
return lib_scope.compile_file.call(argv)
# plans:
# allow modules derive or create new scopes and isolate themselves.
# module path
def moduleinfo(module_path):
module_path = pathobj.abspath(module_path)
module_name = module_path.getattr(u"basename")
assert isinstance(module_name, String)
s = pathobj.os_stringify(module_path).encode('utf-8')
is_dir = False
if os.path.isdir(s):
w = os.path.join(s, "init")
if os.path.exists(w + ".lc.cb") or os.path.exists(w + ".lc"):
is_dir = True
s = w
else:
module_path = pathobj.directory(module_path)
cb_path = s + ".lc.cb"
cb_present = os.path.exists(cb_path)
cb_mtime = 0.0
lc_path = s + ".lc"
lc_present = os.path.exists(lc_path)
lc_mtime = 0.0
if cb_present:
cb_mtime = os.path.getmtime(cb_path)
if lc_present:
lc_mtime = os.path.getmtime(lc_path)
# This ignores outdated bytecode objects.
if cb_present and lc_present:
cb_present = not cb_mtime < lc_mtime
return ModuleInfo(
module_name, module_path,
pathobj.os_parse(cb_path.decode('utf-8')), cb_present, cb_mtime,
pathobj.os_parse(lc_path.decode('utf-8')), lc_present, lc_mtime,
)
class ModuleInfo(Object):
def __init__(self, name, directory, cb_path, cb_present, cb_mtime, lc_path, lc_present, lc_mtime):
self.name = name
self.directory = directory
self.cb_path = cb_path
self.cb_present = cb_present
self.cb_mtime = cb_mtime
self.lc_path = lc_path
self.lc_present = lc_present
self.lc_mtime = lc_mtime
def default_config(self, module, scope):
module.setattr(u"dir", self.directory)
module.setattr(u"name", self.name)
module.setattr(u"import", Import(self.directory, scope))
return module
def loadit(self, module, scope):
if not self.cb_present:
while scope.compile_file is null and scope.parent is not None:
scope = scope.parent
if scope.compile_file is null:
raise OldError(u"Lever bytecode compiler stale or missing: " + self.lc_path.repr())
scope.compile_file.call([self.cb_path, self.lc_path])
self.cb_mtime = os.path.getmtime(pathobj.os_stringify(self.cb_path).encode('utf-8'))
self.cb_present = True
program = evaluator.loader.from_object(bon.open_file(self.cb_path), self.cb_path)
res = program.call([module])
return res
def getattr(self, name):
if name == u"present":
return boolean(self.cb_present or self.lc_present)
if name == u"mtime":
return Float(max(self.lc_mtime, self.cb_mtime))
return Object.getattr(self, name)
class Import(Object):
def __init__(self, local, scope):
self.local = local
self.scope = scope
def call(self, argv):
if len(argv) != 1:
raise OldError(u"wrong number of arguments to import")
name = argv[0]
if isinstance(name, pathobj.Path):
raise OldError(u"no direct loading yet")
elif not isinstance(name, String):
raise OldError(u"expected string")
# import resolution:
# local/script.lc
path = pathobj.concat(self.local, pathobj.to_path(name))
cache = self.scope.getcache(path)
if cache:
return cache.module
if not self.scope.frozen:
mi = moduleinfo(path)
if mi.lc_present or mi.cb_present:
base_module = get_base_module(self.scope)
this = Module(name.string, {}, extends=base_module) # base.module
self.scope.setcache(path, this, max(mi.lc_mtime, mi.cb_mtime))
mi.default_config(this, self.scope)
mi.loadit(this, self.scope)
return this
# scope/
scope = self.scope
while scope is not None:
path = pathobj.concat(scope.local, pathobj.to_path(name))
cache = scope.getcache(path)
if cache:
return cache.module
if not scope.frozen:
mi = moduleinfo(path)
if mi.lc_present or mi.cb_present:
base_module = get_base_module(scope)
this = Module(name.string, {}, extends=base_module) # base.module
scope.setcache(path, this, max(mi.lc_mtime, mi.cb_mtime))
mi.default_config(this, scope)
mi.loadit(this, scope)
return this
scope = scope.parent
raise OldError(u"module '%s' not present" % name.string)
def getattr(self, name):
if name == u'scope':
return self.scope
if name == u"local":
return self.local
return Object.getattr(self, name)
def get_base_module(scope):
while scope.parent and scope.base_module is None:
scope = scope.parent
return scope.base_module
@Import.instantiator2(signature(pathobj.Path, ModuleScope))
def _(local, scope):
return Import(local, scope)
@ModuleScope.builtin_method
@signature(ModuleScope, String)
def reimport(scope, obj):
if obj.string not in scope.cache:
raise OldError(u"Cannot reimport, module not present")
mc = scope.cache[obj.string]
mi = moduleinfo(mc.path)
mi.default_config(mc.module, scope)
mi.loadit(mc.module, scope)
mc.mtime = max(mi.lc_mtime, mi.cb_mtime)
return mc.module
def resuffix(string, suffix, new_suffix=u""):
if string.endswith(suffix):
i = max(0, len(string) - len(suffix))
return string[0:i] + new_suffix
return string + new_suffix
base.module.setattr_force(u"ModuleScope", ModuleScope.interface)
base.module.setattr_force(u"Import", Import.interface)
|
up/utils/model/optim/__init__.py | ModelTC/EOD | 196 | 38350 | <filename>up/utils/model/optim/__init__.py
from .lars import LARS # noqa
from .lamb import LAMB # noqa |
anchore_manager/version.py | Nordix/anchore-engine | 110 | 38392 | <filename>anchore_manager/version.py
version = "0.9.4"
|
faced/const.py | hseguro/faced | 575 | 38440 | <filename>faced/const.py
import os
MODELS_PATH = os.path.join(os.path.dirname(__file__), "models")
YOLO_SIZE = 288
YOLO_TARGET = 9
CORRECTOR_SIZE = 50
|
test/test_insert_documents.py | ShaneKilkelly/bedquilt | 288 | 38459 | <reponame>ShaneKilkelly/bedquilt
import testutils
import json
import string
import psycopg2
class TestInsertDocument(testutils.BedquiltTestCase):
def test_insert_into_non_existant_collection(self):
doc = {
"_id": "<EMAIL>",
"name": "<NAME>",
"age": 20
}
self.cur.execute("""
select bq_insert('people', '{}');
""".format(json.dumps(doc)))
result = self.cur.fetchone()
self.assertEqual(
result, ('<EMAIL>',)
)
self.cur.execute("select bq_list_collections();")
collections = self.cur.fetchall()
self.assertIsNotNone(collections)
self.assertEqual(collections, [("people",)])
def test_with_non_string_id(self):
docs = [
{
"_id": 42,
"name": "Penguin",
"age": "<EMAIL>"
},
{
"_id": ['derp'],
"name": "Penguin",
"age": "<EMAIL>"
},
{
"_id": {"name": "Penguin"},
"age": "<EMAIL>"
},
{
"_id": False,
"name": "Penguin",
"age": "<EMAIL>"
},
{
"_id": None,
"name": "Penguin",
"age": "<EMAIL>"
}
]
for doc in docs:
with self.assertRaises(psycopg2.InternalError):
self.cur.execute("""
select bq_insert('people', '{}');
""".format(json.dumps(doc)))
self.conn.rollback()
def test_insert_without_id(self):
doc = {
"name": "<NAME>",
"age": 20
}
self.cur.execute("""
select bq_insert('people', '{}');
""".format(json.dumps(doc)))
result = self.cur.fetchone()
self.assertIsNotNone(result)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), 1)
_id = result[0]
self.assertIn(type(_id), {str, unicode})
self.assertEqual(len(_id), 24)
for character in _id:
self.assertIn(character, string.hexdigits)
def test_with_single_quotes_in_field(self):
doc = {
"description": "Something I've eaten"
}
self.cur.execute("""
select bq_insert('things', %s);
""", (json.dumps(doc),))
result = self.cur.fetchone()
self.assertIsNotNone(result)
def test_insert_with_repeat_id(self):
doc = {
"_id": "user_one",
"name": "<NAME>",
"age": 20
}
self.cur.execute("""
select bq_insert('people', '{}');
""".format(json.dumps(doc)))
result = self.cur.fetchone()
self.assertIsNotNone(result)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), 1)
_id = result[0]
self.assertEqual(_id, "user_one")
self.conn.commit()
with self.assertRaises(psycopg2.IntegrityError):
self.cur.execute("""
select bq_insert('people', '{}');
""".format(json.dumps(doc)))
self.conn.rollback()
self.cur.execute("select count(*) from people;")
result = self.cur.fetchone()
self.assertEqual(result, (1,))
|
src/Query/apifuzz.py | codexgigassys/codex-backend | 161 | 38481 | # Copyright (C) 2016 <NAME>.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
import pathmagic
from pymongo import MongoClient
import ssdeep
from env import envget
def searchFuzzy(fuzz, limit, thresh):
client = MongoClient(envget('metadata.host'), envget('metadata.port'))
db = client[envget('db_metadata_name')]
coll_meta = db["db_metadata_collection"]
f1 = coll_meta.find({}, {"file_id": 1, "fuzzy_hash": 1}).limit(limit)
l = []
for f in f1:
l.append(f)
ret = {}
for a in l:
res = -1
try:
res = ssdeep.compare(a["fuzzy_hash"], fuzz)
        except ssdeep.InternalError:
print(str(res) + "------" +
str(a["fuzzy_hash"]) + "-----" + str(a["file_id"]))
continue
if(res >= thresh):
ret[a["file_id"]] = res
return ret
def searchFull(search, limit):
# print("1")
client = MongoClient(envget('metadata.host'), envget('metadata.port'))
# print("2")
db = client[envget('db_metadata_name')]
# print("3")
coll_meta = db["db_metadata_collection"]
# print("4")
f1 = coll_meta.find(search).limit(limit)
# print("5")
l = []
for f in f1:
l.append(f)
# print("6")
ret = []
for a in l:
ret.append(str(a["file_id"]))
# print("7")
return ret
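# --- Usage sketch (editor's addition, not part of the original module) ---
# Assumes a reachable MongoDB configured via env.envget and an ssdeep hash computed
# elsewhere; the file name below is a placeholder.
#   sample_fuzz = ssdeep.hash(open("sample.bin", "rb").read())
#   matches = searchFuzzy(sample_fuzz, limit=1000, thresh=80)
#   for file_id, score in matches.items():
#       print(file_id, score)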
|
yasql/apps/sqlorders/urls.py | Fanduzi/YaSQL | 443 | 38483 | <reponame>Fanduzi/YaSQL
# -*- coding:utf-8 -*-
# edit by fuzongfei
from django.urls import path
from sqlorders import views
urlpatterns = [
    # SQL work orders
path('envs', views.GetDBEnvironment.as_view(), name='v1.sqlorders.db-environment'),
path('schemas', views.GetDbSchemas.as_view(), name='v1.sqlorders.db-schemas'),
path('incep/syntaxcheck', views.IncepSyntaxCheckView.as_view(), name='v1.sqlorders.incep.syntaxcheck'),
path('commit', views.SqlOrdersCommit.as_view(), name='v1.sqlorders.commit'),
path('list', views.SqlOrdersList.as_view(), name='v1.sqlorders.list'),
path('detail/<str:order_id>', views.SqlOrdersDetail.as_view(), name='v1.sqlorders.detail'),
path('op/approve/<int:pk>', views.OpSqlOrderView.as_view({"put": "approve"}), name='v1.sqlorders.approve'),
path('op/feedback/<int:pk>', views.OpSqlOrderView.as_view({"put": "feedback"}), name='v1.sqlorders.feedback'),
path('op/close/<int:pk>', views.OpSqlOrderView.as_view({"put": "close"}), name='v1.sqlorders.close'),
path('op/review/<int:pk>', views.OpSqlOrderView.as_view({"put": "review"}), name='v1.sqlorders.review'),
    # Generate work-order tasks
path('tasks/generate', views.GenerateTasksView.as_view(), name='v1.sqlorders.generate-tasks'),
path('tasks/get/<str:order_id>', views.GetTaskIdView.as_view(), name='v1.sqlorders.get-task-id'),
path('tasks/list/<str:task_id>', views.GetTasksListView.as_view(), name='v1.sqlorders.get-tasks-list'),
path('tasks/preview/<str:task_id>', views.GetTasksPreviewView.as_view(),
name='v1.sqlorders.get-tasks-preview'),
    # Execute tasks
path('tasks/execute/single', views.ExecuteSingleTaskView.as_view(), name='v1.sqlorders.execute-single-task'),
path('tasks/execute/multi', views.ExecuteMultiTasksView.as_view(), name='v1.sqlorders.execute-multi-tasks'),
path('tasks/throttle', views.ThrottleTaskView.as_view(), name='v1.sqlorders.throttle-task'),
path('tasks/result/<int:id>', views.GetTasksResultView.as_view(), name='v1.sqlorders.get-tasks-result'),
# Hook
path('hook', views.HookSqlOrdersView.as_view(), name='v1.sqlorders.hook-sqlorders'),
# download export files
path('export/download/<str:base64_filename>', views.DownloadExportFilesView.as_view(),
name='v1.sqlorders.download-export-files'),
    # Release versions
path('versions/get', views.ReleaseVersionsGet.as_view(), name='v1.sqlorders.versions.get'),
path('versions/list', views.ReleaseVersionsList.as_view(), name='v1.sqlorders.versions.list'),
path('versions/create', views.ReleaseVersionsCreate.as_view(),
name='v1.sqlorders.versions.create'),
path('versions/update/<int:key>', views.ReleaseVersionsUpdate.as_view(),
name='v1.sqlorders.versions.update'),
path('versions/delete/<int:id>', views.ReleaseVersionsDelete.as_view(),
name='v1.sqlorders.versions.delete'),
path('versions/view/<str:version>', views.ReleaseVersionsView.as_view(),
name='v1.sqlorders.versions.view'),
]
|
09WebFramework/day04/basic04.py | HaoZhang95/PythonAndMachineLearning | 937 | 38485 | <reponame>HaoZhang95/PythonAndMachineLearning
"""
ORM (object-relational mapping) is the core idea behind Django.
The point of ORM is that database operations no longer manipulate SQL statements
directly; you work with objects instead: define a class with class attributes such
as uid and username, and an insert simply stores that User object.
"""
# How the ORM mapping is implemented: use a metaclass (a subclass of type) to rewrite the class object
# Define the metaclass
class ModelMetaclass(type):
def __new__(cls, name, bases, attrs):
# name --> User
# bases --> object
# attrs --> {
# "uid" :('uid', "int unsigned"),
# "name": ('username', "varchar(30)"),
# "email": ('email', "varchar(30)"),
# "password": ('password', "varchar(30)"),
# "__init__": xxx,
# "save": xxx2,
# }
mappings = dict()
        # Decide whether each attribute needs to be saved into the mapping
for k, v in attrs.items():
            # Check whether the value is a tuple (i.e. a column definition)
if isinstance(v, tuple):
print('Found mapping: %s ==> %s' % (k, v))
mappings[k] = v
        # Remove the attributes that have just been stored in the mappings dict
        for k in mappings.keys():
            attrs.pop(k)  # equivalent to del attrs[k]
        # attrs will now carry the __init__/save methods plus the uid/name/email/password mapping and the class name:
# attrs = {
# "__init__": xxxx,
# "save": xxxx2,
# "__mappings__": {
# "uid": ('uid', "int unsigned"),
# "name": ('username', "varchar(30)"),
# ""email: ('email', "varchar(30)"),
# "password": ('password', "varchar(30)")
# },
# "__table__": "User"
# }
        attrs['__mappings__'] = mappings  # save the attribute-to-column mapping
        attrs['__table__'] = name  # assume the table name matches the class name
return type.__new__(cls, name, bases, attrs)
class User(metaclass=ModelMetaclass):
uid = ('uid', "int unsigned")
name = ('username', "varchar(30)")
email = ('email', "varchar(30)")
password = ('password', "<PASSWORD>)")
    # Once the metaclass is applied, the class attributes above no longer live on the
    # class itself; they are stored in the dict referenced by __mappings__
    # The User class above therefore ends up with
# __mappings__ = {
# "uid": ('uid', "int unsigned")
# "name": ('username', "varchar(30)")
# "email": ('email', "varchar(30)")
# "password": ('password', "varchar(30)")
# }
# __table__ = "User"
    # The parameter name is kwargs, not **kwargs; the ** only tells the interpreter
    # to pack the incoming keyword arguments into a dict
    # Loop over the keyword arguments and assign each one as an instance attribute
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
def save(self):
fields = [] # ["uid", "username"...]
args = [] #[12345, "laowang"...]
        # The instance itself has no __mappings__, so the lookup falls back to the class object
        # k --> "uid", v --> ("uid", "int unsigned"); getattr(self, k) --> 12345
for k, v in self.__mappings__.items():
fields.append(v[0])
args.append(getattr(self, k, None))
args_temp = list()
for temp in args:
if isinstance(temp, int):
                # a numeric value: append it as a bare literal
                args_temp.append(str(temp))
            elif isinstance(temp, str):
                # a string value: wrap it in single quotes
args_temp.append("""'%s'""" % temp)
# sql = 'insert into %s (%s) values (%s);' \
# % (self.__table__, ','.join(fields), ','.join([str(i) for i in args]))
        # use ",".join so every field/value is separated by a comma
sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(args_temp))
print('SQL: %s' % sql)
# Extract the common logic into a base class: a new model such as User2 can then simply inherit from Model
class Model(object, metaclass=ModelMetaclass):
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
def save(self):
fields = []
args = []
for k, v in self.__mappings__.items():
fields.append(v[0])
args.append(getattr(self, k, None))
args_temp = list()
for temp in args:
            # a numeric value: append it as a bare literal
if isinstance(temp, int):
args_temp.append(str(temp))
elif isinstance(temp, str):
args_temp.append("""'%s'""" % temp)
sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(args_temp))
print('SQL: %s' % sql)
class User2(Model):
uid = ('uid', "int unsigned")
name = ('username', "varchar(30)")
email = ('email', "varchar(30)")
password = ('password', "<PASSWORD>)")
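# --- Editor's sketch (not part of the original tutorial): any further subclass of Model
# --- gets the same mapping behaviour for free. "Order" and its columns are invented
# --- names; the field order relies on Python 3.6+ preserving class-body definition order.
class Order(Model):
    oid = ('oid', "int unsigned")
    item = ('item', "varchar(30)")
def test_order_sketch():
    o = Order(oid=1, item="book")
    o.save()  # prints: SQL: insert into Order (oid,item) values (1,'book')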
def test01():
u = User(uid=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>')
# print(u.__dict__)
u.save()
def test02():
list = ['12356', "laowang", "email"]
print(",".join(list))
def main():
# test01()
test02()
if __name__ == '__main__':
main()
|
Text/TextQualityWatchdog/Watchdog/__init__.py | iii-PaulCridland/azure-search-power-skills | 128 | 38492 | # Standard libraries
import os
import json
import logging
from typing import Text
# Azure functions
import azure.functions as func
# Inference runtime
import onnxruntime as ort
from tokenizers import BertWordPieceTokenizer
# Helper scripts
from .PreprocessData import normalize_text, truncate_text
from .Predict import get_ids_and_masks, predict
# Initialize ONNX runtime and language model tokenizer
vocab_file_path = os.path.join(os.path.dirname(__file__), "Model/bert-base-uncased-vocab.txt")
onnx_file_path = os.path.join(os.path.dirname(__file__), "Model/watchdog_model.onnx")
tokenizer = BertWordPieceTokenizer(vocab_file_path)
tokenizer.enable_padding(pad_id=0, pad_token="[PAD]", length=128)
tokenizer.enable_truncation(max_length=128)
ort_session = ort.InferenceSession(onnx_file_path)
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Invoked TextQualityWatchdog Skill.')
try:
body = json.dumps(req.get_json())
if body:
logging.info(body)
values = json.loads(body)['values']
results = {}
results["values"] = []
for value in values:
text = value['data']['text']
                # Apply punctuation and whitespace normalization, and convert to lowercase
text = normalize_text(text)
# Truncate the text to a maximum of 128 (default) whitespace separated tokens
text = truncate_text(text)
# Compute the input tokens and attention masks for the text sequence
input_ids, attention_masks = get_ids_and_masks(tokenizer, text)
# Call the ONNX model to perform inference on the input
flat_prediction = predict(ort_session, input_ids, attention_masks)
payload = (
{
"recordId": value['recordId'],
"data": {
"text_quality_warning": int(flat_prediction[0])
}
}
)
results["values"].append(payload)
result = json.dumps(results, ensure_ascii=False)
return func.HttpResponse(result, mimetype="application/json")
else:
return func.HttpResponse(
"Invalid body",
status_code=400
)
except ValueError:
return func.HttpResponse(
"Invalid body",
status_code=400
)
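# --- Example payload sketch (editor's addition; field names follow the code above) ---
# Request body the skill expects:
#   {"values": [{"recordId": "1", "data": {"text": "some passage to check"}}]}
# Response body it returns:
#   {"values": [{"recordId": "1", "data": {"text_quality_warning": 0}}]}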
|