max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
main.py | impressive8/Practice | 197 | 44090 | # ===================================
# Import the libraries
# ===================================
import numpy as np
from matplotlib import pylab as plt
import imaging
import utility
import os,sys
# ===================================
# Which stages to run
# ===================================
do_add_noise = False
do_black_level_correction = True
do_lens_shading_correction = True
do_bad_pixel_correction = True
do_channel_gain_white_balance = True
do_bayer_denoise = False
do_demosaic = True
do_demosaic_artifact_reduction = True
do_color_correction = True
do_gamma = True
do_chromatic_aberration_correction = True
do_tone_mapping = True
do_memory_color_enhancement = True
do_noise_reduction = True
do_sharpening = True
do_distortion_correction = False
# ===================================
# Remove all the .png files
os.system("rm images/*.png")
# ===================================
# ===================================
# Read the raw image and set up the metadata
# ===================================
# uncomment the desired image_name to run it through the pipeline
image_name = "DSC_1339_768x512_rggb" # image content: Rose rggb
# image_name = "DSC_1339_768x512_gbrg" # image content: Rose gbrg
# image_name = "DSC_1339_768x512_grbg" # image content: Rose grbg
# image_name = "DSC_1339_768x512_bggr" # image content: Rose bggr
# image_name = "DSC_1320_2048x2048_rggb" # image content: Potrait
# image_name = "DSC_1372_6032x4032_rggb" # image content: Downtown San Jose
# image_name = "DSC_1372_12096x6032_rgb_out_demosaic" # image content: Downtown San Jose after demosaic
# read the raw image
temp = np.fromfile("images/" + image_name + ".raw", dtype="uint16", sep="")
if (image_name == "DSC_1339_768x512_rggb"):
temp = temp.reshape([512, 768])
raw = imaging.ImageInfo("1339_768x512_rggb", temp)
raw.set_color_space("raw")
raw.set_bayer_pattern("rggb")
raw.set_channel_gain((1.94921875, 1.0, 1.0, 1.34375)) # Please shuffle the values
# depending on bayer_pattern
raw.set_bit_depth(14)
raw.set_black_level((600, 600, 600, 600))
raw.set_white_level((15520, 15520, 15520, 15520))
# the ColorMatrix2 found from the metadata
raw.set_color_matrix([[.9020, -.2890, -.0715],\
[-.4535, 1.2436, .2348],\
[-.0934, .1919, .7086]])
data = raw.data
elif (image_name == "DSC_1339_768x512_gbrg"):
temp = temp.reshape([512, 768])
raw = imaging.ImageInfo("1339_768x512_gbrg", temp)
raw.set_color_space("raw")
raw.set_bayer_pattern("gbrg")
raw.set_channel_gain((1.0, 1.34375, 1.94921875, 1.0)) # Please shuffle the values
# depending on bayer_pattern
raw.set_bit_depth(14)
raw.set_black_level((600, 600, 600, 600))
raw.set_white_level((15520, 15520, 15520, 15520))
# the ColorMatrix2 found from the metadata
raw.set_color_matrix([[.9020, -.2890, -.0715],\
[-.4535, 1.2436, .2348],\
[-.0934, .1919, .7086]])
data = raw.data
elif (image_name == "DSC_1339_768x512_grbg"):
temp = temp.reshape([512, 768])
raw = imaging.ImageInfo("1339_768x512_grbg", temp)
raw.set_color_space("raw")
raw.set_bayer_pattern("grbg")
raw.set_channel_gain((1.0, 1.94921875, 1.34375, 1.0)) # Please shuffle the values
# depending on bayer_pattern
raw.set_bit_depth(14)
raw.set_black_level((600, 600, 600, 600))
raw.set_white_level((15520, 15520, 15520, 15520))
# the ColorMatrix2 found from the metadata
raw.set_color_matrix([[.9020, -.2890, -.0715],\
[-.4535, 1.2436, .2348],\
[-.0934, .1919, .7086]])
data = raw.data
elif (image_name == "DSC_1339_768x512_bggr"):
temp = temp.reshape([512, 768])
raw = imaging.ImageInfo("1339_768x512_bggr", temp)
raw.set_color_space("raw")
raw.set_bayer_pattern("bggr")
raw.set_channel_gain((1.34375, 1.0, 1.0, 1.94921875,)) # Please shuffle the values
# depending on bayer_pattern
raw.set_bit_depth(14)
raw.set_black_level((600, 600, 600, 600))
raw.set_white_level((15520, 15520, 15520, 15520))
# the ColorMatrix2 found from the metadata
raw.set_color_matrix([[.9020, -.2890, -.0715],\
[-.4535, 1.2436, .2348],\
[-.0934, .1919, .7086]])
data = raw.data
elif (image_name == "DSC_1320_2048x2048_rggb"):
temp = temp.reshape([2048, 2048])
raw = imaging.ImageInfo("1320_2048x2048_rggb", temp)
raw.set_color_space("raw")
raw.set_bayer_pattern("rggb")
raw.set_channel_gain((1.94921875, 1.0, 1.0, 1.34375)) # Please shuffle the values
# depending on bayer_pattern
raw.set_bit_depth(14)
raw.set_black_level((600, 600, 600, 600))
raw.set_white_level((15520, 15520, 15520, 15520))
# the ColorMatrix2 found from the metadata
raw.set_color_matrix([[.9020, -.2890, -.0715],\
[-.4535, 1.2436, .2348],\
[-.0934, .1919, .7086]])
data = raw.data
elif (image_name == "DSC_1372_6032x4032_rggb"):
temp = temp.reshape([4032, 6032])
raw = imaging.ImageInfo("DSC_1372_6032x4032_rggb", temp)
raw.set_color_space("raw")
raw.set_bayer_pattern("rggb")
raw.set_channel_gain((1.94921875, 1.0, 1.0, 1.34375)) # Please shuffle the values
# depending on bayer_pattern
raw.set_bit_depth(14)
raw.set_black_level((600, 600, 600, 600))
raw.set_white_level((15520, 15520, 15520, 15520))
# the ColorMatrix2 found from the metadata
raw.set_color_matrix([[.9020, -.2890, -.0715],\
[-.4535, 1.2436, .2348],\
[-.0934, .1919, .7086]])
data = raw.data
elif (image_name == "DSC_1372_12096x6032_rgb_out_demosaic"):
temp = temp.reshape([12096, 6032])
temp = np.float32(temp)
data = np.empty((4032, 6032, 3), dtype=np.float32)
data[:, :, 0] = temp[0:4032, :]
data[:, :, 1] = temp[4032 : 2*4032, :]
data[:, :, 2] = temp[2*4032 : 3*4032, :]
raw = imaging.ImageInfo("DSC_1372_12096x6032_rgb_out_demosaic", data)
raw.set_color_space("raw")
raw.set_bayer_pattern("rggb")
raw.set_channel_gain((1.94921875, 1.0, 1.0, 1.34375)) # Please shuffle the values
# depending on bayer_pattern
raw.set_bit_depth(14)
raw.set_black_level((600, 600, 600, 600))
raw.set_white_level((15520, 15520, 15520, 15520))
# the ColorMatrix2 found from the metadata
raw.set_color_matrix([[.9020, -.2890, -.0715],\
[-.4535, 1.2436, .2348],\
[-.0934, .1919, .7086]])
else:
print("Warning! image_name not recognized.")
# ===================================
# Add noise
# ===================================
if do_add_noise:
noise_mean = 0
noise_standard_deviation = 100
seed = 100
clip_range = [600, 65535]
data = utility.synthetic_image_generate(\
raw.get_width(), raw.get_height()).create_noisy_image(\
data, noise_mean, noise_standard_deviation, seed, clip_range)
else:
pass
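# A rough sketch of what a synthetic noise step like the one above typically
# does (plain numpy; this is an assumption, not the utility implementation):
#   rng = np.random.RandomState(seed)
#   noisy = data + rng.normal(noise_mean, noise_standard_deviation, data.shape)
#   data = np.clip(noisy, clip_range[0], clip_range[1])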
# ===================================
# Black level correction
# ===================================
if do_black_level_correction:
data = imaging.black_level_correction(data, \
raw.get_black_level(),\
raw.get_white_level(),\
[0, 2**raw.get_bit_depth() - 1])
utility.imsave(data, "images/" + image_name + "_out_black_level_correction.png", "uint16")
else:
pass
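# Conceptually, black level correction rescales each Bayer channel from
# [black_level, white_level] to the output clip range (a sketch of the usual
# formula, not necessarily imaging.black_level_correction verbatim):
#   clip_max = 2 ** raw.get_bit_depth() - 1
#   out = (in - black_level) / (white_level - black_level) * clip_max
#   out = np.clip(out, 0, clip_max)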
# ===================================
# Lens shading correction
# ===================================
if do_lens_shading_correction:
# normally dark_current_image and flat_field_image are
# captured in the image quality lab using a flat field chart;
# here we synthetically generate those two images
dark_current_image, flat_field_image = utility.synthetic_image_generate(\
raw.get_width(), raw.get_height()).create_lens_shading_correction_images(\
0, 65535, 40000)
# save the dark_current_image and flat_field_image for viewing
utility.imsave(dark_current_image, "images/" + image_name + "_dark_current_image.png", "uint16")
utility.imsave(flat_field_image, "images/" + image_name + "_flat_field_image.png", "uint16")
data = imaging.lens_shading_correction(data).flat_field_compensation(\
dark_current_image, flat_field_image)
# data = lsc.approximate_mathematical_compensation([0.01759, -28.37, -13.36])
utility.imsave(data, "images/" + image_name + "_out_lens_shading_correction.png", "uint16")
else:
pass
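# For reference, flat field compensation generally takes the form below,
# applied element-wise (a sketch of the technique, not necessarily what
# flat_field_compensation does internally):
#   gain = np.mean(flat_field_image - dark_current_image) / (flat_field_image - dark_current_image)
#   data = (data - dark_current_image) * gain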
# ===================================
# Bad pixel correction
# ===================================
if do_bad_pixel_correction:
neighborhood_size = 3
data = imaging.bad_pixel_correction(data, neighborhood_size)
utility.imsave(data, "images/" + image_name + "_out_bad_pixel_correction.png", "uint16")
else:
pass
# ===================================
# Channel gain for white balance
# ===================================
if do_channel_gain_white_balance:
data = imaging.channel_gain_white_balance(data,\
raw.get_channel_gain())
utility.imsave(data, "images/" + image_name + "_out_channel_gain_white_balance.png", "uint16")
else:
pass
# ===================================
# Bayer denoising
# ===================================
if do_bayer_denoise:
# bayer denoising parameters
neighborhood_size = 5
initial_noise_level = 65535 * 10 / 100
hvs_min = 1000
hvs_max = 2000
clip_range = [0, 65535]
threshold_red_blue = 1300
# data is the denoised output; the second output (texture_degree_debug) is ignored
data, _ = imaging.bayer_denoising(data).utilize_hvs_behavior(\
raw.get_bayer_pattern(), initial_noise_level, hvs_min, hvs_max, threshold_red_blue, clip_range)
utility.imsave(data, "images/" + image_name + "_out_bayer_denoising.png", "uint16")
# utility.imsave(np.clip(texture_degree_debug*65535, 0, 65535), "images/" + image_name + "_out_texture_degree_debug.png", "uint16")
else:
pass
# ===================================
# Demosaicing
# ===================================
if do_demosaic:
#data = imaging.demosaic(data, raw.get_bayer_pattern()).mhc(False)
data = imaging.demosaic(data, raw.get_bayer_pattern()).directionally_weighted_gradient_based_interpolation()
utility.imsave(data, "images/" + image_name + "_out_demosaic.png", "uint16")
else:
pass
# ===================================
# Demosaic artifact reduction
# ===================================
if do_demosaic_artifact_reduction:
data = imaging.demosaic(data).post_process_local_color_ratio(0.80 * 65535)
utility.imsave(data, "images/" + image_name + "_out_local_color_ratio.png", "uint16")
edge_detection_kernel_size = 5
edge_threshold = 0.05
# the first output is the main output; the second output, edge_location, is a debug output
data, _ = imaging.demosaic(data).post_process_median_filter(edge_detection_kernel_size, edge_threshold)
utility.imsave(data, "images/" + image_name + "_out_median_filter.png", "uint16")
# utility.imsave(edge_location*65535, "images/" + image_name + "_edge_location.png", "uint16")
else:
pass
# ===================================
# Color correction
# ===================================
if do_color_correction:
data = imaging.color_correction(data, raw.get_color_matrix()).apply_cmatrix()
utility.imsave(data, "images/" + image_name + "_out_color_correction.png", "uint16")
else:
pass
# ===================================
# Gamma
# ===================================
if do_gamma:
# brightening
data = imaging.nonlinearity(data, "brightening").luma_adjustment(80.)
# gamma by value
#data = imaging.nonlinearity(data, "gamma").by_value(1/2.2, [0, 65535])
# gamma by table
# data = imaging.nonlinearity(data, "gamma").by_table("tables/gamma_2.4.txt", "gamma", [0, 65535])
# gamma by equation
data = imaging.nonlinearity(data, "gamma").by_equation(-0.9, -8.0, [0, 65535])
utility.imsave(data, "images/" + image_name + "_out_gamma.png", "uint16")
else:
pass
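# For reference, the commented-out "gamma by value" variant with gamma = 1/2.2
# over [0, 65535] would amount to roughly (sketch only):
#   data = 65535. * (np.clip(data, 0, 65535) / 65535.) ** (1. / 2.2)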
# ===================================
# Chromatic aberration correction
# ===================================
if do_chromatic_aberration_correction:
nsr_threshold = 90.
cr_threshold = 6425./2
data = imaging.chromatic_aberration_correction(data).purple_fringe_removal(nsr_threshold, cr_threshold)
utility.imsave(data, "images/" + image_name + "_out_purple_fringe_removal.png", "uint16")
else:
pass
# ===================================
# Tone mapping
# ===================================
if do_tone_mapping:
data = imaging.tone_mapping(data).nonlinear_masking(1.0)
utility.imsave(data, "images/" + image_name + "_out_tone_mapping_nl_masking.png", "uint16")
# data = imaging.tone_mapping(data).dynamic_range_compression("normal", [-25., 260.], [0, 65535])
# utility.imsave(data, "images/" + image_name + "_out_tone_mapping_drc.png", "uint16")
else:
pass
# ===================================
# Memory color enhancement
# ===================================
if do_memory_color_enhancement:
# target_hue = [30., -115., 100.]
# hue_preference = [45., -90., 130.]
# hue_sigma = [20., 10., 5.]
# is_both_side = [True, False, False]
# multiplier = [0.6, 0.6, 0.6]
# chroma_preference = [25., 17., 30.]
# chroma_sigma = [10., 10., 5.]
target_hue = [30., -125., 100.]
hue_preference = [20., -118., 130.]
hue_sigma = [20., 10., 5.]
is_both_side = [True, False, False]
multiplier = [0.6, 0.6, 0.6]
chroma_preference = [25., 14., 30.]
chroma_sigma = [10., 10., 5.]
data = imaging.memory_color_enhancement(data).by_hue_squeeze(target_hue,\
hue_preference,\
hue_sigma,\
is_both_side,\
multiplier,\
chroma_preference,\
chroma_sigma)
utility.imsave(data, "images/" + image_name + "_out_memory_color_enhancement.png", "uint16")
else:
pass
# ===================================
# Noise reduction
# ===================================
if do_noise_reduction:
# sigma filter parameters
neighborhood_size = 7
sigma = [1000, 500, 500]
data = imaging.noise_reduction(data).sigma_filter(neighborhood_size, sigma)
utility.imsave(data, "images/" + image_name + "_out_noise_reduction.png", "uint16")
else:
pass
# ===================================
# Sharpening
# ===================================
if do_sharpening:
data = imaging.sharpening(data).unsharp_masking()
utility.imsave(data, "images/" + image_name + "_out_sharpening.png", "uint16")
else:
pass
# ===================================
# Distortion correction
# ===================================
if do_distortion_correction:
correction_type="barrel-1"
strength=0.5
zoom_type="fit"
clip_range=[0, 65535]
data = imaging.distortion_correction(data).empirical_correction(correction_type, strength, zoom_type, clip_range)
utility.imsave(data, "images/" + image_name + "_out_distortion_correction.png", "uint16")
else:
pass
|
coldsweat/fetcher.py | jeroenh/coldsweat | 106 | 44091 | # -*- coding: utf-8 -*-
'''
Description: the feed fetcher
Copyright (c) 2013—2016 <NAME>
Portions are copyright (c) 2013 <NAME>
License: MIT (see LICENSE for details)
'''
import sys, os, re, time, urlparse
from datetime import datetime
from peewee import IntegrityError
import feedparser
import requests
from requests.exceptions import *
from webob.exc import *
from coldsweat import *
from plugins import trigger_event
from models import *
from utilities import *
from translators import *
import markup
import filters
__all__ = [
'Fetcher',
'fetch_url'
]
FETCH_ICONS_DELTA = 30 # Days
class Fetcher(object):
'''
Fetch a single given feed
'''
def __init__(self, feed):
# Save timestamp for current fetch operation
self.instant = datetime.utcnow()
# Extract netloc
_, self.netloc, _, _, _ = urlparse.urlsplit(feed.self_link)
self.feed = feed
def handle_500(self, response):
'''
Internal server error
'''
self.feed.error_count += 1
self.feed.last_status = response.status_code
logger.warn(u"%s has caused an error on server, skipped" % self.netloc)
raise HTTPInternalServerError
def handle_403(self, response):
'''
Forbidden
'''
self.feed.error_count += 1
self.feed.last_status = response.status_code
logger.warn(u"%s access was denied, skipped" % self.netloc)
raise HTTPForbidden
def handle_404(self, response):
'''
Not Found
'''
self.feed.error_count += 1
self.feed.last_status = response.status_code
logger.warn(u"%s has been not found, skipped" % self.netloc)
raise HTTPNotFound
def handle_410(self, response):
'''
Gone
'''
self.feed.is_enabled = False
self.feed.error_count += 1
self.feed.last_status = response.status_code
logger.warn(u"%s is gone, disabled" % self.netloc)
self._synthesize_entry('Feed has been removed from the origin server.')
raise HTTPGone
def handle_304(self, response):
'''
Not modified
'''
logger.debug(u"%s hasn't been modified, skipped" % self.netloc)
self.feed.last_status = response.status_code
raise HTTPNotModified
def handle_301(self, response):
'''
Moved permanently
'''
self_link = response.url
try:
Feed.get(self_link=self_link)
except Feed.DoesNotExist:
self.feed.self_link = self_link
self.feed.last_status = response.status_code
logger.info(u"%s has changed its location, updated to %s" % (self.netloc, self_link))
else:
self.feed.is_enabled = False
self.feed.last_status = DuplicatedFeedError.code
self.feed.error_count += 1
self._synthesize_entry('Feed has a duplicated web address.')
logger.warn(u"new %s location %s is duplicated, disabled" % (self.netloc, self_link))
raise DuplicatedFeedError
def handle_200(self, response):
'''
OK plus redirects
'''
self.feed.etag = response.headers.get('ETag', None)
# Save final status code discarding redirects
self.feed.last_status = response.status_code
handle_307 = handle_200 # Alias
handle_302 = handle_200 # Alias
def update_feed(self):
logger.debug(u"updating %s" % self.netloc)
# Check freshness
for value in [self.feed.last_checked_on, self.feed.last_updated_on]:
if not value:
continue
# No datetime.timedelta since we need to
# deal with large seconds values
delta = datetime_as_epoch(self.instant) - datetime_as_epoch(value)
if delta < config.fetcher.min_interval:
logger.debug(u"%s is below minimun fetch interval, skipped" % self.netloc)
return
try:
response = fetch_url(self.feed.self_link,
timeout=config.fetcher.timeout,
etag=self.feed.etag,
modified_since=self.feed.last_updated_on)
except RequestException:
# Record any network error as 'Service Unavailable'
self.feed.last_status = HTTPServiceUnavailable.code
self.feed.error_count += 1
logger.warn(u"a network error occured while fetching %s, skipped" % self.netloc)
self.check_feed_health()
self.feed.save()
return
self.feed.last_checked_on = self.instant
# Check if we got a redirect first
if response.history:
status = response.history[0].status_code
else:
status = response.status_code
try:
handler = getattr(self, 'handle_%d' % status, None)
if handler:
logger.debug(u"got status %s from server" % status)
handler(response)
else:
self.feed.last_status = status
logger.warn(u"%s replied with unhandled status %d, aborted" % (self.netloc, status))
return
self._parse_feed(response.text)
self._fetch_icon()
except HTTPNotModified:
pass # Nothing to do
except (HTTPError, DuplicatedFeedError):
self.check_feed_health()
finally:
self.feed.save()
def check_feed_health(self):
if config.fetcher.max_errors and self.feed.error_count > config.fetcher.max_errors:
self._synthesize_entry('Feed has accumulated too many errors (last was %s).' % filters.status_title(self.feed.last_status))
logger.warn(u"%s has accomulated too many errors, disabled" % self.netloc)
self.feed.is_enabled = False
return
def update_feed_with_data(self, data):
self._parse_feed(data)
self.feed.save()
def _parse_feed(self, data):
soup = feedparser.parse(data)
# Got parsing error?
if hasattr(soup, 'bozo') and soup.bozo:
logger.debug(u"%s caused a parser error (%s), tried to parse it anyway" % (self.netloc, soup.bozo_exception))
ft = FeedTranslator(soup.feed)
self.feed.last_updated_on = ft.get_timestamp(self.instant)
self.feed.alternate_link = ft.get_alternate_link()
self.feed.title = self.feed.title or ft.get_title() # Do not set again if already set
#entries = []
feed_author = ft.get_author()
for entry_dict in soup.entries:
t = EntryTranslator(entry_dict)
link = t.get_link()
guid = t.get_guid(default=link)
if not guid:
logger.warn(u'could not find GUID for entry from %s, skipped' % self.netloc)
continue
timestamp = t.get_timestamp(self.instant)
content_type, content = t.get_content(('text/plain', ''))
# Skip ancient entries
if config.fetcher.max_history and (self.instant - timestamp).days > config.fetcher.max_history:
logger.debug(u"entry %s from %s is over maximum history, skipped" % (guid, self.netloc))
continue
try:
# If entry is already in database with same hashed GUID, skip it
Entry.get(guid_hash=make_sha1_hash(guid))
logger.debug(u"duplicated entry %s, skipped" % guid)
continue
except Entry.DoesNotExist:
pass
entry = Entry(
feed = self.feed,
guid = guid,
link = link,
title = t.get_title(default='Untitled'),
author = t.get_author() or feed_author,
content = content,
content_type = content_type,
last_updated_on = timestamp
)
# At this point we are pretty sure we don't have the entry
# already in the database, so alert plugins and save the data
trigger_event('entry_parsed', entry, entry_dict)
entry.save()
#@@TODO: entries.append(entry)
logger.debug(u"parsed entry %s from %s" % (guid, self.netloc))
#return entries
def _fetch_icon(self):
if not self.feed.icon or not self.feed.icon_last_updated_on or (self.instant - self.feed.icon_last_updated_on).days > FETCH_ICONS_DELTA:
# Prefer alternate_link if available since self_link could
# point to Feed Burner or similar services
self.feed.icon = self._google_favicon_fetcher(self.feed.alternate_link or self.feed.self_link)
self.feed.icon_last_updated_on = self.instant
logger.debug(u"fetched favicon %s..." % (self.feed.icon[:70]))
def _google_favicon_fetcher(self, url):
'''
Fetch a site favicon via Google service
'''
endpoint = "http://www.google.com/s2/favicons?domain=%s" % urlparse.urlsplit(url).hostname
try:
response = fetch_url(endpoint)
except RequestException, exc:
logger.warn(u"could not fetch favicon for %s (%s)" % (url, exc))
return Feed.DEFAULT_ICON
return make_data_uri(response.headers['Content-Type'], response.content)
def add_synthesized_entry(self, title, content_type, content):
'''
Create an HTML entry for this feed
'''
# Since we don't know the mechanism the feed used to build a GUID for its entries,
# synthesize a tag URI from the link and a random string. This makes
# entries internally generated by Coldsweat reasonably globally unique
guid = ENTRY_TAG_URI % make_sha1_hash(self.feed.self_link + make_nonce())
entry = Entry(
feed = self.feed,
guid = guid,
title = title,
author = 'Coldsweat',
content = content,
content_type = content_type,
last_updated_on = self.instant
)
entry.save()
logger.debug(u"synthesized entry %s" % guid)
return entry
def _synthesize_entry(self, reason):
title = u'This feed has been disabled'
content = render_template(os.path.join(template_dir, '_entry_feed_disabled.html'), {'reason': reason})
return self.add_synthesized_entry(title, 'text/html', content)
def fetch_url(url, timeout=10, etag=None, modified_since=None):
'''
Fetch a given URL, optionally issuing a 'Conditional GET' request
'''
request_headers = {
'User-Agent': USER_AGENT
}
# Conditional GET headers
if etag and modified_since:
logger.debug(u"fetching %s with a conditional GET (%s %s)" % (url, etag, format_http_datetime(modified_since)))
request_headers['If-None-Match'] = etag
request_headers['If-Modified-Since'] = format_http_datetime(modified_since)
try:
response = requests.get(url, timeout=timeout, headers=request_headers)
except RequestException, exc:
logger.debug(u"tried to fetch %s but got %s" % (url, exc.__class__.__name__))
raise exc
return response
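# Example (illustrative): a caller that saved the previous ETag and timestamp
# can ask for a conditional GET; both values must be set for the conditional
# headers to be added, and a 304 reply surfaces via response.status_code.
#   response = fetch_url(feed.self_link,
#                        timeout=config.fetcher.timeout,
#                        etag=feed.etag,
#                        modified_since=feed.last_updated_on)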
# ------------------------------------------------------
# Custom error codes 9xx & exceptions
# ------------------------------------------------------
class DuplicatedFeedError(Exception):
code = 900
title = 'Duplicated feed'
explanation = 'Feed address matches another already present in the database.'
# Update WebOb status codes map
for klass in (DuplicatedFeedError,):
status_map[klass.code] = klass
|
leo/modes/velocity.py | ATikhonov2/leo-editor | 1,550 | 44096 | # Leo colorizer control file for velocity mode.
# This file is in the public domain.
# Properties for velocity mode.
properties = {
"commentEnd": "*#",
"commentStart": "#*",
"lineComment": "##",
}
# Attributes dict for velocity_main ruleset.
velocity_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for velocity_velocity ruleset.
velocity_velocity_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for velocity_javascript ruleset.
velocity_javascript_attributes_dict = {
"default": "MARKUP",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for velocity_javascript2 ruleset.
velocity_javascript2_attributes_dict = {
"default": "MARKUP",
"digit_re": "(0x[[:xdigit:]]+[lL]?|[[:digit:]]+(e[[:digit:]]*)?[lLdDfF]?)",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Attributes dict for velocity_back_to_html ruleset.
velocity_back_to_html_attributes_dict = {
"default": "MARKUP",
"digit_re": "(0x[[:xdigit:]]+[lL]?|[[:digit:]]+(e[[:digit:]]*)?[lLdDfF]?)",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Attributes dict for velocity_css ruleset.
velocity_css_attributes_dict = {
"default": "MARKUP",
"digit_re": "(0x[[:xdigit:]]+[lL]?|[[:digit:]]+(e[[:digit:]]*)?[lLdDfF]?)",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Attributes dict for velocity_css2 ruleset.
velocity_css2_attributes_dict = {
"default": "MARKUP",
"digit_re": "[[:digit:]]+(pt|pc|in|mm|cm|em|ex|px|ms|s|%)",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "-_",
}
# Dictionary of attributes dictionaries for velocity mode.
attributesDictDict = {
"velocity_back_to_html": velocity_back_to_html_attributes_dict,
"velocity_css": velocity_css_attributes_dict,
"velocity_css2": velocity_css2_attributes_dict,
"velocity_javascript": velocity_javascript_attributes_dict,
"velocity_javascript2": velocity_javascript2_attributes_dict,
"velocity_main": velocity_main_attributes_dict,
"velocity_velocity": velocity_velocity_attributes_dict,
}
# Keywords dict for velocity_main ruleset.
velocity_main_keywords_dict = {}
# Keywords dict for velocity_velocity ruleset.
velocity_velocity_keywords_dict = {
"#else": "keyword1",
"#elseif": "keyword1",
"#end": "keyword1",
"#foreach": "keyword1",
"#if": "keyword1",
"#include": "keyword1",
"#macro": "keyword1",
"#parse": "keyword1",
"#set": "keyword1",
"#stop": "keyword1",
}
# Keywords dict for velocity_javascript ruleset.
velocity_javascript_keywords_dict = {}
# Keywords dict for velocity_javascript2 ruleset.
velocity_javascript2_keywords_dict = {}
# Keywords dict for velocity_back_to_html ruleset.
velocity_back_to_html_keywords_dict = {}
# Keywords dict for velocity_css ruleset.
velocity_css_keywords_dict = {}
# Keywords dict for velocity_css2 ruleset.
velocity_css2_keywords_dict = {}
# Dictionary of keywords dictionaries for velocity mode.
keywordsDictDict = {
"velocity_back_to_html": velocity_back_to_html_keywords_dict,
"velocity_css": velocity_css_keywords_dict,
"velocity_css2": velocity_css2_keywords_dict,
"velocity_javascript": velocity_javascript_keywords_dict,
"velocity_javascript2": velocity_javascript2_keywords_dict,
"velocity_main": velocity_main_keywords_dict,
"velocity_velocity": velocity_velocity_keywords_dict,
}
# Rules for velocity_main ruleset.
def velocity_rule0(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="<!--", end="-->",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def velocity_rule1(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<SCRIPT", end="</SCRIPT>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="velocity::javascript",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def velocity_rule2(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<STYLE", end="</STYLE>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="velocity::css",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def velocity_rule3(colorer, s, i):
return colorer.match_span(s, i, kind="keyword2", begin="<!", end=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="xml::dtd-tags",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def velocity_rule4(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<", end=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="html::tags",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def velocity_rule5(colorer, s, i):
return colorer.match_span(s, i, kind="literal2", begin="&", end=";",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=True)
# Rules dict for velocity_main ruleset.
rulesDict1 = {
"&": [velocity_rule5,],
"<": [velocity_rule0,velocity_rule1,velocity_rule2,velocity_rule3,velocity_rule4,],
}
# Rules for velocity_velocity ruleset.
def velocity_rule6(colorer, s, i):
return colorer.match_span(s, i, kind="comment2", begin="#*", end="*#",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def velocity_rule7(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment3", seq="##",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def velocity_rule8(colorer, s, i):
return colorer.match_span(s, i, kind="keyword3", begin="${", end="}",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def velocity_rule9(colorer, s, i):
return colorer.match_mark_following(s, i, kind="keyword3", pattern="$!",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=False)
def velocity_rule10(colorer, s, i):
return colorer.match_mark_following(s, i, kind="keyword3", pattern="$",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=False)
def velocity_rule11(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for velocity_velocity ruleset.
rulesDict2 = {
"#": [velocity_rule6,velocity_rule7,velocity_rule11,],
"$": [velocity_rule8,velocity_rule9,velocity_rule10,],
"0": [velocity_rule11,],
"1": [velocity_rule11,],
"2": [velocity_rule11,],
"3": [velocity_rule11,],
"4": [velocity_rule11,],
"5": [velocity_rule11,],
"6": [velocity_rule11,],
"7": [velocity_rule11,],
"8": [velocity_rule11,],
"9": [velocity_rule11,],
"@": [velocity_rule11,],
"A": [velocity_rule11,],
"B": [velocity_rule11,],
"C": [velocity_rule11,],
"D": [velocity_rule11,],
"E": [velocity_rule11,],
"F": [velocity_rule11,],
"G": [velocity_rule11,],
"H": [velocity_rule11,],
"I": [velocity_rule11,],
"J": [velocity_rule11,],
"K": [velocity_rule11,],
"L": [velocity_rule11,],
"M": [velocity_rule11,],
"N": [velocity_rule11,],
"O": [velocity_rule11,],
"P": [velocity_rule11,],
"Q": [velocity_rule11,],
"R": [velocity_rule11,],
"S": [velocity_rule11,],
"T": [velocity_rule11,],
"U": [velocity_rule11,],
"V": [velocity_rule11,],
"W": [velocity_rule11,],
"X": [velocity_rule11,],
"Y": [velocity_rule11,],
"Z": [velocity_rule11,],
"a": [velocity_rule11,],
"b": [velocity_rule11,],
"c": [velocity_rule11,],
"d": [velocity_rule11,],
"e": [velocity_rule11,],
"f": [velocity_rule11,],
"g": [velocity_rule11,],
"h": [velocity_rule11,],
"i": [velocity_rule11,],
"j": [velocity_rule11,],
"k": [velocity_rule11,],
"l": [velocity_rule11,],
"m": [velocity_rule11,],
"n": [velocity_rule11,],
"o": [velocity_rule11,],
"p": [velocity_rule11,],
"q": [velocity_rule11,],
"r": [velocity_rule11,],
"s": [velocity_rule11,],
"t": [velocity_rule11,],
"u": [velocity_rule11,],
"v": [velocity_rule11,],
"w": [velocity_rule11,],
"x": [velocity_rule11,],
"y": [velocity_rule11,],
"z": [velocity_rule11,],
}
# Rules for velocity_javascript ruleset.
def velocity_rule12(colorer, s, i):
return colorer.match_seq(s, i, kind="markup", seq=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="velocity::javascript2")
def velocity_rule13(colorer, s, i):
return colorer.match_seq(s, i, kind="markup", seq="SRC=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="velocity::back_to_html")
# Rules dict for velocity_javascript ruleset.
rulesDict3 = {
">": [velocity_rule12,],
"S": [velocity_rule13,],
}
# Rules for velocity_javascript2 ruleset.
# Rules dict for velocity_javascript2 ruleset.
rulesDict4 = {}
# Rules for velocity_back_to_html ruleset.
def velocity_rule14(colorer, s, i):
return colorer.match_seq(s, i, kind="markup", seq=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="velocity::main")
# Rules dict for velocity_back_to_html ruleset.
rulesDict5 = {
">": [velocity_rule14,],
}
# Rules for velocity_css ruleset.
def velocity_rule15(colorer, s, i):
return colorer.match_seq(s, i, kind="markup", seq=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="velocity::css2")
# Rules dict for velocity_css ruleset.
rulesDict6 = {
">": [velocity_rule15,],
}
# Rules for velocity_css2 ruleset.
# Rules dict for velocity_css2 ruleset.
rulesDict7 = {}
# x.rulesDictDict for velocity mode.
rulesDictDict = {
"velocity_back_to_html": rulesDict5,
"velocity_css": rulesDict6,
"velocity_css2": rulesDict7,
"velocity_javascript": rulesDict3,
"velocity_javascript2": rulesDict4,
"velocity_main": rulesDict1,
"velocity_velocity": rulesDict2,
}
# Import dict for velocity mode.
importDict = {
"velocity_css2": ["velocity_css2::velocity","css::main",],
"velocity_javascript2": ["velocity_javascript2::velocity","javascript::main",],
"velocity_main": ["velocity_main::velocity",],
}
|
phasing/utils/tag_bam_post_phasing.py | ArthurDondi/cDNA_Cupcake | 205 | 44099 |
__author__ = "<EMAIL>"
"""
Tagging BAM files with phasing info
"""
import pysam
from csv import DictReader
def main(read_bam, hap_info, output_bam, celltype_file=None):
d = {}
celltype_info = {}
#for r in DictReader(open('phased.partial.cleaned.hap_info.txt'),delimiter=','):
for r in DictReader(open(hap_info), delimiter=','):
d[r['id']] = r['hap_postclean']
if celltype_file is not None:
for r in DictReader(open(celltype_file), delimiter=','):
if r['id'] in d: celltype_info[r['id']] = r['Celltype'].replace(' ','_').replace(':','_')
reader = pysam.AlignmentFile(read_bam, 'rb', check_sq=False)
f2 = pysam.AlignmentFile(output_bam, 'wb', header=reader.header)
for r in reader:
d2 = r.to_dict()
if r.qname in d:
d2['tags'].append('RG:Z:' + str(d[r.qname]))
if r.qname in celltype_info: d2['tags'].append('XC:Z:' + str(celltype_info[r.qname]))
else: d2['tags'].append('RG:Z:NA')
x = pysam.AlignedSegment.from_dict(d2, r.header)
f2.write(x)
f2.close()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser("Tagging BAM files with phasing info")
parser.add_argument("read_bam", help="Aligned BAM file that be tagged")
parser.add_argument("hap_info", help="Comma-delimited hap info CSV, must have column 'id' and 'hap_postclean'")
parser.add_argument("output_bam", help="Output tagged BAM filename")
parser.add_argument("--celltype", default=None, help="[Optional] Comma-delimited celltype info CSV, must have column 'id' and 'Celltype'")
args = parser.parse_args()
main(args.read_bam, args.hap_info, args.output_bam, args.celltype)
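# Example invocation (file names are hypothetical): the hap info CSV needs at
# least the columns 'id' and 'hap_postclean'; the optional celltype CSV needs
# 'id' and 'Celltype'.
#   python tag_bam_post_phasing.py aligned.bam phased.hap_info.csv tagged.bam --celltype celltypes.csv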
|
pudzu/sandbox/tinytim.py | Udzu/pudzu | 119 | 44144 | from collections import namedtuple
from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition # pylint: disable=no-name-in-module
from pudzu.utils import weighted_choice
class LSystem:
Rule = namedtuple("Rule", "predecessor successor weight", defaults=(1.0,))
def __init__(self, axiom, rules, angle=4):
self.axiom = axiom
self.angle = 360 / angle
self.rules = {}
self.weights = {}
for rule in rules:
pr = self.Rule(*rule)
self.rules.setdefault(pr.predecessor, []).append(pr.successor)
self.weights.setdefault(pr.predecessor, []).append(pr.weight)
def expand(self, iterations):
state = self.axiom
for _ in range(iterations):
state = "".join([weighted_choice(self.rules.get(c, [c]), self.weights.get(c, [1])) for c in state])
return state
def plot(self, screen, iterations, size, reset=True, tracer=(0, 0)):
if reset:
screen.clearscreen()
screen.tracer(*tracer)
stack = []
for c in self.expand(iterations):
if c == "F":
fd(size)
elif c == "G":
pu()
fd(size)
pd()
elif c == "+":
rt(self.angle)
elif c == "-":
lt(self.angle)
elif c == "[":
stack.append((position(), heading()))
elif c == "]":
p, h = stack.pop()
pu()
setposition(p)
setheading(h)
pd()
screen.update()
Koch = LSystem("F--F--F", [("F", "F+F--F+F")], 6)
Dragon = LSystem("FX", [("F", ""), ("Y", "+FX--FY+"), ("X", "-FX++FY-")], 8)
Plant07 = LSystem("Z", [("Z", "ZFX[+Z][-Z]"), ("X", "X[-FFF][+FFF]FX")], 14)
Plant08 = LSystem("SLFFF", [("S", "[+++Z][---Z]TS"), ("Z", "+H[-Z]L"), ("H", "-Z[+H]L"), ("T", "TL"), ("L", "[-FFF][+FFF]F")], 20)
Sierpinski = LSystem("AF", [("A", "BF+AF+BF"), ("B", "AF-BF-AF"), ("F", "")], 6)
Barnsley = LSystem("X", [("X", "F+[[X]-X]-F[-FX]+X"), ("F", "FF")], 14.4)
RandomWalk = LSystem("F", [("F", "FF"), ("F", "F+F"), ("F", "F++F"), ("F", "F-F")], 4)
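# Example usage (illustrative): render the Koch system defined above with the
# standard turtle module.
#   from turtle import Screen
#   Koch.plot(Screen(), iterations=3, size=5)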
|
examples/pytorch/diffpool/model/tensorized_layers/assignment.py | ketyi/dgl | 9,516 | 44156 | import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from model.tensorized_layers.graphsage import BatchedGraphSAGE
class DiffPoolAssignment(nn.Module):
def __init__(self, nfeat, nnext):
super().__init__()
self.assign_mat = BatchedGraphSAGE(nfeat, nnext, use_bn=True)
def forward(self, x, adj, log=False):
s_l_init = self.assign_mat(x, adj)
s_l = F.softmax(s_l_init, dim=-1)
return s_l
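# Shape note (illustrative): for node features x of shape (batch, n_nodes, nfeat),
# s_l has shape (batch, n_nodes, nnext); the softmax over the last dimension makes
# each row a soft assignment of a node to the nnext clusters of the pooled graph.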
|
Algo and DSA/LeetCode-Solutions-master/Python/insert-delete-getrandom-o1.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 44171 | # Time: O(1)
# Space: O(n)
from random import randint
class RandomizedSet(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.__set = []
self.__used = {}
def insert(self, val):
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
:type val: int
:rtype: bool
"""
if val in self.__used:
return False
self.__set += val,
self.__used[val] = len(self.__set)-1
return True
def remove(self, val):
"""
Removes a value from the set. Returns true if the set contained the specified element.
:type val: int
:rtype: bool
"""
if val not in self.__used:
return False
self.__used[self.__set[-1]] = self.__used[val]
self.__set[self.__used[val]], self.__set[-1] = self.__set[-1], self.__set[self.__used[val]]
self.__used.pop(val)
self.__set.pop()
return True
def getRandom(self):
"""
Get a random element from the set.
:rtype: int
"""
return self.__set[randint(0, len(self.__set)-1)]
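# Example usage (illustrative):
#   obj = RandomizedSet()
#   obj.insert(1)    # True
#   obj.remove(2)    # False, 2 was never inserted
#   obj.insert(2)    # True
#   obj.getRandom()  # 1 or 2, each with equal probability
#   obj.remove(1)    # True
#   obj.insert(2)    # False, 2 is already present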
|
examples/pubsub_broadcaster_server_example.py | sondrelg/fastapi_websocket_pubsub | 125 | 44192 | """
Multiple Servers linked via broadcaster example.
To run this example:
- 0. Set up a broadcast medium and pass its configuration to the endpoint (e.g. postgres on 'postgres://localhost:5432/')
- 1. run this script for the servers (as many instances as you'd like) - use the PORT env-variable to run them on different ports
- 2. once the servers are up, run notifier_client_test.py and connect to one of them
- 3. send a GET request to one server on: '/trigger'
- 4. See that the client receives the event - no matter which server you connected it to, or which server got the initial trigger to publish
"""
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.basename(__file__), "..")))
from fastapi_websocket_pubsub import PubSubEndpoint
import asyncio
import os
from starlette.websockets import WebSocket
import uvicorn
from fastapi import FastAPI
from fastapi.routing import APIRouter
PORT = int(os.environ.get("PORT") or "8000")
app = FastAPI()
router = APIRouter()
endpoint = PubSubEndpoint(broadcaster="postgres://localhost:5432/")
@router.websocket("/pubsub")
async def websocket_rpc_endpoint(websocket: WebSocket):
async with endpoint.broadcaster:
await endpoint.main_loop(websocket)
app.include_router(router)
async def events():
await asyncio.sleep(1)
await endpoint.publish(["guns", "germs"])
await asyncio.sleep(1)
await endpoint.publish(["germs"])
await asyncio.sleep(1)
await endpoint.publish(["steel"])
@app.get("/trigger")
async def trigger_events():
asyncio.create_task(events())
uvicorn.run(app, host="0.0.0.0", port=PORT)
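# Illustrative run (ports are arbitrary): start two instances and trigger one;
# a client subscribed to the other instance still receives the published events.
#   PORT=8000 python pubsub_broadcaster_server_example.py
#   PORT=8001 python pubsub_broadcaster_server_example.py
#   curl http://localhost:8000/trigger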
|
Chapter03/plot_convolution.py | arifmudi/Advanced-Deep-Learning-with-Python | 107 | 44209 | import matplotlib.pyplot as plt
import numpy as np
def plot_convolution(f, g):
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
ax1.set_yticklabels([])
ax1.set_xticklabels([])
ax1.plot(f, color='blue', label='f')
ax1.legend()
ax2.set_yticklabels([])
ax2.set_xticklabels([])
ax2.plot(g, color='red', label='g')
ax2.legend()
filtered = np.convolve(f, g, "same") / sum(g)
ax3.set_yticklabels([])
ax3.set_xticklabels([])
ax3.plot(filtered, color='green', label='f * g')
ax3.legend()
plt.show()
def plot_convolution_step_by_step(f, g):
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1)
ax1.set_yticklabels([])
ax1.set_xticklabels([])
ax1.plot(f, color='blue', label='f')
ax1.plot(np.roll(g, -10000), color='red', label='g')
ax2.set_yticklabels([])
ax2.set_xticklabels([])
ax2.plot(f, color='blue', label='f')
ax2.plot(np.roll(g, -5000), color='red', label='g')
ax3.set_yticklabels([])
ax3.set_xticklabels([])
ax3.plot(f, color='blue', label='f')
ax3.plot(g, color='red', label='g')
ax4.set_yticklabels([])
ax4.set_xticklabels([])
ax4.plot(f, color='blue', label='f')
ax4.plot(np.roll(g, 5000), color='red', label='g')
ax5.set_yticklabels([])
ax5.set_xticklabels([])
ax5.plot(f, color='blue', label='f')
ax5.plot(np.roll(g, 10000), color='red', label='g')
plt.show()
signal = np.zeros(30000)
signal[10000:20000] = 1
kernel = np.zeros(30000)
kernel[10000:20000] = np.linspace(1, 0, 10000)
plot_convolution(signal, kernel)
plot_convolution_step_by_step(signal, kernel)
|
rocAL/rocAL_pybind/example/tf_mnistTrainingExample/tf_mnist_classification_rali.py | asalmanp/MIVisionX | 153 | 44215 |
from __future__ import print_function
from amd.rali.plugin.tf import RALIIterator
from amd.rali.pipeline import Pipeline
import amd.rali.ops as ops
import amd.rali.types as types
import sys
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
############################### HYPER PARAMETERS FOR TRAINING ###############################
learning_rate = 0.001
image_size = 28
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
############################### HYPER PARAMETERS FOR TRAINING ###############################
def get_label_one_hot(label_ndArray):
one_hot_vector_list = []
for label in label_ndArray:
one_hot_vector = np.zeros(num_classes)
np.put(one_hot_vector, label - 1, 1)
one_hot_vector_list.append(one_hot_vector)
return one_hot_vector_list
# Create model
weights = {
'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([num_classes]))
}
def neural_net(x):
# Hidden fully connected layer with 256 neurons
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
# Hidden fully connected layer with 256 neurons
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output fully connected layer with a neuron for each class
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# helper function, not used in training
def decode(tfrecord_serialized):
tfrecord_features = tf.parse_single_example(tfrecord_serialized, features={
'image/height': tf.FixedLenFeature([], tf.int64),
'image/width': tf.FixedLenFeature([], tf.int64),
'image/class/label': tf.FixedLenFeature([], tf.int64),
'image/raw': tf.FixedLenFeature([], tf.string),
}, name='features')
image = tf.decode_raw(tfrecord_features['image/raw'], tf.float32)
image.set_shape([784])
label = tf.cast(tfrecord_features['image/class/label'], tf.int32)
# image_batch, label_batch = tf.train.batch([image, label], batch_size=bs)
return image, label
#RALI pipeline
class HybridPipe(Pipeline):
def __init__(self, feature_key_map, tfrecordreader_type, batch_size, num_threads, device_id, data_dir, crop, rali_cpu = True):
super(HybridPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id,rali_cpu=rali_cpu)
self.input = ops.TFRecordReader(path=data_dir, index_path = "", reader_type=tfrecordreader_type, user_feature_key_map=feature_key_map,
features={
'image/encoded':tf.FixedLenFeature((), tf.string, ""),
'image/class/label':tf.FixedLenFeature([1], tf.int64, -1),
'image/filename':tf.FixedLenFeature((), tf.string, "")
},
)
rali_device = 'cpu' if rali_cpu else 'gpu'
decoder_device = 'cpu' if rali_cpu else 'mixed'
self.decode = ops.ImageDecoderRaw(user_feature_key_map=feature_key_map, device=decoder_device, output_type=types.RGB)
#self.res = ops.Resize(device=rali_device, resize_x=crop[0], resize_y=crop[1])
self.cmnp = ops.CropMirrorNormalize(device="cpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop,crop),
image_type=types.GRAY,
mean=[0 ,0,0],
std=[255,255,255], mirror=0)
self.coin = ops.CoinFlip(probability=0.5)
print('rali "{0}" variant'.format(rali_device))
def define_graph(self):
inputs = self.input(name ="Reader")
images = inputs["image/encoded"]
labels = inputs["image/class/label"]
images = self.decode(images)
#rng = self.coin()
output = self.cmnp(images)
return [output, labels]
# compute accuracy
def compute_accuracy(predictions, labels):
correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
return accuracy
def train_mnist_rali(data_path, _rali_cpu, batch_size):
# setup keep_prob
input_X = tf.placeholder('float32',shape = (batch_size,784))
labels = tf.placeholder('float32',shape = (batch_size,10))
logits = neural_net(input_X)
cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits), name="loss" )
optimizer = tf.train.AdamOptimizer().minimize(cost)
train_prediction = tf.nn.softmax(logits)
accuracy = compute_accuracy(train_prediction, labels)
#correct_label = tf.argmax(labels, 1)
num_epochs = 10
crop_size = 28
TFRecordReaderType = 0
featureKeyMap = {
'image/encoded':'image_raw',
'image/class/label':'label',
'image/filename':''
}
trainPipe = HybridPipe(feature_key_map=featureKeyMap, tfrecordreader_type=TFRecordReaderType, batch_size=batch_size, num_threads=1, device_id=0, data_dir=data_path+"/train", crop=crop_size, rali_cpu=_rali_cpu)
valPipe = HybridPipe(feature_key_map=featureKeyMap, tfrecordreader_type=TFRecordReaderType, batch_size=batch_size, num_threads=1, device_id=0, data_dir=data_path+"/val", crop=crop_size, rali_cpu=_rali_cpu)
trainPipe.build()
valPipe.build()
trainIterator = RALIIterator(trainPipe)
valIterator = RALIIterator(valPipe)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for epoch in range(num_epochs):
print('\n\n----------------------------Training Model for Epoch: ', epoch, "-----------------------------------------------")
epoch_loss = 0
train_accuracy = 0
for i, (image_train, label_train) in enumerate(trainIterator, 0):
image_train_res = image_train.reshape(batch_size, 784)
train_label_one_hot_list = get_label_one_hot(label_train)
_, c, tacc = sess.run([optimizer, cost, accuracy], feed_dict={input_X:image_train_res, labels: train_label_one_hot_list})
epoch_loss += c
train_accuracy += tacc
print('Epoch', epoch, 'completed out of',num_epochs,'loss:',epoch_loss, 'accuracy:',(train_accuracy*100)/i, 'count :', i)
#run evaluation for every epoch
mean_acc = 0
print("\n\n----------------------------Evaluating Model ---------------------------------------------------------------")
for j, (val_image_ndArray, val_label_ndArray) in enumerate(valIterator, 0):
#val_image_ndArray_transposed = np.transpose(val_image_ndArray, [0, 2, 3, 1])
val_image_ndArray_res = val_image_ndArray.reshape(batch_size, 784)
val_label_one_hot_list = get_label_one_hot(val_label_ndArray)
val_accuracy = sess.run(accuracy,
#[optimizer, accuracy, prediction, correct_label, correct_pred],
feed_dict={input_X: val_image_ndArray_res, labels: val_label_one_hot_list})
mean_acc += val_accuracy
#mean_loss = mean_loss + val_loss
#num_correct_predicate = 0
#for predicate in correct_predicate:
# if predicate == True:
# num_correct_predicate += 1
#print ("Step :: %s\tTarget :: %s\tPrediction :: %s\tCorrect Predictions :: %s/%s\tValidation Loss :: %.2f\tValidation Accuracy :: %.2f%%\t" % (j, val_target, val_prediction, num_correct_predicate, len(correct_predicate), val_loss, (val_accuracy * 100)))
mean_acc = (mean_acc * 100) / j
#mean_loss = (mean_loss * 100)/ j
print("\nSUMMARY:\nMean Accuracy :: %.2f%% count: %d" % (mean_acc, j))
def main():
if len(sys.argv) < 4:
print ('Please pass mnistTFRecord_dir cpu/gpu batch_size')
exit(0)
image_path = sys.argv[1]
if(sys.argv[2] == "cpu"):
_rali_cpu = True
else:
_rali_cpu = False
bs = int(sys.argv[3])
train_mnist_rali(image_path, _rali_cpu, bs)
if __name__ == '__main__':
main()
|
tests/test_casefold_migration.py | clmnin/sydent | 220 | 44234 |
import json
import os.path
from unittest.mock import patch
from twisted.trial import unittest
from scripts.casefold_db import (
calculate_lookup_hash,
update_global_associations,
update_local_associations,
)
from sydent.util import json_decoder
from sydent.util.emailutils import sendEmail
from tests.utils import make_sydent
class MigrationTestCase(unittest.TestCase):
def create_signedassoc(self, medium, address, mxid, ts, not_before, not_after):
return {
"medium": medium,
"address": address,
"mxid": mxid,
"ts": ts,
"not_before": not_before,
"not_after": not_after,
}
def setUp(self):
# Create a new sydent
config = {
"general": {
"templates.path": os.path.join(
os.path.dirname(os.path.dirname(__file__)), "res"
),
},
"crypto": {
"ed25519.signingkey": "<KEY>"
},
}
self.sydent = make_sydent(test_config=config)
# create some local associations
associations = []
for i in range(10):
address = "<EMAIL>" % i
associations.append(
{
"medium": "email",
"address": address,
"lookup_hash": calculate_lookup_hash(self.sydent, address),
"mxid": "@bob%d:example.com" % i,
"ts": (i * 10000),
"not_before": 0,
"not_after": 99999999999,
}
)
# create some casefold-conflicting associations
for i in range(5):
address = "<EMAIL>" % i
associations.append(
{
"medium": "email",
"address": address,
"lookup_hash": calculate_lookup_hash(self.sydent, address),
"mxid": "@otherbob%d:example.com" % i,
"ts": (i * 10000),
"not_before": 0,
"not_after": 99999999999,
}
)
# add all associations to db
cur = self.sydent.db.cursor()
cur.executemany(
"INSERT INTO local_threepid_associations "
"(medium, address, lookup_hash, mxid, ts, notBefore, notAfter) "
"VALUES (?, ?, ?, ?, ?, ?, ?)",
[
(
assoc["medium"],
assoc["address"],
assoc["lookup_hash"],
assoc["mxid"],
assoc["ts"],
assoc["not_before"],
assoc["not_after"],
)
for assoc in associations
],
)
self.sydent.db.commit()
# create some global associations
associations = []
originServer = self.sydent.config.general.server_name
for i in range(10):
address = "<EMAIL>" % i
mxid = "@bob%d:example.com" % i
ts = 10000 * i
associations.append(
{
"medium": "email",
"address": address,
"lookup_hash": calculate_lookup_hash(self.sydent, address),
"mxid": mxid,
"ts": ts,
"not_before": 0,
"not_after": 99999999999,
"originServer": originServer,
"originId": i,
"sgAssoc": json.dumps(
self.create_signedassoc(
"email", address, mxid, ts, 0, 99999999999
)
),
}
)
# create some casefold-conflicting associations
for i in range(5):
address = "<EMAIL>" % i
mxid = "@BOB%d:example.com" % i
ts = 10000 * i
associations.append(
{
"medium": "email",
"address": address,
"lookup_hash": calculate_lookup_hash(self.sydent, address),
"mxid": mxid,
"ts": ts + 1,
"not_before": 0,
"not_after": 99999999999,
"originServer": originServer,
"originId": i + 10,
"sgAssoc": json.dumps(
self.create_signedassoc(
"email", address, mxid, ts, 0, 99999999999
)
),
}
)
# add all associations to db
cur = self.sydent.db.cursor()
cur.executemany(
"INSERT INTO global_threepid_associations "
"(medium, address, lookup_hash, mxid, ts, notBefore, notAfter, originServer, originId, sgAssoc) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
[
(
assoc["medium"],
assoc["address"],
assoc["lookup_hash"],
assoc["mxid"],
assoc["ts"],
assoc["not_before"],
assoc["not_after"],
assoc["originServer"],
assoc["originId"],
assoc["sgAssoc"],
)
for assoc in associations
],
)
self.sydent.db.commit()
def test_migration_email(self):
with patch("sydent.util.emailutils.smtplib") as smtplib:
# self.sydent.config.email.template is deprecated
if self.sydent.config.email.template is None:
templateFile = self.sydent.get_branded_template(
None,
"migration_template.eml",
)
else:
templateFile = self.sydent.config.email.template
sendEmail(
self.sydent,
templateFile,
"<EMAIL>",
{
"mxid": "@bob:example.com",
"subject_header_value": "MatrixID Deletion",
},
)
smtp = smtplib.SMTP.return_value
email_contents = smtp.sendmail.call_args[0][2].decode("utf-8")
self.assertIn("In the past", email_contents)
# test email was sent
smtp.sendmail.assert_called()
def test_local_db_migration(self):
with patch("sydent.util.emailutils.smtplib") as smtplib:
update_local_associations(
self.sydent,
self.sydent.db,
send_email=True,
dry_run=False,
test=True,
)
# test 5 emails were sent
smtp = smtplib.SMTP.return_value
self.assertEqual(smtp.sendmail.call_count, 5)
# don't send emails to people who weren't affected
self.assertNotIn(
smtp.sendmail.call_args_list,
[
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
],
)
# make sure someone who is affected gets email
self.assertIn("<EMAIL>", smtp.sendmail.call_args_list[0][0])
cur = self.sydent.db.cursor()
res = cur.execute("SELECT * FROM local_threepid_associations")
db_state = res.fetchall()
# five addresses should have been deleted
self.assertEqual(len(db_state), 10)
# iterate through db and make sure all addresses are casefolded and hash matches casefolded address
for row in db_state:
casefolded = row[2].casefold()
self.assertEqual(row[2], casefolded)
self.assertEqual(
calculate_lookup_hash(self.sydent, row[2]),
calculate_lookup_hash(self.sydent, casefolded),
)
def test_global_db_migration(self):
update_global_associations(
self.sydent,
self.sydent.db,
dry_run=False,
)
cur = self.sydent.db.cursor()
res = cur.execute("SELECT * FROM global_threepid_associations")
db_state = res.fetchall()
# five addresses should have been deleted
self.assertEqual(len(db_state), 10)
# iterate through db and make sure all addresses are casefolded and hash matches casefolded address
# and make sure the casefolded address matches the address in sgAssoc
for row in db_state:
casefolded = row[2].casefold()
self.assertEqual(row[2], casefolded)
self.assertEqual(
calculate_lookup_hash(self.sydent, row[2]),
calculate_lookup_hash(self.sydent, casefolded),
)
sgassoc = json_decoder.decode(row[9])
self.assertEqual(row[2], sgassoc["address"])
def test_local_no_email_does_not_send_email(self):
with patch("sydent.util.emailutils.smtplib") as smtplib:
update_local_associations(
self.sydent,
self.sydent.db,
send_email=False,
dry_run=False,
test=True,
)
smtp = smtplib.SMTP.return_value
# test no emails were sent
self.assertEqual(smtp.sendmail.call_count, 0)
def test_dry_run_does_nothing(self):
# reset DB
self.setUp()
cur = self.sydent.db.cursor()
# grab a snapshot of global table before running script
res1 = cur.execute("SELECT mxid FROM global_threepid_associations")
list1 = res1.fetchall()
with patch("sydent.util.emailutils.smtplib") as smtplib:
update_global_associations(
self.sydent,
self.sydent.db,
dry_run=True,
)
# test no emails were sent
smtp = smtplib.SMTP.return_value
self.assertEqual(smtp.sendmail.call_count, 0)
res2 = cur.execute("SELECT mxid FROM global_threepid_associations")
list2 = res2.fetchall()
self.assertEqual(list1, list2)
# grab a snapshot of local table db before running script
res3 = cur.execute("SELECT mxid FROM local_threepid_associations")
list3 = res3.fetchall()
with patch("sydent.util.emailutils.smtplib") as smtplib:
update_local_associations(
self.sydent,
self.sydent.db,
send_email=True,
dry_run=True,
test=True,
)
# test no emails were sent
smtp = smtplib.SMTP.return_value
self.assertEqual(smtp.sendmail.call_count, 0)
res4 = cur.execute("SELECT mxid FROM local_threepid_associations")
list4 = res4.fetchall()
self.assertEqual(list3, list4)
|
test_soundcard.py | bastibe/pysound | 490 | 44236 | import sys
import soundcard
import numpy
import pytest
skip_if_not_linux = pytest.mark.skipif(sys.platform != 'linux', reason='Only implemented for PulseAudio so far')
ones = numpy.ones(1024)
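# Test signal used throughout: a (1024, 2) stereo buffer whose left channel is
# all +1 and whose right channel is all -1, so channel mapping in the loopback
# tests can be checked simply by the sign of each channel's mean.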
signal = numpy.concatenate([[ones], [-ones]]).T
def test_speakers():
for speaker in soundcard.all_speakers():
assert isinstance(speaker.name, str)
assert hasattr(speaker, 'id')
assert isinstance(speaker.channels, int)
assert speaker.channels > 0
def test_microphones():
for microphone in soundcard.all_microphones():
assert isinstance(microphone.name, str)
assert hasattr(microphone, 'id')
assert isinstance(microphone.channels, int)
assert microphone.channels > 0
def test_default_playback():
soundcard.default_speaker().play(signal, 44100, channels=2)
def test_default_record():
recording = soundcard.default_microphone().record(1024, 44100)
    assert len(recording) == 1024
def test_default_blockless_record():
recording = soundcard.default_microphone().record(None, 44100)
@skip_if_not_linux
def test_name():
# The default is the application name, so when run from pytest,
# it’s “pytest” or “_jb_pytest_runner.py” or so.
assert 'pytest' in soundcard.get_name()
soundcard.set_name('testapp')
assert soundcard.get_name() == 'testapp'
@skip_if_not_linux
@pytest.mark.parametrize("argv,progname", [
(["./script.py"], "script.py"), # chmod +x script.py; ./script.py
(["path/to/script.py"], "script.py"), # python path/to/script.py or
# python -m path.to.script
(["module/__main__.py"], "module"), # python -m module
(["-m", "module.submodule"], "module.submodule"), # rare unresolved case
(["-c", "import soundcard; soundcard.foo()"], "import soundcard; soundcard.fo..."),
])
def test_infer_name(monkeypatch, argv, progname):
infer = soundcard.pulseaudio._PulseAudio._infer_program_name
monkeypatch.setattr(sys, "argv", argv)
assert infer() == progname
@pytest.fixture
def loopback_speaker():
import sys
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_speaker('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_speaker('Soundflower64')
elif sys.platform == 'linux':
# pacmd load-module module-null-sink channels=6 rate=48000
return soundcard.get_speaker('Null')
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_player(loopback_speaker):
with loopback_speaker.player(48000, channels=2, blocksize=512) as player:
yield player
@pytest.fixture
def loopback_microphone():
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_microphone('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_microphone('Soundflower64')
elif sys.platform == 'linux':
return soundcard.get_microphone('Null', include_loopback=True)
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_recorder(loopback_microphone):
with loopback_microphone.recorder(48000, channels=2, blocksize=512) as recorder:
yield recorder
def test_loopback_playback(loopback_player, loopback_recorder):
loopback_player.play(signal)
recording = loopback_recorder.record(1024*10)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
def test_loopback_reverse_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[1, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_reverse_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[1, 0], blocksize=512) as loopback_player:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_mono_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[0], blocksize=512) as loopback_player:
loopback_player.play(signal[:,0])
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
if sys.platform == 'linux':
# unmapped channels on linux are filled with the mean of other channels
assert right.mean() < left.mean()
else:
assert abs(right.mean()) < 0.01 # something like zero
assert (left > 0.5).sum() == len(signal)
def test_loopback_mono_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 1 or recording.shape[1] == 1
assert recording.mean() > 0
assert (recording > 0.5).sum() == len(signal)
def test_loopback_multichannel_channelmap(loopback_speaker, loopback_microphone):
with loopback_speaker.player(48000, channels=[2, 0], blocksize=512) as loopback_player:
with loopback_microphone.recorder(48000, channels=[2, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
|
migrations/versions/c0e8d68e84fa_added_anomaly_config_to_kpi.py | eltociear/chaos_genius | 320 | 44237 | <gh_stars>100-1000
"""added anomaly config to kpi
Revision ID: c0e8d68e84fa
Revises: <PASSWORD>
Create Date: 2021-09-02 09:08:21.174195
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c0e8d68e84fa'
down_revision = '<PASSWORD>0d4ab3bc9'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('kpi', sa.Column('anomaly_params', sa.JSON(), nullable=True))
op.add_column('kpi', sa.Column('anomaly_frequency', sa.String(length=80), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('kpi', 'anomaly_frequency')
op.drop_column('kpi', 'anomaly_params')
# ### end Alembic commands ###
|
chromecast/tools/build/package_test_deps.py | zealoussnow/chromium | 14,668 | 44251 | #!/usr/bin/env python
#
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Packages test dependencies as tar.gz file."""
import argparse
import json
import logging
import os
import sys
import tarfile
parser = argparse.ArgumentParser(
description='Package test dependencies as tar.gz files.')
parser.add_argument('--output', required=True,
help='Full path to the output file.')
parser.add_argument('--deps_list_path', required=True,
help='Full path to the json dependencies file.')
parser.add_argument('--exclude_deps', required=False,
default='',
help=('Comma separated list of dependencies to exclude'
' from tar.gz file.'))
parser.add_argument('--additional_deps', required=False,
default='',
help=('Comma separated list of additional deps'
' to include in tar.gz.'))
def read_dependencies(file_path):
"""Reads a json file and creates an iterable of unique dependencies.
Args:
file_path: The path to the runtime dependencies file.
Returns:
An iterable with unique dependencies.
"""
deps = None
with open(file_path) as deps_file:
deps = json.load(deps_file)
deps_set = set()
for _, dep_list in deps.items():
deps_set.update(dep_list)
return deps_set
def filter_dependencies(dependencies, filters):
"""Filters out dependencies from a dependencies iterable.
Args:
dependencies: An iterable with the full list of dependencies.
filters: A list of dependencies to remove.
Returns:
An iterable with the filtered dependencies.
"""
filters_list = filters.strip(',').split(',')
logging.info('Filtering: %s', filters_list)
filtered_deps = set()
for dep in dependencies:
norm_dep = os.path.normpath(dep)
if not any(norm_dep.startswith(f) for f in filters_list):
filtered_deps.add(norm_dep)
return filtered_deps
def create_tarfile(output_path, dependencies):
"""Creates a tar.gz file and saves it to output_path.
Args:
output_path: A string with the path to where tar.gz file will be saved to.
dependencies: An iterable with file/folders test dependencies.
"""
total_deps = len(dependencies)
if total_deps < 1:
logging.error('There are no dependencies to archive')
sys.exit(1)
step = (total_deps / 10) or 1
logging.info('Adding %s files', total_deps)
with tarfile.open(output_path, 'w:gz') as tar_file:
for idx, dep in enumerate(dependencies):
dep = os.path.normpath(dep)
archive_name = os.path.join('fuchsia/release', dep)
archive_name = os.path.normpath(archive_name)
tar_file.add(dep, arcname=archive_name)
if idx % step == 0 or idx == (total_deps - 1):
logging.info('Progress: %s percent', int(round(100.0/total_deps * idx)))
def main():
logging.basicConfig(level=logging.INFO)
args = parser.parse_args()
dependencies = read_dependencies(args.deps_list_path)
if args.additional_deps:
to_include = args.additional_deps.strip(',').split(',')
logging.info('Including: %s', to_include)
dependencies.update(to_include)
if args.exclude_deps:
dependencies = filter_dependencies(dependencies, args.exclude_deps)
create_tarfile(args.output, dependencies)
if __name__ == '__main__':
main()
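# Rough example invocation (all paths below are hypothetical, for illustration only):
#   package_test_deps.py --output /tmp/test_deps.tar.gz \
#       --deps_list_path out/Release/runtime_deps.json \
#       --exclude_deps out/Release/gen \
#       --additional_deps tools/,testdata/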
|
hls4ml/backends/fpga/fpga_types.py | jaemyungkim/hls4ml | 380 | 44253 | <reponame>jaemyungkim/hls4ml
import numpy as np
from hls4ml.model.types import CompressedType, NamedType, ExponentType, FixedPrecisionType, IntegerPrecisionType, XnorPrecisionType, ExponentPrecisionType, TensorVariable, PackedType, WeightVariable
#region Precision types
class PrecisionDefinition(object):
def definition_cpp(self):
raise NotImplementedError
class APIntegerPrecisionDefinition(PrecisionDefinition):
def definition_cpp(self):
typestring = 'ap_{signed}int<{width}>'.format(signed='u' if not self.signed else '', width=self.width)
return typestring
class APFixedPrecisionDefinition(PrecisionDefinition):
def _rounding_mode_cpp(self, mode):
if mode is not None:
return 'AP_' + str(mode)
def _saturation_mode_cpp(self, mode):
if mode is not None:
return 'AP_' + str(mode)
def definition_cpp(self):
args = [self.width, self.integer, self._rounding_mode_cpp(self.rounding_mode), self._saturation_mode_cpp(self.saturation_mode), self.saturation_bits]
args = ','.join([str(arg) for arg in args if arg is not None])
typestring = 'ap_{signed}fixed<{args}>'.format(signed='u' if not self.signed else '', args=args)
return typestring
class ACIntegerPrecisionDefinition(PrecisionDefinition):
def definition_cpp(self):
typestring = 'ac_int<{width}, {signed}>'.format(width=self.width, signed=str(self.signed).lower())
return typestring
class ACFixedPrecisionDefinition(PrecisionDefinition):
def _rounding_mode_cpp(self, mode):
if mode is not None:
return 'AC_' + str(mode)
def _saturation_mode_cpp(self, mode):
if mode is not None:
return 'AC_' + str(mode)
def definition_cpp(self):
args = [self.width, self.integer, str(self.signed).lower(), self._rounding_mode_cpp(self.rounding_mode), self._saturation_mode_cpp(self.saturation_mode), self.saturation_bits]
args = ','.join([str(arg) for arg in args if arg is not None])
typestring = 'ac_fixed<{args}>'.format(args=args)
return typestring
class PrecisionConverter(object):
def convert(self, precision_type):
raise NotImplementedError
class FixedPrecisionConverter(PrecisionConverter):
def __init__(self, type_map, prefix):
self.type_map = type_map
self.prefix = prefix
def convert(self, precision_type):
type_cls = type(precision_type)
type_cls_name = type_cls.__name__
# If the type is already converted, do nothing
if type_cls_name.startswith(self.prefix):
return precision_type
definition_cls = self.type_map.get(type_cls, None)
if definition_cls is not None:
precision_type.__class__ = type(self.prefix + type_cls_name, (type_cls, definition_cls), {})
return precision_type
else:
raise Exception('Cannot convert precision type to {}: {}'.format(self.prefix, precision_type.__class__.__name__))
class APTypeConverter(FixedPrecisionConverter):
def __init__(self):
super().__init__(
type_map={
FixedPrecisionType: APFixedPrecisionDefinition,
IntegerPrecisionType: APIntegerPrecisionDefinition,
ExponentPrecisionType: APIntegerPrecisionDefinition,
XnorPrecisionType: APIntegerPrecisionDefinition,
},
prefix='AP'
)
class ACTypeConverter(FixedPrecisionConverter):
def __init__(self):
super().__init__(
type_map={
FixedPrecisionType: ACFixedPrecisionDefinition,
IntegerPrecisionType: ACIntegerPrecisionDefinition,
ExponentPrecisionType: ACIntegerPrecisionDefinition,
XnorPrecisionType: ACIntegerPrecisionDefinition,
},
prefix='AC'
)
#endregion
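# Rough usage sketch for the converters above (the FixedPrecisionType argument
# names are assumed from hls4ml.model.types): converting a
# FixedPrecisionType(width=16, integer=6) with APTypeConverter() patches in
# APFixedPrecisionDefinition, so definition_cpp() yields 'ap_fixed<16,6>';
# ACTypeConverter() on the same type would yield 'ac_fixed<16,6,true>'.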
#region Data types
class TypeDefinition(object):
def definition_cpp(self):
raise NotImplementedError
class TypePrecisionConverter(object):
def convert_precision(self, precision_converter):
self.precision = precision_converter.convert(self.precision)
class NamedTypeConverter(TypeDefinition, TypePrecisionConverter):
def definition_cpp(self):
return 'typedef {precision} {name};\n'.format(name=self.name, precision=self.precision.definition_cpp())
class CompressedTypeConverter(TypeDefinition, TypePrecisionConverter):
def definition_cpp(self):
cpp_fmt = (
'typedef struct {name} {{'
'{index} row_index;'
'{index} col_index;'
'{precision} weight; }} {name};\n'
)
return cpp_fmt.format(name=self.name, index=self.index_precision, precision=self.precision.definition_cpp())
def convert_precision(self, precision_converter):
super().convert_precision(precision_converter)
self.index_precision = precision_converter.convert(self.index_precision)
class ExponentTypeConverter(TypeDefinition, TypePrecisionConverter):
def definition_cpp(self):
cpp_fmt = (
'typedef struct {name} {{'
'{sign} sign;'
'{precision} weight; }} {name};\n'
)
return cpp_fmt.format(name=self.name, precision=self.precision.definition_cpp(), sign=self.sign.definition_cpp())
def convert_precision(self, precision_converter):
super().convert_precision(precision_converter)
self.sign = precision_converter.convert(self.sign)
class PackedTypeConverter(TypeDefinition, TypePrecisionConverter):
def definition_cpp(self):
n_elem_expr = '/' if self.unpack else '*'
return 'typedef nnet::array<{precision}, {n_elem}> {name};\n'.format(name=self.name, precision=self.precision.definition_cpp(), n_elem=str(self.n_elem) + n_elem_expr + str(self.n_pack))
class HLSTypeConverter(object):
def __init__(self, precision_converter):
self.precision_converter = precision_converter
self.type_map = {
NamedType: NamedTypeConverter,
CompressedType: CompressedTypeConverter,
ExponentType: ExponentTypeConverter,
PackedType: PackedTypeConverter,
}
def convert(self, atype):
type_cls = type(atype)
type_cls_name = type_cls.__name__
# If the type is already converted, do nothing
if type_cls_name.startswith('HLS'):
return atype
conversion_cls = self.type_map.get(type_cls, None)
if conversion_cls is not None:
atype.__class__ = type('HLS' + type_cls_name, (type_cls, conversion_cls), {})
atype.convert_precision(self.precision_converter)
return atype
else:
raise Exception('Cannot convert type: {}'.format(atype.__class__.__name__))
#endregion
#region Variables
class VariableDefinition(object):
def definition_cpp(self, name_suffix='', as_reference=False):
raise NotImplementedError
#region ArrayVariable
class VivadoArrayVariableDefinition(VariableDefinition):
def definition_cpp(self, name_suffix='', as_reference=False):
return '{type} {name}{suffix}[{shape}]'.format(type=self.type.name, name=self.cppname, suffix=name_suffix, shape=self.size_cpp())
class QuartusArrayVariableDefinition(VariableDefinition):
def definition_cpp(self, name_suffix='', as_reference=False):
return '{type} {name}{suffix}[{shape}] {pragma}'.format(type=self.type.name, name=self.cppname, suffix=name_suffix, shape=self.size_cpp(), pragma=self.pragma)
class ArrayVariableConverter(object):
def __init__(self, type_converter, prefix, definition_cls):
self.type_converter = type_converter
self.prefix = prefix
self.definition_cls = definition_cls
def convert(self, tensor_var, pragma='partition'):
if isinstance(tensor_var, self.definition_cls): # Already converted
return tensor_var
tensor_var.pragma = pragma
tensor_var.type = self.type_converter.convert(tensor_var.type)
tensor_var.__class__ = type(self.prefix + 'ArrayVariable', (type(tensor_var), self.definition_cls), {})
return tensor_var
class VivadoArrayVariableConverter(ArrayVariableConverter):
def __init__(self, type_converter):
super().__init__(type_converter=type_converter, prefix='Vivado', definition_cls=VivadoArrayVariableDefinition)
class QuartusArrayVariableConverter(ArrayVariableConverter):
def __init__(self, type_converter):
super().__init__(type_converter=type_converter, prefix='Quartus', definition_cls=QuartusArrayVariableDefinition)
#endregion
#region StructMemberVariable
class QuartusStructMemberVariableDefinition(VariableDefinition):
def definition_cpp(self, name_suffix='', as_reference=False):
return '{type} {name}{suffix}[{shape}]'.format(type=self.type.name, name=self.member_name, suffix=name_suffix, shape=self.size_cpp())
class StructMemberVariableConverter(object):
def __init__(self, type_converter, prefix, definition_cls):
self.type_converter = type_converter
self.prefix = prefix
self.definition_cls = definition_cls
def convert(self, tensor_var, pragma='partition', struct_name=None):
if isinstance(tensor_var, self.definition_cls): # Already converted
return tensor_var
tensor_var.pragma = pragma
tensor_var.type = self.type_converter.convert(tensor_var.type)
assert struct_name is not None, 'struct_name must be provided when creating a StructMemberVariable'
tensor_var.struct_name = str(struct_name)
tensor_var.member_name = tensor_var.name
tensor_var.name = tensor_var.struct_name + '.' + tensor_var.member_name
tensor_var.__class__ = type(self.prefix + 'StructMemberVariable', (type(tensor_var), self.definition_cls), {})
return tensor_var
class QuartusStructMemberVariableConverter(StructMemberVariableConverter):
def __init__(self, type_converter):
super().__init__(type_converter=type_converter, prefix='Quartus', definition_cls=QuartusStructMemberVariableDefinition)
#endregion
#region StreamVariable
class VivadoStreamVariableDefinition(VariableDefinition):
def definition_cpp(self, name_suffix='', as_reference=False):
if as_reference: # Function parameter
return 'hls::stream<{type}> &{name}{suffix}'.format(type=self.type.name, name=self.cppname, suffix=name_suffix)
else: # Declaration
return 'hls::stream<{type}> {name}{suffix}("{name}")'.format(type=self.type.name, name=self.cppname, suffix=name_suffix)
class StreamVariableConverter(object):
def __init__(self, type_converter, prefix, definition_cls):
self.type_converter = type_converter
self.prefix = prefix
self.definition_cls = definition_cls
def convert(self, tensor_var, n_pack=1, depth=0):
if isinstance(tensor_var, self.definition_cls): # Already converted
return tensor_var
if depth == 0:
depth = np.prod(tensor_var.shape) // tensor_var.shape[-1]
tensor_var.pragma = ('stream', depth)
tensor_var.type = self.type_converter.convert(PackedType(tensor_var.type.name, tensor_var.type.precision, tensor_var.shape[-1], n_pack))
tensor_var.__class__ = type(self.prefix + 'StreamVariable', (type(tensor_var), self.definition_cls), {})
return tensor_var
class VivadoStreamVariableConverter(StreamVariableConverter):
def __init__(self, type_converter):
super().__init__(type_converter=type_converter, prefix='Vivado', definition_cls=VivadoStreamVariableDefinition)
#endregion
#region InplaceVariable
class InplaceVariableConverter(object):
def __init__(self, type_converter, prefix):
self.type_converter = type_converter
self.prefix = prefix
def convert(self, tensor_var, io_type):
if tensor_var.__class__.__name__.startswith(self.prefix): # Already converted
return tensor_var
if io_type == 'io_stream':
tensor_var.type = self.type_converter.convert(PackedType(tensor_var.type.name, tensor_var.type.precision, tensor_var.shape[-1], n_pack=1))
else:
tensor_var.type = self.type_converter.convert(tensor_var.type)
tensor_var.__class__ = type(self.prefix + 'InplaceVariable', (type(tensor_var),), {})
return tensor_var
class VivadoInplaceVariableConverter(InplaceVariableConverter):
def __init__(self, type_converter):
super().__init__(type_converter=type_converter, prefix='Vivado')
class QuartusInplaceVariableConverter(InplaceVariableConverter):
def __init__(self, type_converter):
super().__init__(type_converter=type_converter, prefix='Quartus')
#endregion
#region WeightsVariable
class StaticWeightVariableDefinition(VariableDefinition):
def definition_cpp(self, name_suffix='', as_reference=False):
return '{type} {name}[{size}]'.format(type=self.type.name, name=self.cppname, size=self.data_length)
class StaticWeightVariableConverter(object):
def __init__(self, type_converter):
self.type_converter = type_converter
def convert(self, weight_var):
if isinstance(weight_var, StaticWeightVariableDefinition): # Already converted
return weight_var
weight_var.weight_class = weight_var.__class__.__name__
weight_var.storage = 'register'
weight_var.type = self.type_converter.convert(weight_var.type)
weight_var.__class__ = type('StaticWeightVariable', (type(weight_var), StaticWeightVariableDefinition), {})
return weight_var
class BramWeightVariableConverter(object):
@classmethod
def convert(cls, weight_var):
weight_var.storage = 'bram'
return weight_var
#endregion
#endregion |
pycoin/symbols/mona.py | jaschadub/pycoin | 1,210 | 44280 | from pycoin.networks.bitcoinish import create_bitcoinish_network
network = create_bitcoinish_network(
network_name="Monacoin", symbol="MONA", subnet_name="mainnet",
wif_prefix_hex="b0", sec_prefix="MONASEC:", address_prefix_hex="32", pay_to_script_prefix_hex="37",
bip32_prv_prefix_hex="0488ade4", bip32_pub_prefix_hex="0488b21e", bech32_hrp="mona",
magic_header_hex="fbc0b6db", default_port=9401,
dns_bootstrap=["dnsseed.monacoin.org"])
|
PR_BCI_team/Team_StarLab/DKHan/examples/eeg_dg/train_eval.py | PatternRecognition/OpenBMI | 217 | 44309 | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class LabelSmoothingCrossEntropy(torch.nn.Module):
def __init__(self):
super(LabelSmoothingCrossEntropy, self).__init__()
def forward(self, x, target, smoothing=0.1):
confidence = 1. - smoothing
logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = confidence * nll_loss + smoothing * smooth_loss
return loss.mean()
class ConfidenceLabelSmoothingCrossEntropy(torch.nn.Module):
def __init__(self):
super(ConfidenceLabelSmoothingCrossEntropy, self).__init__()
# self.confidence = [0.7425, 0.9325, 0.965, 0.5395, 0.86025, 0.754, 0.66475, 0.618, 0.7925, 0.6525, 0.5415,
# 0.5705, 0.6525, 0.59625, 0.6145, 0.62125, 0.7755, 0.866, 0.83425, 0.64125, 0.986, 0.82225,
# 0.70525, 0.5625, 0.5145, 0.5275, 0.57775, 0.918, 0.9175, 0.69575, 0.6555, 0.867, 0.945,
# 0.5155, 0.593, 0.976, 0.963, 0.591, 0.749, 0.5575, 0.52625, 0.6125, 0.83725, 0.97225,
# 0.93725, 0.6415, 0.61225, 0.584, 0.69175, 0.60825, 0.63575, 0.756, 0.61375, 0.53575]
self.confidence = [0.713, 0.953, 0.947, 0.514, 0.933, 0.725, 0.6025, 0.5855, 0.821, 0.6175, 0.547, 0.5605, 0.7,
0.609, 0.5785, 0.638, 0.8005, 0.824, 0.834, 0.5155, 0.9775, 0.8615, 0.6305, 0.549, 0.517,
0.5915, 0.5285, 0.923, 0.855, 0.751, 0.675, 0.773, 0.9805, 0.53, 0.5255, 0.9685, 0.9535,
0.5515, 0.8795, 0.497, 0.529, 0.5335, 0.8645, 0.9595, 0.9245, 0.5265, 0.452, 0.6415, 0.696,
0.617, 0.683, 0.7255, 0.5995, 0.5815, 0.772, 0.912, 0.983, 0.565, 0.7875, 0.783, 0.727,
0.6505, 0.764, 0.6875, 0.536, 0.5805, 0.605, 0.5835, 0.6505, 0.6045, 0.7505, 0.908, 0.8345,
0.767, 0.9945, 0.783, 0.78, 0.576, 0.512, 0.4635, 0.627, 0.913, 0.98, 0.6405, 0.636, 0.961,
0.9095, 0.501, 0.6605, 0.9835, 0.9725, 0.6305, 0.6185, 0.618, 0.5235, 0.6915, 0.81, 0.985,
0.95, 0.7565, 0.7725, 0.5265, 0.6875, 0.5995, 0.5885, 0.7865, 0.628, 0.49, 0.985, 0.95,
0.7565, 0.7725, 0.5265, 0.6875, 0.5995, 0.5885, 0.7865, 0.628, 0.49
]
def forward(self, x, target, sid):
confidencemat = torch.zeros_like(target,dtype=torch.float32)
for i in range(len(target)):
confidencemat[i] = self.confidence[sid[i]]
smoothing = 1 - confidencemat
logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = torch.mul(confidencemat,nll_loss) + torch.mul(smoothing,smooth_loss)
return loss.mean()
class CroppedLoss:
def __init__(self, loss_function):
self.loss_function = loss_function
def __call__(self, preds, targets):
avg_preds = torch.mean(preds, dim=2)
avg_preds = avg_preds.squeeze(dim=1)
return self.loss_function(avg_preds, targets)
def train_crop(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch=1):
criterion = torch.nn.NLLLoss()
lossfn = CroppedLoss(criterion)
model.train()
for batch_idx, datas in enumerate(train_loader):
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
optimizer.zero_grad()
output = model(data)
output = model.embedding_net(data)
loss = lossfn(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
scheduler.step()
def eval_crop(model, device, test_loader):
model.eval()
test_loss = []
correct = []
with torch.no_grad():
for datas in test_loader:
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
outputs = []
for i in range(2):
outputs.append(model(data[:, :, :, i * 125:i * 125 + 1000]))
result = torch.cat([outputs[0], outputs[1][:, :, model.out_size - 125:model.out_size]], dim=2)
y_preds_per_trial = result.mean(dim=2)
test_loss.append(F.nll_loss(y_preds_per_trial, target, reduction='sum').item()) # sum up batch loss
pred = y_preds_per_trial.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = sum(test_loss) / len(test_loader.dataset)
# print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
loss, sum(correct), len(test_loader.dataset),
100. * sum(correct) / len(test_loader.dataset)))
return loss, 100. * sum(correct) / len(test_loader.dataset)
class MAE_loss(torch.nn.Module):
def __init__(self, device):
super(MAE_loss, self).__init__()
self.device = device
self.loss_function = torch.nn.L1Loss()
def __call__(self, preds, targets):
y_onehot = torch.FloatTensor(targets.size(0), 2).to(self.device)
y_onehot.zero_()
y_onehot.scatter_(1, targets.unsqueeze(1), 1)
return self.loss_function(preds, y_onehot)
import utils
import time
def train(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch):
losses = utils.AverageMeter('Loss', ':.4e')
if isinstance(model, torch.nn.DataParallel):
lossfn = model.module.criterion
else:
lossfn = model.criterion
# lossfn = LabelSmoothingCrossEntropy()
# lossfn = ConfidenceLabelSmoothingCrossEntropy()
correct = []
start = time.time()
model.train()
t_data = []
t_model = []
t3 = time.time()
for batch_idx, datas in enumerate(train_loader):
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
t2 = time.time()
t_data.append(t2 - t3)
# print(t2)
optimizer.zero_grad()
output = model(data.unsqueeze(dim=1))
pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = lossfn(output, target)
loss.backward()
optimizer.step()
losses.update(loss.item(), data.size(0))
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
t3 = time.time()
t_model.append(t3 - t2)
print("time :", time.time() - start)
print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}")
print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)')
def train_mtl(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch):
losses = utils.AverageMeter('Loss', ':.4e')
if isinstance(model, torch.nn.DataParallel):
lossfn = model.module.criterion
else:
lossfn = model.criterion
# lossfn = LabelSmoothingCrossEntropy()
# lossfn = ConfidenceLabelSmoothingCrossEntropy()
correct = []
start = time.time()
model.train()
t_data = []
t_model = []
t3 = time.time()
for batch_idx, datas in enumerate(train_loader):
data, target, subjid = datas[0].to(device), datas[1].to(device, dtype=torch.int64), datas[2].to(device, dtype=torch.int64)
t2 = time.time()
t_data.append(t2 - t3)
# print(t2)
optimizer.zero_grad()
output = model(data.unsqueeze(dim=1))
pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = lossfn(output, 2*subjid+target)
loss.backward()
optimizer.step()
losses.update(loss.item(), data.size(0))
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
t3 = time.time()
t_model.append(t3 - t2)
print("time :", time.time() - start)
print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}")
print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)')
def train_gpu(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch=1):
losses = utils.AverageMeter('Loss', ':.4e')
if isinstance(model, torch.nn.DataParallel):
lossfn = model.module.criterion
else:
lossfn = model.criterion
correct = []
import time
start = time.time()
model.train()
t_data = []
t_model = []
t3 = time.time()
for batch_idx, datas in enumerate(train_loader):
data, target = datas[0], datas[1]
t2 = time.time()
t_data.append(t2 - t3)
optimizer.zero_grad()
output = model(data.unsqueeze(dim=1))
pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = lossfn(output, target)
loss.backward()
optimizer.step()
losses.update(loss.item(), data.size(0))
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
t3 = time.time()
t_model.append(t3 - t2)
print("time :", time.time() - start)
print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}")
scheduler.step(losses.avg)
print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)')
def eval(model, device, test_loader):
model.eval()
test_loss = []
correct = []
with torch.no_grad():
for datas in test_loader:
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
output = model(data.unsqueeze(dim=1))
test_loss.append(F.cross_entropy(output, target, reduction='sum').item()) # sum up batch loss
pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = sum(test_loss) / len(test_loader.dataset)
# print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
loss, sum(correct), len(test_loader.dataset),
100. * sum(correct) / len(test_loader.dataset)))
return loss, 100. * sum(correct) / len(test_loader.dataset)
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
def eval_cali(model, device, test_loader):
model.eval()
test_loss = []
correct = []
with torch.no_grad():
for datas in test_loader:
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
output = model(data.unsqueeze(dim=1))
test_loss.append(F.cross_entropy(output, target, reduction='sum').item()) # sum up batch loss
pred = F.softmax(output, dim=1)
fpr, tpr, thresholds = roc_curve(target.cpu(), pred.cpu()[:,0])
AUC = auc(fpr, tpr)
correct.append(AUC)
loss = sum(test_loss) / len(test_loader.dataset)
# print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
loss, sum(correct), len(test_loader.dataset),
100. * sum(correct) / len(test_loader.dataset)))
return loss, 100. * sum(correct) / len(test_loader.dataset)
def vote(output, target, topk=(1,)):
""" Computes the precision@k for the specified values of k """
maxk = max(topk)
batch_size = target.size(0)
output = F.log_softmax(output, dim=1)
_, pred = output.topk(maxk, 1, True, True)
# pred = pred.t()
# one-hot case
if target.ndimension() > 1:
target = target.max(1)[1]
modevalue = torch.mode(pred%2)[0]
return modevalue
def eval_mtl(model, device, test_loader):
model.eval()
test_loss = []
correct = []
with torch.no_grad():
for datas in test_loader:
data, target, subjid = datas[0].to(device), datas[1].to(device, dtype=torch.int64), datas[2].to(device,
dtype=torch.int64)
output = model(data.unsqueeze(dim=1))
pred = vote(output, subjid*2+target, (1,5))
test_loss.append(F.cross_entropy(output, subjid*2+target, reduction='sum').item()) # sum up batch loss
# pred_0 = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
# pred = pred_0%2
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = sum(test_loss) / len(test_loader.dataset)
# print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
loss, sum(correct), len(test_loader.dataset),
100. * sum(correct) / len(test_loader.dataset)))
return loss, 100. * sum(correct) / len(test_loader.dataset)
def eval_ensemble(models, device, test_loader):
for model in models:
model.eval()
test_loss = []
correct = []
with torch.no_grad():
for datas in test_loader:
data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
output = []
for model in models:
output.append(model(data.unsqueeze(dim=1)).unsqueeze(dim=2))
temp = torch.cat(output, dim=2)
temp2 = temp.mean(dim=2)
test_loss.append(F.cross_entropy(temp2, target, reduction='sum').item()) # sum up batch loss
pred = F.log_softmax(temp2, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct.append(pred.eq(target.view_as(pred)).sum().item())
loss = sum(test_loss) / len(test_loader.dataset)
# print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
loss, sum(correct), len(test_loader.dataset),
100. * sum(correct) / len(test_loader.dataset)))
return loss, 100. * sum(correct) / len(test_loader.dataset)
|
xbox/webapi/api/provider/titlehub/models.py | OpenXbox/xbox-webapi-python | 122 | 44315 | <reponame>OpenXbox/xbox-webapi-python
from datetime import datetime
from enum import Enum
from typing import Any, List, Optional
from xbox.webapi.common.models import CamelCaseModel, PascalCaseModel
class TitleFields(str, Enum):
SERVICE_CONFIG_ID = "scid"
ACHIEVEMENT = "achievement"
STATS = "stats"
GAME_PASS = "gamepass"
IMAGE = "image"
DETAIL = "detail"
FRIENDS_WHO_PLAYED = "friendswhoplayed"
ALTERNATE_TITLE_ID = "alternateTitleId"
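# The TitleFields values above are presumably the optional "decoration" fields
# that can be requested from the titlehub endpoint alongside each title entry.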
class Achievement(CamelCaseModel):
current_achievements: int
total_achievements: int
current_gamerscore: int
total_gamerscore: int
progress_percentage: float
source_version: int
class Stats(CamelCaseModel):
source_version: int
class GamePass(CamelCaseModel):
is_game_pass: bool
class Image(CamelCaseModel):
url: str
type: str
class TitleHistory(CamelCaseModel):
last_time_played: datetime
visible: bool
can_hide: bool
class Attribute(CamelCaseModel):
applicable_platforms: Optional[List[str]]
maximum: Optional[int]
minimum: Optional[int]
name: str
class Availability(PascalCaseModel):
actions: List[str]
availability_id: str
platforms: List[str]
sku_id: str
class Detail(CamelCaseModel):
attributes: List[Attribute]
availabilities: List[Availability]
capabilities: List[str]
description: str
developer_name: str
genres: Optional[List[str]]
publisher_name: str
min_age: int
release_date: Optional[datetime]
short_description: Optional[str]
vui_display_name: Optional[str]
xbox_live_gold_required: bool
class Title(CamelCaseModel):
title_id: str
pfn: Optional[str]
bing_id: Optional[str]
service_config_id: Optional[str]
windows_phone_product_id: Optional[str]
name: str
type: str
devices: List[str]
display_image: str
media_item_type: str
modern_title_id: Optional[str]
is_bundle: bool
achievement: Optional[Achievement]
stats: Optional[Stats]
game_pass: Optional[GamePass]
images: Optional[List[Image]]
title_history: Optional[TitleHistory]
detail: Optional[Detail]
friends_who_played: Any
alternate_title_ids: Any
content_boards: Any
xbox_live_tier: Optional[str]
is_streamable: Optional[bool]
class TitleHubResponse(CamelCaseModel):
xuid: Optional[str]
titles: List[Title]
|
examples/py/factorization/parse_netflix.py | pnijhara/h2o4gpu | 458 | 44325 | <reponame>pnijhara/h2o4gpu<gh_stars>100-1000
import numpy as np
import scipy
from sklearn.model_selection import train_test_split
files = [
'../data/combined_data_1.txt',
'../data/combined_data_2.txt',
'../data/combined_data_3.txt',
'../data/combined_data_4.txt',
]
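# Each combined_data_*.txt file follows the Netflix Prize layout: a "MovieID:"
# header line, followed by "CustomerID,Rating,Date" lines for that movie.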
if __name__ == '__main__':
coo_row = []
coo_col = []
coo_val = []
for file_name in files:
print('processing {0}'.format(file_name))
with open(file_name, "r") as f:
movie = -1
for line in f:
if line.endswith(':\n'):
movie = int(line[:-2]) - 1
continue
assert movie >= 0
splitted = line.split(',')
user = int(splitted[0])
rating = float(splitted[1])
coo_row.append(user)
coo_col.append(movie)
coo_val.append(rating)
print('transformation...')
coo_col = np.array(coo_col)
user, indices = np.unique(coo_row, return_inverse=True)
coo_val = np.array(coo_val).astype(np.float32)
coo_matrix = scipy.sparse.coo_matrix((coo_val, (indices, coo_col)))
shape = coo_matrix.shape
print(shape)
train_row, test_row, train_col, test_col, train_data, test_data = train_test_split(
coo_matrix.row, coo_matrix.col, coo_matrix.data, test_size=0.2, random_state=42)
train = scipy.sparse.coo_matrix(
(train_data, (train_row, train_col)), shape=shape)
test = scipy.sparse.coo_matrix(
(test_data, (test_row, test_col)), shape=shape)
scipy.sparse.save_npz('../data/netflix_train.npz', train)
scipy.sparse.save_npz('../data/netflix_test.npz', test)
np.savez_compressed('../data/netflix_.npz', user)
|
vkbottle/api/token_generator/__init__.py | homus32/vkbottle | 698 | 44332 | from .abc import ABCTokenGenerator, Token
from .consistent import ConsistentTokenGenerator
from .single import SingleTokenGenerator
from .util import get_token_generator
|
diff_between_dates.py | DazEB2/SimplePyScripts | 117 | 44353 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from datetime import datetime
items = [
'23/03/2007',
'05/12/2007',
'22/08/2008',
'02/10/2009',
]
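# Prints the gap in days between each pair of consecutive dates, e.g.
# "23/03/2007 - 05/12/2007 -> 257 days".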
for i in range(len(items) - 1):
date_str_1, date_str_2 = items[i], items[i + 1]
date_1 = datetime.strptime(date_str_1, '%d/%m/%Y')
date_2 = datetime.strptime(date_str_2, '%d/%m/%Y')
days = (date_2 - date_1).days
print('{} - {} -> {} days'.format(date_str_1, date_str_2, days))
|
PyObjCTest/test_nsdocktile.py | Khan/pyobjc-framework-Cocoa | 132 | 44387 | <reponame>Khan/pyobjc-framework-Cocoa
from AppKit import *
from PyObjCTools.TestSupport import *
class TestNSDockTile (TestCase):
def testMethods(self):
self.assertResultIsBOOL(NSDockTile.showsApplicationBadge)
self.assertArgIsBOOL(NSDockTile.setShowsApplicationBadge_, 0)
def testConstants(self):
self.assertEqual(NSAppKitVersionNumberWithDockTilePlugInSupport, 1001.0)
if __name__ == "__main__":
main()
|
src/tests/ftest/pool/uuid_corner_case.py | fedepad/daos | 429 | 44388 | #!/usr/bin/python3
"""
(C) Copyright 2018-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from apricot import TestWithServers
class UUIDCornerCase(TestWithServers):
"""Create and destroy a pool with UUID.
This test covers some corner case and regression regarding UUID usage in
pool destroy.
:avocado: recursive
"""
def verify_destroy_uuid(self):
"""Destroy a pool with UUID and verify that it works.
Assume that self.pool is configured to use UUID.
"""
if not self.pool.destroy():
self.fail("pool isn't defined!")
self.log.info("pool destroy with UUID succeeded")
def test_create_label_destroy_uuid(self):
"""Test ID: JIRA-7943
Test Description: Create with a label, destroy with UUID.
:avocado: tags=all,full_regression
:avocado: tags=small
:avocado: tags=pool,uuid_corner_case,create_label_destroy_uuid
"""
# Create with a label - Default.
self.add_pool(connect=False)
# Make self.pool use UUID.
self.pool.use_label = False
# Destroy with UUID.
self.verify_destroy_uuid()
|
maintenance/templates/nodeclass.py | JFlynnXYZ/pymel | 287 | 44394 | <gh_stars>100-1000
{% if not existing %}
class {{ classname }}({{ parents }}):
{% endif %}
{% if attrs %}
{% for attr in attrs %}
{% for line in attr.getLines() %}
{{line}}
{% endfor %}
{% endfor %}
{% endif %}
{% if methods %}
{% for method in methods %}
{% for line in method.getLines() %}
{{line}}
{% endfor %}
{% endfor %}
{% else %}
{% endif %}
|
yasql/apps/sqlquery/migrations/0001_initial.py | Fanduzi/YaSQL | 443 | 44398 | <reponame>Fanduzi/YaSQL<gh_stars>100-1000
# Generated by Django 2.2.16 on 2020-12-15 07:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('sqlorders', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DbQuerySchemas',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('schema', models.CharField(default='', max_length=64, verbose_name='库名')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('cid', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlorders.DbConfig', verbose_name='数据库')),
],
options={
'verbose_name': 'DB查询库',
'verbose_name_plural': 'DB查询库',
'db_table': 'yasql_sqlquery_schemas',
'default_permissions': (),
'unique_together': {('cid', 'schema')},
},
),
migrations.CreateModel(
name='DbQueryTables',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('table', models.CharField(default='', max_length=128, verbose_name='表名')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('schema', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQuerySchemas', verbose_name='库名')),
],
options={
'verbose_name': 'DB查询表',
'verbose_name_plural': 'DB查询表',
'db_table': 'yasql_sqlquery_tables',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryUserPrivs',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('schemas', models.ManyToManyField(to='sqlquery.DbQuerySchemas', verbose_name='允许访问的库')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': 'DB查询用户权限',
'verbose_name_plural': 'DB查询用户权限',
'db_table': 'yasql_sqlquery_user_privileges',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryUserDenyTables',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('tables', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryTables', verbose_name='表')),
('user_privs', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryUserPrivs', verbose_name='权限')),
],
options={
'verbose_name': '禁止用户访问的表',
'verbose_name_plural': '禁止用户访问的表',
'db_table': 'yasql_sqlquery_user_deny_tables',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryUserAllowedTables',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('tables', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryTables', verbose_name='表')),
('user_privs', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryUserPrivs', verbose_name='权限')),
],
options={
'verbose_name': '允许用户访问的表',
'verbose_name_plural': '允许用户访问的表',
'db_table': 'yasql_sqlquery_user_allowed_tables',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryLog',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键id')),
('username', models.CharField(max_length=64, verbose_name='用户名')),
('host', models.CharField(max_length=256, verbose_name='目标数据库地址')),
('schema', models.CharField(default='', max_length=128, verbose_name='目标数据库')),
('tables', models.CharField(default='', max_length=200, verbose_name='目标表名')),
('query_sql', models.TextField(default='', verbose_name='查询SQL')),
('query_consume_time', models.FloatField(default=0.0, verbose_name='查询耗时,单位s')),
('query_status', models.CharField(default='', max_length=2048, verbose_name='查询是否成功或失败的原因')),
('affected_rows', models.IntegerField(default=0, verbose_name='影响影响行数')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='查询时间')),
],
options={
'verbose_name': 'DB查询日志',
'verbose_name_plural': 'DB查询日志',
'db_table': 'yasql_sqlquery_log',
'default_permissions': (),
'index_together': {('tables',), ('schema',), ('username',)},
},
),
migrations.CreateModel(
name='DbQueryGroupPrivs',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('group', models.CharField(default='', max_length=128, verbose_name='组名')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('schemas', models.ManyToManyField(to='sqlquery.DbQuerySchemas', verbose_name='允许访问的库')),
('user', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': 'DB查询组权限',
'verbose_name_plural': 'DB查询组权限',
'db_table': 'yasql_sqlquery_group_privileges',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryGroupDenyTables',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('group_privs', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryGroupPrivs', verbose_name='权限')),
('tables', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryTables', verbose_name='表')),
],
options={
'verbose_name': '禁止组访问的表',
'verbose_name_plural': '禁止组访问的表',
'db_table': 'yasql_sqlquery_group_deny_tables',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryGroupAllowedTables',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('group_privs', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryGroupPrivs', verbose_name='权限')),
('tables', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryTables', verbose_name='表')),
],
options={
'verbose_name': '允许组访问的表',
'verbose_name_plural': '允许组访问的表',
'db_table': 'yasql_sqlquery_group_allowed_tables',
'default_permissions': (),
},
),
]
|
Giveme5W1H/examples/datasets/news_cluster/data_fixer.py | bkrrr/Giveme5W | 410 | 44416 | <reponame>bkrrr/Giveme5W
import glob
"""
this script is fixing errors found in the data, after processing
"""
# final fixes for known errors
for filepath in glob.glob('output/*.json'):
# Read in the file
with open(filepath, 'r') as file:
filedata = file.read()
# AIDA has a wrong URL for the Dallas Airport
wrong = 'http://en.wikipedia.org/wiki/Dallas/Dallas%2FFort%20Worth%20International%20Airport'
correct = 'http://en.wikipedia.org/wiki/Dallas%2FFort%20Worth%20International%20Airport'
# Replace the target string
filedata = filedata.replace(wrong, correct)
# Write the file out again
with open(filepath, 'w') as file:
file.write(filedata) |
elliot/recommender/neural/NeuMF/tf_custom_sampler_2.py | gategill/elliot | 175 | 44446 | """
Module description: TensorFlow data sampler for NeuMF that pairs each positive
user-item interaction with randomly drawn negative items.
"""
__version__ = '0.3.1'
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
import tensorflow as tf
import numpy as np
import random
class Sampler():
def __init__(self, indexed_ratings=None, m=None, num_users=None, num_items=None, transactions=None, batch_size=512, random_seed=42):
np.random.seed(random_seed)
random.seed(random_seed)
self._UIDICT = {u: list(set(indexed_ratings[u])) for u in indexed_ratings}
self._POS = list({(u, i, 1) for u, items in self._UIDICT.items() for i in items})
self._POS = random.sample(self._POS, len(self._POS))
self._M = m
self._NUM_USERS = num_users
self._NUM_ITEMS = num_items
self._transactions = transactions
self._batch_size = batch_size
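    # _full_generator: for every observed positive (u, i, 1) interaction,
    # roughly self._M negative items j not rated by u are drawn and emitted as
    # (u, j, 0); positives and negatives are then shuffled and yielded in
    # batches of self._batch_size.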
def _full_generator(self):
r_int = np.random.randint
n_items = self._NUM_ITEMS
ui_dict = self._UIDICT
neg = set()
for u, i, _ in self._POS:
ui = ui_dict[u]
for _ in range(self._M):
j = r_int(n_items)
while j in ui:
j = r_int(n_items)
neg.add((u, j, 0))
samples = self._POS[:]
samples.extend(list(neg))
samples = random.sample(samples, len(samples))
# u, i, b = map(np.array, zip(*samples))
# yield u,i,b
for start in range(0, len(samples), self._batch_size):
u, i, b = map(np.array, zip(*samples[start:min(start + self._batch_size, len(samples))]))
yield u, i, b
def step(self, batch_size: int):
r_int = np.random.randint
n_items = self._NUM_ITEMS
ui_dict = self._UIDICT
pos = {(u, i, 1) for u, items in ui_dict.items() for i in items}
neg = set()
for u, i, _ in pos:
ui = ui_dict[u]
for _ in range(self._M):
j = r_int(n_items)
while j in ui:
j = r_int(n_items)
neg.add((u, j, 0))
samples = list(pos)
samples.extend(list(neg))
samples = random.sample(samples, len(samples))
for start in range(0, len(samples), batch_size):
u, i, b = map(np.array, zip(*samples[start:min(start + batch_size, len(samples))]))
yield u, i, b
def create_tf_dataset(self):
data = tf.data.Dataset.from_generator(generator=self._full_generator,
output_types=(np.int64, np.int64, np.int64),
)
# data = data.unbatch().batch(batch_size=self._batch_size)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data |
hottbox/utils/validation/__init__.py | adamurban98/hottbox | 167 | 44460 | <gh_stars>100-1000
from .checks import is_toeplitz_matrix, is_super_symmetric, is_toeplitz_tensor
__all__ = [
"is_toeplitz_matrix",
"is_super_symmetric",
"is_toeplitz_tensor",
]
|
google_or_tools/knapsack_cp_sat.py | tias/hakank | 279 | 44467 | <gh_stars>100-1000
# Copyright 2021 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Knapsack problem in OR-tools CP-SAT Solver.
Simple knapsack problem.
This is a port of my old CP model knapsack_cp.py
This model was created by <NAME> (<EMAIL>)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
from cp_sat_utils import knapsack
def main(values, weights, n):
model = cp.CpModel()
#
# data
#
print("values:", values)
print("weights:", weights)
print("n:", n)
print()
# declare variables
#
# constraints
#
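  # knapsack() comes from cp_sat_utils (not shown here); it presumably adds the
  # 0/1 "take item" variables x, the total value z, and the total weight w,
  # with w constrained to stay within the capacity n passed to main().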
x, z, w = knapsack(model, values, weights, n)
# objective
model.Maximize(z)
#
# solution and search
#
solver = cp.CpSolver()
status = solver.Solve(model)
if status == cp.OPTIMAL:
print("x:", [solver.Value(x[i]) for i in range(len(values))])
print("z:", solver.Value(z))
print("w:", solver.Value(w))
print()
print("NumConflicts:", solver.NumConflicts())
print("NumBranches:", solver.NumBranches())
print("WallTime:", solver.WallTime())
values = [15, 100, 90, 60, 40, 15, 10, 1, 12, 12, 100]
weights = [2, 20, 20, 30, 40, 30, 60, 10, 21, 12, 2]
n = 102
if __name__ == "__main__":
main(values, weights, n)
|
L1TriggerConfig/L1ScalesProducers/python/L1CaloScalesConfig_cff.py | ckamtsikis/cmssw | 852 | 44471 | <reponame>ckamtsikis/cmssw<filename>L1TriggerConfig/L1ScalesProducers/python/L1CaloScalesConfig_cff.py
import FWCore.ParameterSet.Config as cms
from L1TriggerConfig.L1ScalesProducers.l1CaloScales_cfi import *
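# Each EmptyESSource below provides no payload of its own; it presumably just
# declares the interval of validity for its record so that the scales defined
# in l1CaloScales_cfi can be served through the EventSetup.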
emrcdsrc = cms.ESSource("EmptyESSource",
recordName = cms.string('L1EmEtScaleRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
jetrcdsrc = cms.ESSource("EmptyESSource",
recordName = cms.string('L1JetEtScaleRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
htmrcdsrc = cms.ESSource("EmptyESSource",
recordName = cms.string('L1HtMissScaleRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
hfrrcdsrc = cms.ESSource("EmptyESSource",
recordName = cms.string('L1HfRingEtScaleRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
|
S2.Surface_Normal/unet/unet_model.py | leoshine/Spherical_Regression | 133 | 44477 | <reponame>leoshine/Spherical_Regression
"""
@Author : <NAME>
"""
# full assembly of the sub-parts to form the complete net
import numpy as np
from unet_parts import *
class UNet(nn.Module):
def __init__(self, n_channels, n_classes):
super(UNet, self).__init__()
self.inc = inconv(n_channels, 64)
self.down1 = down(64, 128)
self.down2 = down(128, 256)
self.down3 = down(256, 512)
self.down4 = down(512, 512)
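        # The "up" blocks below presumably concatenate the upsampled features
        # with the matching encoder output (skip connection), which is why
        # their input channel counts are the sum of the two, e.g. 512 + 512 =
        # 1024 for up1.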
self.up1 = up(1024, 256)
self.up2 = up(512, 128)
self.up3 = up(256, 64)
self.up4 = up(128, 64)
self.outc = outconv(64, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x4_ = self.up1(x5, x4)
x3_ = self.up2(x4_, x3)
x2_ = self.up3(x3_, x2)
x1_ = self.up4(x2_, x1)
x_ = self.outc(x1_)
print('x1 ', x1.shape)
print('x2 ', x2.shape)
print('x3 ', x3.shape)
print('x4 ', x4.shape)
print('x5 ', x5.shape)
print('x4_', x4_.shape)
print('x3_', x3_.shape)
print('x2_', x2_.shape)
print('x1_', x1_.shape)
print('x_ ', x_ .shape)
        return x_
if __name__ == '__main__':
model = UNet(n_channels=3, n_classes=1)
# import numpy as np
dummy_batch_data = np.zeros((2,3,224,224),dtype=np.float32)
dummy_batch_data = torch.from_numpy(dummy_batch_data )
Pred = model(dummy_batch_data)
print(Pred.shape)
|
xnmt/simultaneous/simult_state.py | neulab/xnmt | 195 | 44505 | import numbers
import xnmt.tensor_tools as tt
import xnmt.modelparts.decoders as decoders
import xnmt.transducers.recurrent as recurrent
import xnmt.transducers.base as transducers_base
import xnmt.expression_seqs as expr_seq
import xnmt.vocabs as vocabs
class SimultaneousState(decoders.AutoRegressiveDecoderState):
"""
The read/write state used to determine the state of the SimultaneousTranslator.
"""
def __init__(self,
model,
encoder_state: recurrent.UniLSTMState,
context_state: decoders.AutoRegressiveDecoderState,
output_embed: tt.Tensor,
to_read:int = 0,
to_write:int = 0,
prev_written_word: numbers.Integral = None,
reset_attender:bool = True):
super().__init__(None, None)
self.model = model
self.encoder_state = encoder_state
self.context_state = context_state
self.output_embed = output_embed
self.has_been_read = to_read
self.has_been_written = to_write
self.prev_written_word = prev_written_word
self.reset_attender = reset_attender
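  # State transitions: read() consumes the next source word and advances the
  # encoder LSTM; calc_context() runs the decoder/attention one step to build
  # the context used for the next prediction; write() records the emitted
  # target word (and its embedding) as input for the following decoding step.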
def read(self, src):
src_embed = self.model.src_embedder.embed(src[self.has_been_read])
next_encoder_state = self.encoder_state.add_input(src_embed)
return SimultaneousState(self.model, next_encoder_state, self.context_state,
self.output_embed, self.has_been_read+1, self.has_been_written,
self.prev_written_word, True)
def calc_context(self, src_encoding):
# Generating h_t based on RNN(h_{t-1}, embed(e_{t-1}))
if self.prev_written_word is None:
final_transducer_state = [transducers_base.FinalTransducerState(h, c) \
for h, c in zip(self.encoder_state.h(), self.encoder_state.c())]
context_state = self.model.decoder.initial_state(final_transducer_state,
vocabs.Vocab.SS)
else:
context_state = self.model.decoder.add_input(self.context_state, self.prev_written_word)
# Reset attender if there is a read action
reset_attender = self.reset_attender
if reset_attender:
self.model.attender.init_sent(expr_seq.ExpressionSequence(expr_list=src_encoding))
reset_attender = False
# Calc context for decoding
context_state.context = self.model.attender.calc_context(context_state.rnn_state.output())
return SimultaneousState(self.model, self.encoder_state, context_state,
self.output_embed, self.has_been_read, self.has_been_written,
self.prev_written_word,
reset_attender)
def write(self, next_word):
return SimultaneousState(self.model, self.encoder_state, self.context_state,
self.model.decoder.embedder.embed(next_word), self.has_been_read,
self.has_been_written+1,
next_word,
self.reset_attender)
# These states are used for decoding
def as_vector(self):
return self.context_state.as_vector()
@property
def rnn_state(self):
return self.context_state.rnn_state
@property
def context(self):
return self.context_state.context
@context.setter
def context(self, value):
self.context_state.context = value
|
tests/components/luftdaten/__init__.py | domwillcode/home-assistant | 30,023 | 44537 | <filename>tests/components/luftdaten/__init__.py
"""Define tests for the Luftdaten component."""
|
tensorflow_federated/python/core/impl/bindings_utils/data_conversions.py | RyanMarten/federated | 1,918 | 44562 | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for converting Python data representations for CPP bindings."""
import collections
from typing import Mapping
from tensorflow_federated.python.core.impl.types import placements
def convert_cardinalities_dict_to_string_keyed(
cardinalities: Mapping[placements.PlacementLiteral,
int]) -> Mapping[str, int]:
"""Ensures incoming cardinalities dict is formatted correctly."""
if not isinstance(cardinalities, collections.abc.Mapping):
raise TypeError('`cardinalities` must be a `Mapping`. Received a type: '
f'{type(cardinalities)}.')
uri_cardinalities = {}
for placement, cardinality in cardinalities.items():
if not isinstance(placement, placements.PlacementLiteral):
raise TypeError('`cardinalities` must be a `Mapping` with '
'`PlacementLiteral` (e.g. `tff.CLIENTS`) keys. '
f'Received a key of type: {type(placement)}.')
if not isinstance(cardinality, int):
raise TypeError('`cardinalities` must be a `Mapping` with `int` values. '
f'Received a value of type: {type(cardinality)} for '
f'placement {placement}.')
uri_cardinalities[placement.uri] = cardinality
return uri_cardinalities
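# Rough usage sketch (values are illustrative, not taken from the tests):
#   convert_cardinalities_dict_to_string_keyed({placements.CLIENTS: 10})
# is expected to return {'clients': 10}, i.e. the same counts keyed by each
# placement's URI string instead of by the PlacementLiteral object itself.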
|
pixiedust/display/streamingDisplay.py | elgalu/pixiedust | 598 | 44566 | # -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from pixiedust.display.display import *
from pixiedust.display import addDisplayRunListener
#add a display Run Listener
addDisplayRunListener( lambda entity, options: onNewDisplayRun(entity, options) )
activesStreamingEntities = {}
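# Registry of currently active streaming display entities, keyed by notebook
# cell id; the onNewDisplayRun handler below drops a stale entry whenever its
# cell is re-rendered with chrome, so a fresh streaming display can rebind.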
def onNewDisplayRun(entity, options):
if "cell_id" in options and "showchrome" in options:
if options["cell_id"] in activesStreamingEntities:
del activesStreamingEntities[options["cell_id"]]
class StreamingDisplay(Display):
def __init__(self, options, entity, dataHandler=None):
super(StreamingDisplay,self).__init__(options,entity,dataHandler)
self.windowSize = 100 |
import_3dm/converters/curve.py | jesterKing/import_3dm | 167 | 44601 | # MIT License
# Copyright (c) 2018-2019 <NAME>, <NAME>, <NAME>, <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import rhino3dm as r3d
from . import utils
from mathutils import Vector
CONVERT = {}
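# Dispatch table mapping rhino3dm curve types to importer functions. Each
# importer defined below registers itself here, and import_curve() /
# import_polycurve() look up the right handler via type(curve).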
def import_null(rcurve, bcurve, scale):
print("Failed to convert type", type(rcurve))
return None
def import_line(rcurve, bcurve, scale):
fr = rcurve.Line.From
to = rcurve.Line.To
line = bcurve.splines.new('POLY')
line.points.add(1)
line.points[0].co = (fr.X * scale, fr.Y * scale, fr.Z * scale, 1)
line.points[1].co = (to.X * scale, to.Y * scale, to.Z * scale, 1)
return line
CONVERT[r3d.LineCurve] = import_line
def import_polyline(rcurve, bcurve, scale):
N = rcurve.PointCount
polyline = bcurve.splines.new('POLY')
polyline.use_cyclic_u = rcurve.IsClosed
if rcurve.IsClosed:
N -= 1
polyline.points.add(N - 1)
for i in range(0, N):
rpt = rcurve.Point(i)
polyline.points[i].co = (rpt.X * scale, rpt.Y * scale, rpt.Z * scale, 1)
return polyline
CONVERT[r3d.PolylineCurve] = import_polyline
def import_nurbs_curve(rcurve, bcurve, scale):
N = len(rcurve.Points)
nurbs = bcurve.splines.new('NURBS')
nurbs.use_cyclic_u = rcurve.IsClosed
nurbs.points.add(N - 1)
for i in range(0, N):
rpt = rcurve.Points[i]
nurbs.points[i].co = (rpt.X * scale, rpt.Y * scale, rpt.Z * scale, rpt.W * scale)
#nurbs.use_bezier_u = True
nurbs.use_endpoint_u = True
nurbs.order_u = rcurve.Order
return nurbs
CONVERT[r3d.NurbsCurve] = import_nurbs_curve
def import_arc(rcurve, bcurve, scale):
spt = Vector((rcurve.Arc.StartPoint.X, rcurve.Arc.StartPoint.Y, rcurve.Arc.StartPoint.Z)) * scale
ept = Vector((rcurve.Arc.EndPoint.X, rcurve.Arc.EndPoint.Y, rcurve.Arc.EndPoint.Z)) * scale
cpt = Vector((rcurve.Arc.Center.X, rcurve.Arc.Center.Y, rcurve.Arc.Center.Z)) * scale
r1 = spt - cpt
r2 = ept - cpt
r1.normalize()
r2.normalize()
d = rcurve.Arc.Length * scale
normal = r1.cross(r2)
t1 = normal.cross(r1)
t2 = normal.cross(r2)
'''
Temporary arc
'''
arc = bcurve.splines.new('NURBS')
arc.use_cyclic_u = False
arc.points.add(3)
arc.points[0].co = (spt.x, spt.y, spt.z, 1)
sspt = spt + t1 * d * 0.33
arc.points[1].co = (sspt.x, sspt.y, sspt.z, 1)
eept = ept - t2 * d * 0.33
arc.points[2].co = (eept.x, eept.y, eept.z, 1)
arc.points[3].co = (ept.x, ept.y, ept.z, 1)
'''
print("ARC")
print(" StartPoint:", rcurve.Arc.StartPoint)
print(" EndPoint:", rcurve.Arc.EndPoint)
print(" Center:", rcurve.Arc.Center)
print(" Radius:", rcurve.Radius)
'''
arc.use_endpoint_u = True
arc.order_u = 3
return arc
CONVERT[r3d.ArcCurve] = import_arc
def import_polycurve(rcurve, bcurve, scale):
for seg in range(rcurve.SegmentCount):
segcurve = rcurve.SegmentCurve(seg)
if type(segcurve) in CONVERT.keys():
CONVERT[type(segcurve)](segcurve, bcurve, scale)
CONVERT[r3d.PolyCurve] = import_polycurve
def import_curve(context, ob, name, scale, options):
og = ob.Geometry
oa = ob.Attributes
curve_data = context.blend_data.curves.new(name, type="CURVE")
if type(og) in CONVERT.keys():
curve_data.dimensions = '3D'
curve_data.resolution_u = 2
CONVERT[type(og)](og, curve_data, scale)
return curve_data
|
nailgun-client/py/__init__.py | h4l/nailgun | 362 | 44603 | <filename>nailgun-client/py/__init__.py<gh_stars>100-1000
# Copyright 2004-2015, <NAME>, Inc.
# Copyright 2017-Present Facebook, Inc.
from ng import NailgunConnection, NailgunException
|
Week15/2.py | bobsingh149/LeetCode | 101 | 44636 | <filename>Week15/2.py
### DO NOT REMOVE THIS
from typing import List
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
### DO NOT REMOVE THIS
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
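        # Approach: decode each list (digits stored least-significant first)
        # into an integer, add the two integers, then re-encode the sum as a
        # new linked list, again least-significant digit first.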
a1=0
a2=0
current=l1
i=0
while current:
a1+=current.val*(10**i)
current=current.next
i+=1
j=0
current=l2
while current:
a2+=current.val*(10**j)
current=current.next
j+=1
result=a1+a2
link=ListNode(0)
current=link
if result==0:
return link
while result>0:
current.next=ListNode(result%10)
result=result//10
current=current.next
return link.next
|
lib/synthetic_data.py | ppmdatix/rtdl | 298 | 44644 | "Code used to generate data for experiments with synthetic data"
import math
import typing as ty
import numba
import numpy as np
import torch
import torch.nn as nn
from numba.experimental import jitclass
from tqdm.auto import tqdm
class MLP(nn.Module):
def __init__(
self,
*,
d_in: int,
d_layers: ty.List[int],
d_out: int,
bias: bool = True,
) -> None:
super().__init__()
self.layers = nn.ModuleList(
[
nn.Linear(d_layers[i - 1] if i else d_in, x, bias=bias)
for i, x in enumerate(d_layers)
]
)
self.head = nn.Linear(d_layers[-1] if d_layers else d_in, d_out)
def init_weights(m):
if isinstance(m, nn.Linear):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_in')
if m.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(m.bias, -bound, bound)
self.apply(init_weights)
def forward(self, x: torch.Tensor) -> torch.Tensor:
for layer in self.layers:
x = layer(x)
x = torch.relu(x)
x = self.head(x)
x = x.squeeze(-1)
return x
@jitclass(
spec=[
('left_children', numba.int64[:]),
('right_children', numba.int64[:]),
('feature', numba.int64[:]),
('threshold', numba.float32[:]),
('value', numba.float32[:]),
('is_leaf', numba.int64[:]),
]
)
class Tree:
"Randomly initialized decision tree"
def __init__(self, n_features, n_nodes, max_depth):
        assert (2 ** np.arange(max_depth + 1)).sum() >= n_nodes, "Too many nodes"
self.left_children = np.ones(n_nodes, dtype=np.int64) * -1
self.right_children = np.ones(n_nodes, dtype=np.int64) * -1
self.feature = np.random.randint(0, n_features, (n_nodes,))
self.threshold = np.random.randn(n_nodes).astype(np.float32)
self.value = np.random.randn(n_nodes).astype(np.float32)
depth = np.zeros(n_nodes, dtype=np.int64)
# Root is 0
self.is_leaf = np.zeros(n_nodes, dtype=np.int64)
self.is_leaf[0] = 1
# Keep adding nodes while we can (new node must have 2 children)
while True:
idx = np.flatnonzero(self.is_leaf)[np.random.choice(self.is_leaf.sum())]
if depth[idx] < max_depth:
unused = np.flatnonzero(
(self.left_children == -1)
& (self.right_children == -1)
& ~self.is_leaf
)
if len(unused) < 2:
break
lr_child = unused[np.random.permutation(unused.shape[0])[:2]]
                self.is_leaf[lr_child] = 1
depth[lr_child] = depth[idx] + 1
self.left_children[idx] = lr_child[0]
self.right_children[idx] = lr_child[1]
self.is_leaf[idx] = 0
def apply(self, x):
y = np.zeros(x.shape[0])
for i in range(x.shape[0]):
idx = 0
while not self.is_leaf[idx]:
if x[i, self.feature[idx]] < self.threshold[idx]:
idx = self.left_children[idx]
else:
idx = self.right_children[idx]
y[i] = self.value[idx]
return y
class TreeEnsemble:
"Combine multiple trees"
def __init__(self, *, n_trees, n_features, n_nodes, max_depth):
self.trees = [
Tree(n_features=n_features, n_nodes=n_nodes, max_depth=max_depth)
for _ in range(n_trees)
]
def apply(self, x):
return np.mean([t.apply(x) for t in tqdm(self.trees)], axis=0)
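# Rough usage sketch (shapes and hyperparameters are illustrative only):
#   ensemble = TreeEnsemble(n_trees=8, n_features=16, n_nodes=64, max_depth=10)
#   X = np.random.randn(1000, 16).astype(np.float32)
#   y = ensemble.apply(X)   # -> array of shape (1000,)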
|
mitreattack/navlayers/generators/usage_generator.py | wetkind/mitreattack-python | 137 | 44653 | <filename>mitreattack/navlayers/generators/usage_generator.py<gh_stars>100-1000
from stix2 import Filter
from itertools import chain
import copy
from mitreattack.navlayers.exporters.matrix_gen import MatrixGen
from mitreattack.navlayers.core.exceptions import BadInput, typeChecker
from mitreattack.navlayers.core.layer import Layer
from mitreattack.navlayers.generators.gen_helpers import remove_revoked_depreciated, get_attack_id, build_data_strings
class UnableToFindStixObject(Exception):
pass
class StixObjectIsNotValid(Exception):
pass
class UsageLayerGenerator:
"""Generates a Layer that shows techniques mapped to an input group, software or mitigation"""
def __init__(self, source, domain='enterprise', resource=None):
"""
Initialize the Generator
:param source: Which source to use for data (local, taxii [server], or [remote] ATT&CK Workbench)
:param domain: Which matrix to use during generation
:param resource: string path to local STIX data (local) or url of workbench to reach out to (remote)
"""
self.matrix_handle = MatrixGen(source, resource)
self.domain = domain
try:
self.source_handle = self.matrix_handle.collections[domain]
except KeyError:
print(f"[UsageGenerator] - unable to load collection {domain} (current source = {source}).")
raise BadInput
self.full_matrix = self.matrix_handle.get_matrix(self.domain)
self.sources = self.source_handle.query([Filter('type', '=', 'x-mitre-data-source')])
self.components = self.source_handle.query([Filter('type', '=', 'x-mitre-data-component')])
self.source_mapping = build_data_strings(self.sources, self.components)
def get_stix_object(self, match):
"""
Retrieve the stix object for a given string
:param match: The string to match on - can be a name, alias, or ATT&CK ID
:return: the corresponding stix object
"""
filts = [
[Filter('name', '=', match)],
[Filter(match, 'in', 'aliases')],
[Filter(match, 'in', 'x_mitre_aliases')],
[Filter('external_references.external_id', '=', match)],
[Filter('id', '=', match)] # Support data component type objects from sum generator
]
data = list(chain.from_iterable(self.source_handle.query(f) for f in filts))
data = remove_revoked_depreciated(data)
if len(data):
if len(data) > 1:
print(f"[Usage Generator] - WARNING! Multiple matches found for {match}: [{data}]. Selecting the first "
f"one as default.")
return data[0]
raise UnableToFindStixObject
def get_matrix_data(self, match_pattern):
"""
Retrieve a list of attack-pattern (technique) objects that map to a group, software or mitigation.
:param match_pattern: Name, associated group/software (alias), or ATT&CK ID.
            Techniques mapped to the object matching this pattern are returned.
"""
obj = self.get_stix_object(match_pattern)
if obj['type'] == 'course-of-action':
verb = 'mitigates'
elif obj['type'] == 'x-mitre-data-component' or obj['type'] == 'x-mitre-data-source':
verb = 'detects'
else:
verb = 'uses'
related = self.source_handle.relationships(obj['id'], verb, source_only=True)
out = self.source_handle.query([
Filter('type', '=', 'attack-pattern'),
Filter('id', 'in', [r.target_ref for r in related])
])
return remove_revoked_depreciated(out), obj
def generate_technique_data(self, raw_matches):
"""
Generate technique list of dictionary objects (dictionary form of technique listing for a layer)
:param raw_matches: matching attack-pattern objects
:return: list of dictionary objects for every technique: score=0 if not in raw_matches, 1 otherwise,
description in comments
"""
shortlist = []
for match in raw_matches:
xid = ''
xphase = ''
for ref in match.external_references:
if ref.source_name == 'mitre-attack':
xid = ref.external_id
for phase in match.kill_chain_phases:
if phase.kill_chain_name == 'mitre-attack':
xphase = phase.phase_name
shortlist.append((xid, xphase, match.description))
full_matrix_listing = copy.deepcopy(self.full_matrix)
construct = list()
for tactic in full_matrix_listing:
for tech in tactic.techniques:
construct.append(dict(techniqueID=tech.id, score=0,
tactic=self.matrix_handle.convert(tactic.tactic.name)))
for tech_key in tactic.subtechniques:
for subtech in tactic.subtechniques[tech_key]:
construct.append(dict(techniqueID=subtech.id, score=0,
tactic=self.matrix_handle.convert(tactic.tactic.name)))
for entry in shortlist:
for tac in construct:
if entry[0] == tac['techniqueID'] and (entry[1] == '' or entry[1] == tac['tactic']):
tac['score'] = 1
tac['comment'] = entry[2]
return construct
def generate_layer(self, match):
"""
Generate a layer
:param match: the pattern to match
:return: layer object with annotated techniques
"""
typeChecker(type(self).__name__, match, str, "match")
raw_data, matched_obj = self.get_matrix_data(match)
if matched_obj['type'] not in ["course-of-action", 'tool', 'malware', 'intrusion-set',
'x-mitre-data-source', 'x-mitre-data-component']:
print(f"Warning: The input match {match} corresponds with an ATT&CK Technique, which is not supported. "
f"Please provide a group, software, or mitigation instead.")
raise StixObjectIsNotValid
a_id = get_attack_id(matched_obj)
processed_listing = self.generate_technique_data(raw_data)
raw_layer = dict(name=f"{matched_obj['name']} ({matched_obj['id']})", domain=self.domain + '-attack')
raw_layer['techniques'] = processed_listing
output_layer = Layer(raw_layer)
if matched_obj['type'] != 'x-mitre-data-component':
name = matched_obj['name']
else:
name = self.source_mapping[matched_obj['id']]
output_layer.description = f"{self.domain.capitalize() if len(self.domain) > 3 else self.domain.upper()} " \
f"techniques used by {name}, ATT&CK {matched_obj['type']} {a_id}"
return output_layer
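# Rough usage sketch (requires reachable ATT&CK STIX data; the resource path,
# group name and export call below are assumptions, not part of this module):
#   generator = UsageLayerGenerator(source='local', domain='enterprise',
#                                   resource='path/to/stix-data')
#   layer = generator.generate_layer('APT29')
#   layer.to_file('apt29_usage.json')  # assuming Layer's to_file() exporter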
|
uefi_firmware/guids/efiguids_asrock.py | MikeSpreitzer/uefi-firmware-parser | 492 | 44666 |
GUIDs = {
"ASROCK_ACPIS4_DXE_GUID": [69196166, 45078, 18382, 175, 197, 34, 105, 237, 212, 173, 100],
"ASROCK_USBRT_GUID": [82487969, 10657, 4567, 136, 56, 0, 80, 4, 115, 212, 235],
"ASROCK_RAID_SETUP_GUID": [152494882, 14144, 12750, 173, 98, 189, 23, 44, 236, 202, 54],
"ASROCK_RAID_LOADER_GUID": [164506669, 19843, 17592, 151, 208, 16, 133, 235, 84, 144, 184],
"ASROCK_SIOSLPSMI_GUID": [204970154, 53806, 19926, 140, 180, 60, 156, 251, 29, 134, 211],
"ASROCK_PLEDDXE_GUID": [260599413, 12329, 20175, 182, 148, 34, 137, 77, 63, 33, 67],
"ASROCK_A_DEFAULT_DXE_GUID": [303480106, 49246, 19565, 145, 231, 235, 142, 55, 173, 59, 122],
"ASROCK_USER_DEF_SETUP_DXE_GUID": [321832763, 48422, 20415, 177, 147, 138, 203, 80, 239, 189, 137],
"ASROCK_WAKEUP_CTRL_SMM_GUID": [460234064, 4836, 19285, 129, 217, 26, 191, 236, 89, 212, 252],
"ASROCK_AMI_AGESA_DXE_GUID": [503020538, 49038, 19729, 151, 102, 47, 176, 208, 68, 35, 16],
"ASROCK_HDD_READY_SMI_GUID": [560462180, 29336, 19087, 154, 42, 191, 228, 152, 214, 0, 168],
"ASROCK_MOUSE_DRIVER_GUID": [719032155, 51156, 20094, 190, 42, 35, 99, 77, 246, 104, 161],
"ASROCK_IDESMM_GUID": [829100592, 1280, 17810, 140, 9, 234, 186, 15, 182, 176, 127],
"ASROCK_BFGSMI_GUID": [978522445, 22131, 19929, 181, 179, 203, 114, 195, 71, 102, 155],
"ASROCK_ASRLOGODXE_GUID": [1033880909, 6629, 19152, 185, 134, 2, 214, 135, 215, 96, 229],
"ASROCK_ASM104_X_DXE_GUID": [1080004582, 21011, 19333, 184, 33, 151, 183, 122, 255, 121, 91],
"ASROCK_HDAUDIO_SMI_GUID": [1254707048, 58961, 19256, 161, 216, 45, 93, 239, 250, 15, 96],
"ASROCK_SM_BUS_DXE_GUID": [1265110573, 3427, 20322, 185, 48, 122, 233, 149, 185, 179, 163],
"ASROCK_USBINT13_GUID": [1275096281, 6586, 17943, 132, 131, 96, 145, 148, 161, 172, 252],
"ASROCK_SLP_SUPPORT_GUID": [1279872597, 22601, 21314, 69, 84, 84, 69, 82, 33, 33, 33],
"ASROCK_PATA_CONTROLLER_GUID": [1334921163, 38702, 20316, 184, 105, 160, 33, 130, 201, 217, 60],
"ASROCK_SATA_CONTROLLER_GUID": [1359869601, 46785, 18760, 174, 231, 89, 242, 32, 248, 152, 189],
"ASROCK_ACPIS4_SMM_GUID": [1368992111, 10248, 19194, 148, 196, 153, 246, 176, 108, 135, 30],
"ASROCK_POST_REPORT_GUID": [1413923475, 13211, 18381, 183, 25, 88, 93, 227, 148, 8, 204],
"ASROCK_CLOCK_GEN_DXE_GUID": [1447053695, 25694, 17937, 185, 21, 230, 130, 200, 189, 71, 131],
"ASROCK_UHCD_GUID": [1477302528, 14429, 4567, 136, 58, 0, 80, 4, 115, 212, 235],
"ASROCK_LEGACY_REGION_GUID": [1495543256, 59343, 18809, 182, 14, 166, 6, 126, 42, 24, 95],
"ASROCK_SLEEP_SMI_GUID": [1654193688, 54767, 17079, 187, 12, 41, 83, 40, 63, 87, 4],
"ASROCK_CMOS_MANAGER_SMM_GUID": [1751762355, 44173, 18803, 139, 55, 227, 84, 219, 243, 74, 221],
"ASROCK_AMD_AGESA_DXE_DRIVER_GUID": [1766895615, 28387, 4573, 173, 139, 8, 0, 32, 12, 154, 102],
"ASROCK_RE_FLASH_GUID": [1893836824, 3041, 17481, 191, 212, 158, 246, 140, 127, 2, 168],
"ASROCK_LEGACY_INTERRUPT_GUID": [1911362257, 9483, 17147, 140, 23, 16, 220, 250, 119, 23, 1],
"ASROCK_SMM_CHILD_DISPATCHER_GUID": [1966485705, 64229, 18345, 187, 191, 136, 214, 33, 205, 114, 130],
"ASROCK_BFGDXE_GUID": [1988600983, 65358, 18687, 188, 170, 103, 219, 246, 92, 66, 209],
"ASROCK_IFLASHSETUP_GUID": [2011543064, 9746, 19496, 188, 223, 162, 177, 77, 138, 62, 254],
"ASROCK_S4_SLEEPDELAY_GUID": [2075935011, 23902, 19484, 149, 209, 48, 235, 164, 135, 1, 202],
"ASROCK_HDD_READY_DXE_GUID": [2179248970, 9868, 20428, 142, 57, 28, 29, 62, 111, 110, 105],
"ASROCK_RTLANDXE_GUID": [2332955475, 13708, 20015, 147, 69, 238, 191, 29, 171, 152, 155],
"ASROCK_AMD_SB900_DXE_GUID": [2333274783, 28981, 20139, 175, 218, 5, 18, 247, 75, 101, 234],
"ASROCK_SB900_SMBUS_LIGHT_GUID": [2551896525, 34437, 19115, 175, 218, 5, 18, 247, 75, 101, 234],
"ASROCK_AAFTBL_SMI_GUID": [2667102838, 46054, 19161, 143, 231, 199, 79, 113, 196, 114, 72],
"ASROCK_NVRAMID_GUID": [2708185858, 25876, 17031, 190, 227, 98, 35, 183, 222, 44, 33],
"ASROCK_IDE_SECURITY_GUID": [2847342799, 414, 19851, 163, 167, 136, 225, 234, 1, 105, 158],
"ASROCK_ASM1061_DXE_GUID": [2848876245, 27959, 17694, 169, 191, 245, 143, 122, 11, 60, 194],
"ASROCK_ASM104_X_SMI_GUID": [2904508538, 47702, 18652, 142, 170, 232, 251, 234, 116, 184, 242],
"ASROCK_RTLANSMI_GUID": [3005543067, 24215, 19449, 180, 224, 81, 37, 193, 246, 5, 213],
"ASROCK_GEC_UPDATE_SMI_GUID": [3092850716, 5882, 17832, 146, 1, 28, 56, 48, 169, 115, 189],
"ASROCK_APMOFF_GUID": [3146872289, 16021, 19326, 135, 80, 157, 106, 163, 78, 183, 246],
"ASROCK_SMIFLASH_GUID": [3157425597, 47490, 20309, 159, 121, 5, 106, 215, 233, 135, 197],
"ASROCK_RAID_X64_GUID": [3295196034, 17744, 18697, 173, 87, 36, 150, 20, 27, 63, 74],
"ASROCK_AMD_SB900_SMM_GUID": [3351810409, 6722, 20062, 179, 75, 230, 131, 6, 113, 201, 166],
"ASROCK_FIREWIRE_GUID": [3367390790, 38937, 17835, 135, 93, 9, 223, 218, 109, 139, 27],
"ASROCK_IDE_SMART_GUID": [3581707566, 32927, 17871, 163, 119, 215, 123, 192, 203, 120, 238],
"ASROCK_SB_INTERFACE_DXE_GUID": [3622218689, 38683, 17947, 181, 228, 60, 55, 102, 38, 122, 217],
"ASROCK_AMD_SB900_SMM_DISPATCHER_GUID": [3748899802, 31298, 20062, 179, 75, 230, 131, 6, 113, 201, 166],
"ASROCK_AMDCPU_DXE_GUID": [3786566962, 16719, 18139, 154, 238, 66, 0, 119, 243, 93, 190],
"ASROCK_SMBIOS_DMIEDIT_GUID": [3802613560, 35124, 18677, 132, 18, 153, 233, 72, 200, 220, 27],
"ASROCK_SECURITY_SELECT_DXE_GUID": [3832130086, 37480, 20144, 160, 135, 221, 76, 238, 55, 64, 75],
"ASROCK_FILE_EXPLORER_LITE_GUID": [3875982164, 33976, 16573, 131, 47, 127, 178, 213, 203, 135, 179],
"ASROCK_PLEDSMM_GUID": [3911953940, 15869, 19568, 159, 230, 56, 153, 243, 108, 120, 70],
"ASROCK_CPU_SMBIOS_DRIVER_GUID": [3930959592, 43089, 19103, 171, 244, 183, 159, 162, 82, 130, 145],
"ASROCK_AAFTBL_DXE_GUID": [4279363330, 35107, 18380, 173, 48, 217, 224, 226, 64, 221, 16],
}
|
tf3d/utils/projections_test.py | deepneuralmachine/google-research | 23,901 | 44671 | <reponame>deepneuralmachine/google-research
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf3d.utils.projections."""
import numpy as np
import tensorflow as tf
from tf3d.utils import projections
class ProjectionsTest(tf.test.TestCase):
def _get_transformation_matrices(self):
# pylint: disable=bad-whitespace
rotate_world_to_camera = tf.constant([
[ 2.34773703e-04, -9.99944150e-01, -1.05634779e-02],
[ 1.04494076e-02, 1.05653536e-02, -9.99889553e-01],
[ 9.99945402e-01, 1.24365382e-04, 1.04513029e-02]
], dtype=tf.float32)
translate_world_to_camera = tf.constant([0.05705245,
-0.07546672,
-0.26938692], dtype=tf.float32)
tr_camera_to_image = tf.constant([
[ 721.53771973, 0. , 609.55932617],
[ 0. , 721.53771973, 172.85400391],
[ 0. , 0. , 1. ]], dtype=tf.float32)
# pylint: enable=bad-whitespace
return (rotate_world_to_camera,
translate_world_to_camera,
tr_camera_to_image)
def test_to_camera_frame(self):
num_points = 1000
original_wf_points = tf.random.uniform((num_points, 3),
minval=-10.0,
maxval=10.0)
(rotate_world_to_camera,
translate_world_to_camera,
_) = self._get_transformation_matrices()
cf_points = projections.to_camera_frame(original_wf_points,
rotate_world_to_camera,
translate_world_to_camera)
computed_wf_points = projections.to_world_frame(cf_points,
rotate_world_to_camera,
translate_world_to_camera)
self.assertAllClose(original_wf_points.numpy(), computed_wf_points.numpy())
self.assertEqual(original_wf_points.shape, (num_points, 3))
self.assertEqual(cf_points.shape, (num_points, 3))
self.assertEqual(computed_wf_points.shape, (num_points, 3))
def test_to_image_frame(self):
height = 375
width = 1242
cf_points = tf.convert_to_tensor([
[0.0, 0.0, 10.0],
[0.0, 0.2, 12.0],
[0.0, -0.2, 20.0],
[1.0, 0.0, 20.0],
[-1.0, 0.0, 20.0],
[0, 0, -10],
[-1, 1, -20]], dtype=tf.float32)
_, _, camera_intrinsics = self._get_transformation_matrices()
image_points, within_image = projections.to_image_frame(cf_points,
height,
width,
camera_intrinsics)
np_image_points = image_points.numpy()
np_within_image = within_image.numpy()
self.assertEqual(np_within_image.sum(), 5)
self.assertTrue((np_image_points[np_within_image] >= 0).all())
self.assertTrue((np_image_points[np_within_image, 0] < width).all())
self.assertTrue((np_image_points[np_within_image, 1] < height).all())
def test_image_frame_to_camera_frame(self):
num_points = 10
height, width = 480, 640
image_frame = tf.concat([
tf.random.uniform(
[num_points, 1], minval=0.0, maxval=width, dtype=tf.float32),
tf.random.uniform(
[num_points, 1], minval=0.0, maxval=height, dtype=tf.float32)
],
axis=1)
camera_intrinsics = tf.constant(
[[500, 0., 320.], [0., 500., 120.], [0., 0., 1.]], dtype=tf.float32)
camera_frame = projections.image_frame_to_camera_frame(
image_frame=image_frame, camera_intrinsics=camera_intrinsics)
self.assertEqual(camera_frame.shape, (num_points, 3))
def _test_create_image_from_rank1_values_unbatched(self, use_sparse_tensor):
image_height, image_width = (240, 320)
xx = tf.random.uniform((10,), minval=0, maxval=image_width, dtype=tf.int32)
yy = tf.random.uniform((10,), minval=0, maxval=image_height, dtype=tf.int32)
pixel_locations = tf.stack([yy, xx], axis=1)
pixel_values = tf.ones((tf.shape(pixel_locations)[0],), dtype=tf.float32)
created_image = projections.create_image_from_point_values_unbatched(
pixel_locations=pixel_locations,
pixel_values=pixel_values,
image_height=image_height,
image_width=image_width,
default_value=255,
use_sparse_tensor=use_sparse_tensor)
self.assertAllClose(pixel_values.numpy(), np.ones((10,), dtype=np.uint8))
np_expected_image = np.full((image_height, image_width, 1), 255, np.uint8)
np_yy = pixel_locations.numpy()[:, 0]
np_xx = pixel_locations.numpy()[:, 1]
np_expected_image[np_yy, np_xx, Ellipsis] = [1]
self.assertAllClose(created_image.numpy(), np_expected_image)
def _test_create_image_from_rank1_values(self, use_sparse_tensor):
image_height, image_width = (240, 320)
xx = tf.random.uniform((4, 10),
minval=0,
maxval=image_width,
dtype=tf.int32)
yy = tf.random.uniform((4, 10),
minval=0,
maxval=image_height,
dtype=tf.int32)
pixel_locations = tf.stack([yy, xx], axis=2)
pixel_values = tf.random.uniform([4, 10, 3],
minval=-2.0,
maxval=2.0,
dtype=tf.float32)
num_valid_points = tf.constant([10, 3, 5, 7], dtype=tf.int32)
created_image = projections.create_image_from_point_values(
pixel_locations=pixel_locations,
pixel_values=pixel_values,
num_valid_points=num_valid_points,
image_height=image_height,
image_width=image_width,
default_value=255.0,
use_sparse_tensor=use_sparse_tensor)
self.assertAllEqual(created_image.shape, np.array([4, 240, 320, 3]))
def _test_create_image_from_rank2_values_unbatched(self, use_sparse_tensor):
image_height, image_width = (240, 320)
xx = tf.random.uniform((10,), minval=0, maxval=image_width, dtype=tf.int32)
yy = tf.random.uniform((10,), minval=0, maxval=image_height, dtype=tf.int32)
pixel_locations = tf.stack([yy, xx], axis=1)
pixel_values = tf.random.uniform((10, 3),
minval=0,
maxval=255,
dtype=tf.float32)
created_image = projections.create_image_from_point_values_unbatched(
pixel_locations=pixel_locations,
pixel_values=pixel_values,
image_height=image_height,
image_width=image_width,
default_value=0,
use_sparse_tensor=use_sparse_tensor)
self.assertEqual(created_image.shape, (image_height, image_width, 3))
np_pixel_locations = pixel_locations.numpy().round().astype(np.int32)
np_yy = np_pixel_locations[:, 0]
np_xx = np_pixel_locations[:, 1]
self.assertAllClose(
created_image.numpy()[np_yy, np_xx], pixel_values.numpy(), atol=1e-3)
def test_create_image_rank1_unbatched_sparse(self):
self._test_create_image_from_rank1_values_unbatched(use_sparse_tensor=True)
def test_create_image_rank1_unbatched_scatter(self):
self._test_create_image_from_rank1_values_unbatched(use_sparse_tensor=False)
def test_create_image_rank1_sparse(self):
self._test_create_image_from_rank1_values(use_sparse_tensor=True)
def test_create_image_rank1_scatter(self):
self._test_create_image_from_rank1_values(use_sparse_tensor=False)
def test_create_image_rank2_unbatched_sparse(self):
self._test_create_image_from_rank2_values_unbatched(use_sparse_tensor=True)
def test_create_image_rank2_unbatched_scatter(self):
self._test_create_image_from_rank2_values_unbatched(use_sparse_tensor=False)
def _test_move_image_values_to_points(self, use_sparse_tensor):
image_values = tf.constant(
[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]],
[[9.0, 10.0], [11.0, 12.0]]],
dtype=tf.float32)
image_point_indices = tf.constant([[[1], [-1]], [[6], [-1]], [[0], [3]]],
dtype=tf.int32)
num_points = 10
point_values = projections.move_image_values_to_points(
image_values=image_values,
image_point_indices=image_point_indices,
num_points=num_points,
default_value=-1.0,
use_sparse_tensor=use_sparse_tensor)
expected_point_values = tf.constant(
[[9.0, 10.0],
[1.0, 2.0],
[-1.0, -1.0],
[11.0, 12.0],
[-1.0, -1.0],
[-1.0, -1.0],
[5.0, 6.0],
[-1.0, -1.0],
[-1.0, -1.0],
[-1.0, -1.0]],
dtype=tf.float32)
self.assertAllClose(point_values.numpy(), expected_point_values.numpy())
def test_move_image_values_to_points_sparse(self):
self._test_move_image_values_to_points(use_sparse_tensor=True)
def test_move_image_values_to_points_scatter(self):
self._test_move_image_values_to_points(use_sparse_tensor=False)
def test_update_pixel_locations_given_deformed_meshgrid(self):
pixel_locations = tf.constant([[0, 1],
[1, 1],
[2, 0],
[2, 2],
[0, 2],
[0, 1],
[1, 1],
[3, 3]], dtype=tf.int32)
meshgrid_y = tf.constant([[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]])
meshgrid_x = tf.constant([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5]])
meshgrid_yx = tf.stack([meshgrid_y, meshgrid_x], axis=2)
deformed_meshgrid_y = tf.constant([[-1, 3, 2, 2],
[-1, -1, 3, 2],
[1, -1, 2, -1]])
deformed_meshgrid_x = tf.constant([[2, -2, 2, -1],
[-1, 3, 2, 3],
[2, -1, 3, -1]])
deformed_meshgrid_yx = tf.stack([deformed_meshgrid_y, deformed_meshgrid_x],
axis=2)
updated_pixel_locations = (
projections.update_pixel_locations_given_deformed_meshgrid(
pixel_locations=pixel_locations,
original_meshgrid=meshgrid_yx,
deformed_meshgrid=deformed_meshgrid_yx))
expected_updated_pixel_locations = tf.constant([[2, 0],
[0, 2],
[-1, -1],
[-1, -1],
[-1, -1],
[2, 0],
[0, 2],
[-1, -1]], dtype=tf.int32)
self.assertAllEqual(updated_pixel_locations.numpy(),
expected_updated_pixel_locations.numpy())
def test_project_points_with_depth_visibility_check(self):
point_positions = tf.constant([[-1.0, -1.0, 1.0],
[-1.0, 1.0, 1.0],
[1.0, -1.0, 1.0],
[1.0, 1.0, 1.0]], dtype=tf.float32)
camera_intrinsics = tf.constant([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]], dtype=tf.float32)
camera_rotation_matrix = tf.constant([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]], dtype=tf.float32)
camera_translation = tf.constant([5.0, 5.0, 0.0], dtype=tf.float32)
image_width = 10
image_height = 10
depth_image_00 = tf.ones([5, 5, 1], dtype=tf.float32) * 1.0
depth_image_01 = tf.ones([5, 5, 1], dtype=tf.float32) * 2.0
depth_image_10 = tf.ones([5, 5, 1], dtype=tf.float32) * 1.0
depth_image_11 = tf.ones([5, 5, 1], dtype=tf.float32) * 4.0
depth_image = tf.concat([
tf.concat([depth_image_00, depth_image_01], axis=1),
tf.concat([depth_image_10, depth_image_11], axis=1)
],
axis=0)
depth_threshold = 0.1
(points_in_image_frame,
visibility) = projections.project_points_with_depth_visibility_check(
point_positions=point_positions,
camera_intrinsics=camera_intrinsics,
camera_rotation_matrix=camera_rotation_matrix,
camera_translation=camera_translation,
image_width=image_width,
image_height=image_height,
depth_image=depth_image,
depth_intrinsics=camera_intrinsics,
depth_threshold=depth_threshold)
self.assertAllEqual(
visibility.numpy().astype(np.int32), np.array([1, 1, 0, 0]))
self.assertAllEqual(points_in_image_frame.numpy(), np.array([[4, 4],
[4, 6],
[6, 4],
[6, 6]]))
if __name__ == '__main__':
tf.test.main()
|
docs/core/examples/stdin.py | Khymeira/twisted | 4,612 | 44674 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An example of reading a line at a time from standard input
without blocking the reactor.
"""
from os import linesep
from twisted.internet import stdio
from twisted.protocols import basic
class Echo(basic.LineReceiver):
delimiter = linesep.encode("ascii")
def connectionMade(self):
self.transport.write(b">>> ")
def lineReceived(self, line):
self.sendLine(b"Echo: " + line)
self.transport.write(b">>> ")
def main():
stdio.StandardIO(Echo())
from twisted.internet import reactor
reactor.run()
if __name__ == "__main__":
main()
|
floweaver/hierarchy.py | fullflu/floweaver | 342 | 44675 | import networkx as nx
class Hierarchy:
def __init__(self, tree, column):
self.tree = tree
self.column = column
def _leaves_below(self, node):
leaves = sum(([vv for vv in v if self.tree.out_degree(vv) == 0]
for k, v in nx.dfs_successors(self.tree, node).items()),
[])
return sorted(leaves) or [node]
def __call__(self, *nodes):
"""Return process IDs below the given nodes in the tree"""
s = set()
for node in nodes:
if self.tree.in_degree(node) == 0:
return None # all
s.update(self._leaves_below(node))
if len(s) == 1:
query = '{} == "{}"'.format(self.column, s.pop())
else:
query = '{} in {}'.format(self.column, repr(sorted(s)))
return query
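# Usage sketch with a made-up two-level tree (not part of floweaver itself):
#   tree = nx.DiGraph([('*', 'metals'), ('metals', 'steel'), ('metals', 'zinc')])
#   h = Hierarchy(tree, 'process')
#   h('metals')  # -> "process in ['steel', 'zinc']"
#   h('steel')   # -> 'process == "steel"'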
|
venv/Lib/site-packages/win32/Demos/win32gui_demo.py | ajayiagbebaku/NFL-Model | 150 | 44684 | <reponame>ajayiagbebaku/NFL-Model
# The start of a win32gui generic demo.
# Feel free to contribute more demos back ;-)
import win32gui, win32con, win32api
import time, math, random
def _MyCallback(hwnd, extra):
hwnds, classes = extra
hwnds.append(hwnd)
classes[win32gui.GetClassName(hwnd)] = 1
def TestEnumWindows():
windows = []
classes = {}
win32gui.EnumWindows(_MyCallback, (windows, classes))
print(
"Enumerated a total of %d windows with %d classes"
% (len(windows), len(classes))
)
if "tooltips_class32" not in classes:
print("Hrmmmm - I'm very surprised to not find a 'tooltips_class32' class.")
def OnPaint_1(hwnd, msg, wp, lp):
dc, ps = win32gui.BeginPaint(hwnd)
win32gui.SetGraphicsMode(dc, win32con.GM_ADVANCED)
br = win32gui.CreateSolidBrush(win32api.RGB(255, 0, 0))
win32gui.SelectObject(dc, br)
angle = win32gui.GetWindowLong(hwnd, win32con.GWL_USERDATA)
win32gui.SetWindowLong(hwnd, win32con.GWL_USERDATA, angle + 2)
r_angle = angle * (math.pi / 180)
win32gui.SetWorldTransform(
dc,
{
"M11": math.cos(r_angle),
"M12": math.sin(r_angle),
"M21": math.sin(r_angle) * -1,
"M22": math.cos(r_angle),
"Dx": 250,
"Dy": 250,
},
)
win32gui.MoveToEx(dc, 250, 250)
win32gui.BeginPath(dc)
win32gui.Pie(dc, 10, 70, 200, 200, 350, 350, 75, 10)
win32gui.Chord(dc, 200, 200, 850, 0, 350, 350, 75, 10)
win32gui.LineTo(dc, 300, 300)
win32gui.LineTo(dc, 100, 20)
win32gui.LineTo(dc, 20, 100)
win32gui.LineTo(dc, 400, 0)
win32gui.LineTo(dc, 0, 400)
win32gui.EndPath(dc)
win32gui.StrokeAndFillPath(dc)
win32gui.EndPaint(hwnd, ps)
return 0
wndproc_1 = {win32con.WM_PAINT: OnPaint_1}
def OnPaint_2(hwnd, msg, wp, lp):
dc, ps = win32gui.BeginPaint(hwnd)
win32gui.SetGraphicsMode(dc, win32con.GM_ADVANCED)
l, t, r, b = win32gui.GetClientRect(hwnd)
for x in range(25):
vertices = (
{
"x": int(random.random() * r),
"y": int(random.random() * b),
"Red": int(random.random() * 0xFF00),
"Green": 0,
"Blue": 0,
"Alpha": 0,
},
{
"x": int(random.random() * r),
"y": int(random.random() * b),
"Red": 0,
"Green": int(random.random() * 0xFF00),
"Blue": 0,
"Alpha": 0,
},
{
"x": int(random.random() * r),
"y": int(random.random() * b),
"Red": 0,
"Green": 0,
"Blue": int(random.random() * 0xFF00),
"Alpha": 0,
},
)
mesh = ((0, 1, 2),)
win32gui.GradientFill(dc, vertices, mesh, win32con.GRADIENT_FILL_TRIANGLE)
win32gui.EndPaint(hwnd, ps)
return 0
wndproc_2 = {win32con.WM_PAINT: OnPaint_2}
def TestSetWorldTransform():
wc = win32gui.WNDCLASS()
wc.lpszClassName = "test_win32gui_1"
wc.style = win32con.CS_GLOBALCLASS | win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hbrBackground = win32con.COLOR_WINDOW + 1
wc.lpfnWndProc = wndproc_1
class_atom = win32gui.RegisterClass(wc)
hwnd = win32gui.CreateWindow(
wc.lpszClassName,
"Spin the Lobster!",
win32con.WS_CAPTION | win32con.WS_VISIBLE,
100,
100,
900,
900,
0,
0,
0,
None,
)
for x in range(500):
win32gui.InvalidateRect(hwnd, None, True)
win32gui.PumpWaitingMessages()
time.sleep(0.01)
win32gui.DestroyWindow(hwnd)
win32gui.UnregisterClass(wc.lpszClassName, None)
def TestGradientFill():
wc = win32gui.WNDCLASS()
wc.lpszClassName = "test_win32gui_2"
wc.style = win32con.CS_GLOBALCLASS | win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hbrBackground = win32con.COLOR_WINDOW + 1
wc.lpfnWndProc = wndproc_2
class_atom = win32gui.RegisterClass(wc)
hwnd = win32gui.CreateWindowEx(
0,
class_atom,
"Kaleidoscope",
win32con.WS_CAPTION
| win32con.WS_VISIBLE
| win32con.WS_THICKFRAME
| win32con.WS_SYSMENU,
100,
100,
900,
900,
0,
0,
0,
None,
)
s = win32gui.GetWindowLong(hwnd, win32con.GWL_EXSTYLE)
win32gui.SetWindowLong(hwnd, win32con.GWL_EXSTYLE, s | win32con.WS_EX_LAYERED)
win32gui.SetLayeredWindowAttributes(hwnd, 0, 175, win32con.LWA_ALPHA)
for x in range(30):
win32gui.InvalidateRect(hwnd, None, True)
win32gui.PumpWaitingMessages()
time.sleep(0.3)
win32gui.DestroyWindow(hwnd)
win32gui.UnregisterClass(class_atom, None)
print("Enumerating all windows...")
TestEnumWindows()
print("Testing drawing functions ...")
TestSetWorldTransform()
TestGradientFill()
print("All tests done!")
|
products/ui/llbuildui/graphalgorithms.py | uraimo/swift-llbuild | 1,034 | 44688 | import orderedset
def find_cycle(nodes, successors):
path = orderedset.orderedset()
visited = set()
def visit(node):
# If the node is already in the current path, we have found a cycle.
if not path.add(node):
return (path, node)
# If we have otherwise already visited this node, we don't need to visit
# it again.
if node in visited:
item = path.pop()
assert item == node
return
visited.add(node)
# Otherwise, visit all the successors.
for succ in successors(node):
cycle = visit(succ)
if cycle is not None:
return cycle
item = path.pop()
assert item == node
return None
for node in nodes:
cycle = visit(node)
if cycle is not None:
return cycle
else:
assert not path.items
return None
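# Rough usage sketch (hypothetical graph, not part of llbuild itself):
#   edges = {"a": ["b"], "b": ["c"], "c": ["a"], "d": []}
#   cycle = find_cycle(edges.keys(), lambda n: edges.get(n, ()))
#   # -> (path holding a, b, c, plus the node "a" that closed the cycle)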
|
tests/python/kaolin/metrics/__init__.py | mlej8/kaolin | 3,747 | 44700 | from . import test_trianglemesh
from . import test_voxelgrid |
securityheaders/checkers/hsts/test_maxagezero.py | th3cyb3rc0p/securityheaders | 151 | 44722 | <reponame>th3cyb3rc0p/securityheaders
import unittest
from securityheaders.checkers.hsts import HSTSMaxAgeZeroChecker
class HSTSMaxAgeZeroCheckerTest(unittest.TestCase):
def setUp(self):
self.x = HSTSMaxAgeZeroChecker()
def test_checkNoHSTS(self):
nox = dict()
nox['test'] = 'value'
self.assertEqual(self.x.check(nox), [])
def test_checkNone(self):
nonex = None
self.assertEqual(self.x.check(nonex), [])
def test_checkNoneHSTS(self):
hasx = dict()
hasx['strict-transport-security'] = None
self.assertEqual(self.x.check(hasx), [])
def test_ValidHSTS(self):
hasx4 = dict()
hasx4['strict-transport-security'] = "max-age=31536000; includeSubDomains"
result = self.x.check(hasx4)
        self.assertEqual(result, [])
def test_ZeroMaxAge(self):
hasx5 = dict()
hasx5['strict-transport-security'] = "max-age=0; includeSubDomains"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
if __name__ == '__main__':
unittest.main()
|
merlion/models/anomaly/forecast_based/arima.py | ankitakashyap05/Merlion | 2,215 | 44753 | <reponame>ankitakashyap05/Merlion
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Classic ARIMA (AutoRegressive Integrated Moving Average) forecasting model,
adapted for anomaly detection.
"""
from merlion.models.anomaly.forecast_based.base import ForecastingDetectorBase
from merlion.models.anomaly.base import DetectorConfig
from merlion.models.forecast.arima import ArimaConfig, Arima
from merlion.post_process.threshold import AggregateAlarms
class ArimaDetectorConfig(ArimaConfig, DetectorConfig):
_default_threshold = AggregateAlarms(alm_threshold=2.5)
class ArimaDetector(ForecastingDetectorBase, Arima):
config_class = ArimaDetectorConfig
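    # No extra logic is needed here: Arima supplies the forecasts, and
    # ForecastingDetectorBase presumably converts the forecast errors into
    # anomaly scores, which the AggregateAlarms threshold above post-processes.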
|
tests/conftest.py | GoodMonsters/Building-Data-Science-Applications-with-FastAPI | 107 | 44757 | from typing import Callable, AsyncGenerator, Generator
import asyncio
import httpx
import pytest
from asgi_lifespan import LifespanManager
from fastapi import FastAPI
from fastapi.testclient import TestClient
TestClientGenerator = Callable[[FastAPI], AsyncGenerator[httpx.AsyncClient, None]]
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture
async def client(
request: pytest.FixtureRequest,
) -> AsyncGenerator[httpx.AsyncClient, None]:
marker = request.node.get_closest_marker("fastapi")
if marker is None:
raise ValueError("client fixture: the marker fastapi must be provided")
try:
app = marker.kwargs["app"]
except KeyError:
raise ValueError(
"client fixture: keyword argument app must be provided in the marker"
)
if not isinstance(app, FastAPI):
raise ValueError("client fixture: app must be a FastAPI instance")
dependency_overrides = marker.kwargs.get("dependency_overrides")
if dependency_overrides:
if not isinstance(dependency_overrides, dict):
raise ValueError(
"client fixture: dependency_overrides must be a dictionary"
)
app.dependency_overrides = dependency_overrides
run_lifespan_events = marker.kwargs.get("run_lifespan_events", True)
if not isinstance(run_lifespan_events, bool):
raise ValueError("client fixture: run_lifespan_events must be a bool")
test_client_generator = httpx.AsyncClient(app=app, base_url="http://app.io")
if run_lifespan_events:
async with LifespanManager(app):
async with test_client_generator as test_client:
yield test_client
else:
async with test_client_generator as test_client:
yield test_client
@pytest.fixture
def websocket_client(
request: pytest.FixtureRequest,
event_loop: asyncio.AbstractEventLoop,
) -> Generator[TestClient, None, None]:
asyncio.set_event_loop(event_loop)
marker = request.node.get_closest_marker("fastapi")
if marker is None:
raise ValueError("client fixture: the marker fastapi must be provided")
try:
app = marker.kwargs["app"]
except KeyError:
raise ValueError(
"client fixture: keyword argument app must be provided in the marker"
)
if not isinstance(app, FastAPI):
raise ValueError("client fixture: app must be a FastAPI instance")
dependency_overrides = marker.kwargs.get("dependency_overrides")
if dependency_overrides:
if not isinstance(dependency_overrides, dict):
raise ValueError(
"client fixture: dependency_overrides must be a dictionary"
)
app.dependency_overrides = dependency_overrides
with TestClient(app) as test_client:
yield test_client
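# Sketch of how a test would consume these fixtures (app, dependency and test
# names are hypothetical; assumes an async runner such as pytest-asyncio):
#   @pytest.mark.fastapi(app=my_app, dependency_overrides={get_db: fake_db})
#   @pytest.mark.asyncio
#   async def test_root(client):
#       response = await client.get("/")
#       assert response.status_code == 200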
|
examples/applications/restapi/example_ws_client/ws_client.py | electrumsv/electrumsv | 136 | 44814 | import aiohttp
import asyncio
import json
import logging
import requests
from typing import cast, Iterable, List, Optional
from electrumsv.constants import TxFlags
logging.basicConfig(level=logging.DEBUG)
class TxStateWSClient:
def __init__(self, host: str="127.0.0.1", port: int=9999, wallet_name: str="worker1.sqlite",
wallet_password: str="<PASSWORD>", account: int=1) -> None:
self.host = host
self.port = port
self.url = f'http://{self.host}:{self.port}/v1/regtest/dapp/' \
f'wallets/{wallet_name}/{account}/txs/websocket/text-events'
self.wallet_name = wallet_name
        self.wallet_password = wallet_password
self.account = account
self.session = aiohttp.ClientSession()
self._ws: Optional[aiohttp.client.ClientWebSocketResponse] = None
self.msg_queue = asyncio.Queue()
self.logger = logging.getLogger("tx-state-ws-client")
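    # The client subscribes to transaction-state events for the given wallet
    # account over the websocket URL above; block_until_mempool() waits until
    # each requested txid is at least CLEARED (or SETTLED), while
    # block_until_confirmed() waits until each txid is SETTLED.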
async def __aenter__(self):
# Normally the RESTAPI pulls the password out of the body, but `ws_connect` cannot be
# passed a data/json parameter even if it's method is changed to POST.
self._ws = await self.session.ws_connect(self.url,
headers={ "X-Wallet-Password": self.wallet_password })
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await cast(aiohttp.client.ClientWebSocketResponse, self._ws).close()
await self.session.close()
async def send_str(self, msg: str):
await cast(aiohttp.client.ClientWebSocketResponse, self._ws).send_str(msg)
async def _receive_msgs(self):
try:
async for msg in cast(aiohttp.client.ClientWebSocketResponse, self._ws):
if json.loads(msg.data).get('code'):
self.logger.debug(f'Error message received from server: {msg.data}')
continue
self.logger.debug(f'Message received from server: {msg.data}')
self.msg_queue.put_nowait(msg.data)
await asyncio.sleep(0)
if msg.type in (aiohttp.WSMsgType.CLOSED,
aiohttp.WSMsgType.ERROR):
break
finally:
self.msg_queue.put_nowait(None) # poison pill
async def block_until_mempool(self, txids: Iterable[str]) -> None:
self._receive_msg_task = asyncio.create_task(self._receive_msgs())
subs = json.dumps({
"txids": list(txids)
})
txids_set = set(txids)
await self.send_str(subs)
while True:
msg = await self.msg_queue.get()
if not msg: # poison pill
break
msg = json.loads(msg)
txid = msg.get("txid")
if not txid:
continue
tx_flags = msg.get("tx_flags")
if msg.get("txid") in txids_set and \
(tx_flags & TxFlags.STATE_CLEARED) == TxFlags.STATE_CLEARED or \
(tx_flags & TxFlags.STATE_SETTLED) == TxFlags.STATE_SETTLED:
txids_set.remove(txid)
if len(txids_set) == 0:
break
async def block_until_confirmed(self, txids: List[str]) -> None:
self._receive_msg_task = asyncio.create_task(self._receive_msgs())
subs = json.dumps({
"txids": list(txids)
})
txids_set = set(txids)
await self.send_str(subs)
while True:
msg = await self.msg_queue.get()
if not msg: # poison pill
break
self.logger.debug(msg)
msg = json.loads(msg)
txid = msg.get("txid")
if not txid:
continue
tx_flags = msg.get("tx_flags")
if msg.get("txid") in txids_set and \
(tx_flags & TxFlags.STATE_SETTLED == TxFlags.STATE_SETTLED):
txids_set.remove(txid)
if len(txids_set) == 0:
break
if __name__ == "__main__":
logger = logging.getLogger("main")
logger_urllib3 = logging.getLogger("urllib3")
logger_urllib3.setLevel(logging.WARNING)
async def wait_for_mempool(txids):
async with TxStateWSClient() as ws_client:
await ws_client.block_until_mempool(txids)
async def wait_for_confirmation(txids):
async with TxStateWSClient() as ws_client:
await ws_client.block_until_confirmed(txids)
result1 = requests.post(f'http://127.0.0.1:9999/v1/regtest/dapp/wallets/'
f'worker1.sqlite/load_wallet')
result2 = requests.post(f'http://127.0.0.1:9999/v1/regtest/dapp/wallets/'
f'worker1.sqlite/1/topup_account')
if result2.status_code != 200:
raise requests.exceptions.HTTPError(result2.text)
txids = [result2.json()["txid"]]
logger.info("mine a block to observe the websocket receiving the push notification and "
"unblocking the thread")
asyncio.run(wait_for_confirmation(txids))
|
opendatatools/hedgefund/simu_agent.py | jjcc/OpenData | 1,179 | 44821 | from opendatatools.common import RestAgent, md5
from progressbar import ProgressBar
import json
import pandas as pd
import io
import hashlib
import time
index_map = {
'Barclay_Hedge_Fund_Index' : 'ghsndx',
'Convertible_Arbitrage_Index' : 'ghsca',
'Distressed_Securities_Index' : 'ghsds',
'Emerging_Markets_Index' : 'ghsem',
'Equity_Long_Bias_Index' : 'ghselb',
'Equity_Long_Short_Index' : 'ghsels',
'Equity_Market_Neutral_Index' : 'ghsemn',
'European_Equities_Index' : 'ghsee',
'Event_Driven_Index' : 'ghsed',
'Fixed_Income_Arbitrage_Index' : 'ghsfia',
'Fund_of_Funds_Index' : 'ghsfof',
'Global_Macro_Index' : 'ghsmc',
'Healthcare_&_Biotechnology_Index': 'ghsbio',
'Merger_Arbitrage_Index' : 'ghsma',
'Multi_Strategy_Index' : 'ghsms',
'Pacific_Rim_Equities_Index' : 'ghspre',
'Technology_Index' : 'ghstec',
}
class SimuAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.user_info = None
self.df_fundlist = None
self.cookies = None
def login(self, username, password):
url = 'https://passport.simuwang.com/index.php?m=Passport&c=auth&a=login&type=login&name=%s&pass=%s&reme=1&rn=1' % (username, password)
self.add_headers({'Referer': 'https://dc.simuwang.com/'})
response = self.do_request(url)
if response is None:
            return None, 'Login failed'
jsonobj = json.loads(response)
suc = jsonobj['suc']
msg = jsonobj['msg']
if suc != 1:
return None, msg
self.cookies = self.get_cookies()
self.user_info = jsonobj['data']
return self.user_info, msg
def prepare_cookies(self, url):
response = self.do_request(url, None)
if response is not None:
cookies = self.get_cookies()
return cookies
else:
return None
def _get_rz_token(self, time):
mk = time * 158995555893
mtoken = md5(md5(str(mk))) + '.' + str(time)
return mtoken
def _get_fund_list_page(self, page_no):
url = 'https://dc.simuwang.com/ranking/get?page=%s&condition=fund_type:1,6,4,3,8,2;ret:9;rating_year:1;istiered:0;company_type:1;sort_name:profit_col2;sort_asc:desc;keyword:' % page_no
response = self.do_request(url)
if response is None:
            return None, 'Failed to fetch data', None
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['msg']
if code != 1000:
return None, msg, None
df = pd.DataFrame(jsonobj['data'])
pageinfo = jsonobj['pager']
return df, '', pageinfo
def load_data(self):
page_no = 1
df_list = []
df, msg, pageinfo = self._get_fund_list_page(page_no)
if df is None:
return None, msg
df_list.append(df)
page_count = pageinfo['pagecount']
process_bar = ProgressBar().start(max_value=page_count)
page_no = page_no + 1
while page_no <= page_count:
df, msg, pageinfo = self._get_fund_list_page(page_no)
if df is None:
return None, msg
df_list.append(df)
process_bar.update(page_no)
page_no = page_no + 1
self.df_fundlist = pd.concat(df_list)
return self.df_fundlist, ''
def get_fund_list(self):
if self.df_fundlist is None:
            return None, 'Please load the data first with load_data'
return self.df_fundlist, ''
def _get_sign(self, url, params):
str = url
for k,v in params.items():
str = str + k + params[k]
sha1 = hashlib.sha1()
sha1.update(str.encode('utf8'))
sign = sha1.hexdigest()
return sign
def _get_token(self, fund_id):
sign = self._get_sign('https://dc.simuwang.com/Api/getToken', {'id' : fund_id})
url = 'https://dc.simuwang.com/Api/getToken?id=%s&sign=%s' % (fund_id, sign)
self.add_headers({'Referer': 'https://dc.simuwang.com/'})
response = self.do_request(url)
if response is None:
            return None, 'Failed to fetch data'
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['message']
if code != 1000 :
return code, msg
self.cookies.update(self.get_cookies())
salt = jsonobj['data']
muid = self.user_info['userid']
#str = 'id%smuid%spage%s%s' % (fund_id, muid, page_no, salt)
str = '%s%s' % (fund_id, salt)
sha1 = hashlib.sha1()
sha1.update(str.encode('utf8'))
token = sha1.hexdigest()
return token, ''
def _get_fund_nav_page(self, fund_id, page_no):
muid = self.user_info['userid']
token, msg = self._get_token(fund_id)
if token is None:
            return None, 'Failed to get token: ' + msg, ''
url = 'https://dc.simuwang.com/fund/getNavList.html'
self.add_headers({'Referer': 'https://dc.simuwang.com/product/%s.html' % fund_id})
data = {
'id' : fund_id,
'muid' : muid,
'page' : str(page_no),
'token': token,
}
response = self.do_request(url, param=data, cookies=self.cookies, encoding="utf8")
if response is None:
            return None, 'Failed to fetch data', ''
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['msg']
if code != 1000 :
return code, msg, ''
df = pd.DataFrame(jsonobj['data'])
pageinfo = jsonobj['pager']
return df, '', pageinfo
def _bit_encrypt(self, str, key):
cryText = ''
keyLen = len(key)
strLen = len(str)
for i in range(strLen):
k = i % keyLen
cryText = cryText + chr(ord(str[i]) - k)
return cryText
def _bit_encrypt2(self, str, key):
cryText = ''
keyLen = len(key)
strLen = len(str)
for i in range(strLen):
k = i % keyLen
cryText = cryText + chr(ord(str[i]) ^ ord(key[k]))
return cryText
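    # The site serves NAV values lightly obfuscated: the two helpers above
    # undo a per-character shift (or XOR) against a 32-character key scraped
    # from the product page, and _get_decrypt_info() below picks the variant
    # and key to use for a given fund.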
def _decrypt_data(self, str, func, key):
# return self._bit_encrypt(str, 'cd0a8bee4c6b2f8a91ad5538dde2eb34')
# return self._bit_encrypt(str, '937ab03370497f2b4e8d0599ad25c44c')
# return self._bit_encrypt(str, '083975ce19392492bbccff21a52f1ace')
return func(str, key)
def _get_decrypt_info(self, fund_id):
url = 'https://dc.simuwang.com/product/%s.html' % fund_id
response = self.do_request(url, param=None, cookies=self.cookies, encoding="utf8")
if response is None:
            return None, None  # match the (decrypt_func, key) arity of the success path
if "String.fromCharCode(str.charCodeAt(i) - k)" in response:
decrypt_func = self._bit_encrypt
else:
decrypt_func = self._bit_encrypt2
if response.find("return xOrEncrypt(str, ")> 0:
tag = "return xOrEncrypt(str, "
else:
tag = "return bitEncrypt(str, "
pos = response.index(tag) + len(tag) + 1
key = response[pos:pos+32]
return decrypt_func, key
def get_fund_nav(self, fund_id, time_elapse = 0):
if self.user_info is None:
return None, '请先登录'
page_no = 1
df_list = []
df, msg, pageinfo = self._get_fund_nav_page(fund_id, page_no)
if df is None:
return None, msg
df_list.append(df)
page_count = pageinfo['pagecount']
page_no = page_no + 1
while page_no <= page_count:
            try_times = 1
            while try_times <= 3:
                df, msg, pageinfo = self._get_fund_nav_page(fund_id, page_no)
                if df is None:
                    # Give up after three failed attempts instead of silently
                    # skipping this page.
                    if try_times >= 3:
                        return None, msg
                    try_times = try_times + 1
                    continue
                else:
                    df_list.append(df)
                    break
page_no = page_no + 1
if time_elapse > 0:
time.sleep(time_elapse)
df_nav = pd.concat(df_list)
df_nav.drop('c', axis=1, inplace=True)
df_nav.rename(columns={'d': 'date', 'n': 'nav', 'cn' : 'accu_nav', 'cnw' : 'accu_nav_w'}, inplace=True)
        # This site puts far too many little traps in the way
func, key = self._get_decrypt_info(fund_id)
df_nav['nav'] = df_nav['nav'].apply(lambda x : self._decrypt_data(x, func, key))
df_nav['accu_nav'] = df_nav['accu_nav'].apply(lambda x : self._decrypt_data(x, func, key))
df_nav['accu_nav_w'] = df_nav['accu_nav_w'].apply(lambda x : self._decrypt_data(x, func, key))
#df_nav['nav'] = df_nav['nav'] - df_nav.index * 0.01 - 0.01
#df_nav['accu_nav'] = df_nav['accu_nav'].apply(lambda x: float(x) - 0.01)
#df_nav['accu_nav_w'] = df_nav['accu_nav_w'].apply(lambda x: float(x) - 0.02)
return df_nav, ''
class BarclayAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.add_headers({'Referer': 'https://www.barclayhedge.com/research/indices/ghs/Equity_Long_Short_Index.html'})
        self.add_headers({'Content-Type': 'application/x-www-form-urlencoded'})
def get_data(self, index):
prog_cod = index_map[index]
url = "https://www.barclayhedge.com/cgi-bin/barclay_stats/ghsndx.cgi"
param = {
'dump': 'excel',
'prog_cod': prog_cod,
}
response = self.do_request(url, param=param, method='POST', type='binary')
if response is not None:
excel = pd.ExcelFile(io.BytesIO(response))
df = excel.parse('Sheet1').dropna(how='all').copy().reset_index().drop(0)
df.columns = ['year', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'YTD']
df = df.set_index('year')
return df, ''
return None, "获取数据失败" |
examples/geo/divvy.py | fding253/nxviz | 385 | 44826 | import networkx as nx
import matplotlib.pyplot as plt
from nxviz import GeoPlot
G = nx.read_gpickle("divvy.pkl")
print(list(G.nodes(data=True))[0])
G_new = G.copy()
for n1, n2, d in G.edges(data=True):
if d["count"] < 200:
G_new.remove_edge(n1, n2)
g = GeoPlot(
G_new,
node_lat="latitude",
node_lon="longitude",
node_color="dpcapacity",
node_size=0.005,
)
g.draw()
plt.show()
|
experiments/smal_shape.py | silviazuffi/smalst | 121 | 44842 | <filename>experiments/smal_shape.py
"""
Example usage:
python -m smalst.experiments.smal_shape --zebra_dir='smalst/zebra_no_toys_wtex_1000_0' --num_epochs=100000 --save_epoch_freq=20 --name=smal_net_600 --save_training_imgs=True --num_images=20000 --do_validation=True
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import os.path as osp
import numpy as np
import torch
import torchvision
from torch.autograd import Variable
import scipy.io as sio
import scipy
import scipy.misc
from collections import OrderedDict
import pickle as pkl
from ..data import zebra as zebra_data
from ..utils import visutil
from ..utils import smal_vis
from ..utils import image as image_utils
from ..nnutils import train_utils
from ..nnutils import loss_utils
from ..nnutils import smal_mesh_net
from ..nnutils.nmr import NeuralRenderer
from ..nnutils import geom_utils
flags.DEFINE_string('dataset', 'zebra', 'zebra')
# Weights:
flags.DEFINE_float('kp_loss_wt', 10., 'keypoint loss weight')
flags.DEFINE_float('kp_2D_loss_wt', 10., 'loss weight for the 2D keypoints predicted by the network')
flags.DEFINE_float('mask_loss_wt', 30., 'mask loss weight')
flags.DEFINE_float('cam_loss_wt', 10000., 'weights to camera loss')
flags.DEFINE_float('deform_reg_wt', 100., 'reg to deformation')
flags.DEFINE_float('triangle_reg_wt', 100., 'weights to triangle smoothness prior')
flags.DEFINE_float('vert2kp_loss_wt', .16, 'reg to vertex assignment')
flags.DEFINE_float('tex_loss_wt', 10., 'weights to tex loss')
flags.DEFINE_boolean('grad_v_in_tex_loss', False, '')
flags.DEFINE_boolean('use_keypoints', True, 'use keypoints loss')
flags.DEFINE_boolean('use_mask', True, 'use mask loss')
flags.DEFINE_boolean('use_shape_reg', False, 'use shape regularizers')
flags.DEFINE_float('tex_map_loss_wt', 10., 'weights to tex map loss')
flags.DEFINE_float('tex_dt_loss_wt', .5, 'weights to tex dt loss')
flags.DEFINE_float('mod_trans_loss_wt', 4000., 'weights for model translation loss')
flags.DEFINE_float('mod_pose_loss_wt', 200000., 'weights for model pose loss')
flags.DEFINE_float('betas_reg_wt', 100000., 'weights for betas prior loss')
flags.DEFINE_float('delta_v_loss_wt', 100000., 'weights for model delta_v')
flags.DEFINE_float('occ_loss_wt', 100., 'weights for occlusion loss')
flags.DEFINE_boolean('infer_vert2kp', False, 'estimate keypoints on the 3D model instead of using predefined values.')
flags.DEFINE_boolean('no_delta_v', False, 'set predicted deformations to zero')
flags.DEFINE_boolean('use_gtpose', False, 'if true uses gt pose for projection, but trans still gets trained.')
flags.DEFINE_boolean('use_gttrans', False, 'if true uses gt trans for projection, but pose still gets trained.')
flags.DEFINE_boolean('use_gtcam', False, 'if true uses gt cam for projection, but cam still gets trained.')
flags.DEFINE_boolean('use_gtbetas', False, 'if true uses gt betas for projection, but betas still gets trained.')
flags.DEFINE_boolean('use_gtdeltav', False, '')
flags.DEFINE_boolean('use_gttexture', False, '')
flags.DEFINE_boolean('use_camera_loss', True, 'if train with gt camera')
flags.DEFINE_boolean('random_bkg', False, 'if using a random background rather than black in the pred image')
flags.DEFINE_boolean('use_perceptual_loss', True, '')
flags.DEFINE_boolean('uv_flow', True, '')
flags.DEFINE_float('uv_flow_loss_wt', 100000., 'weights for uv_flow loss')
flags.DEFINE_boolean('use_pose_geodesic_loss', True, '')
flags.DEFINE_boolean('use_loss_on_whole_image', False, 'whether to compose the predicted animal with the image background')
flags.DEFINE_boolean('use_tex_dt', True, 'if use loss (4) in the birds paper')
flags.DEFINE_boolean('white_balance_for_texture_map', False, '')
flags.DEFINE_boolean('use_img_as_background', False, 'whether to use the input image as the background for the optimization')
flags.DEFINE_boolean('use_gtmask_for_background', False, 'whether to use the ground-truth mask when building the background for the optimization')
flags.DEFINE_boolean('use_per_image_rgb_bg', False, 'whether to compute per-image rgb colors for the background in the optimization')
opts = flags.FLAGS
curr_path = osp.dirname(osp.abspath(__file__))
cache_path = osp.join(curr_path, '..', 'cachedir')
class ShapeTrainer(train_utils.Trainer):
def define_model(self):
opts = self.opts
self.symmetric = opts.symmetric
img_size = (opts.img_size, opts.img_size)
texture_mask_path = 'smalst/'+opts.dataset+'_data/texture_maps/my_smpl_00781_4_all_template_w_tex_uv_001_mask_small.png'
self.texture_map_mask = torch.Tensor(scipy.misc.imread(texture_mask_path) / 255.0).cuda(device=opts.gpu_id)
tex_masks = None
data_path = 'smalst/smpl_models/my_smpl_data_00781_4_all.pkl'
        data = pkl.load(open(data_path, 'rb'))
pca_var = data['eigenvalues'][:opts.num_betas]
self.betas_prec = torch.Tensor(pca_var).cuda(device=opts.gpu_id).expand(opts.batch_size, opts.num_betas)
self.model = smal_mesh_net.MeshNet(
img_size, opts, nz_feat=opts.nz_feat, num_kps=opts.num_kps, tex_masks=tex_masks)
if opts.num_pretrain_epochs > 0:
self.load_network(self.model, 'pred', opts.num_pretrain_epochs)
self.model = self.model.cuda(device=opts.gpu_id)
if not opts.infer_vert2kp:
            self.vert2kp = torch.Tensor(pkl.load(open('smalst/'+opts.dataset+'_data/verts2kp.pkl', 'rb'))).cuda(device=opts.gpu_id)
# Data structures to use for triangle priors.
edges2verts = self.model.edges2verts
# B x E x 4
edges2verts = np.tile(np.expand_dims(edges2verts, 0), (opts.batch_size, 1, 1))
self.edges2verts = Variable(torch.LongTensor(edges2verts).cuda(device=opts.gpu_id), requires_grad=False)
# For renderering.
faces = self.model.faces.view(1, -1, 3)
self.faces = faces.repeat(opts.batch_size, 1, 1)
self.renderer = NeuralRenderer(opts.img_size, opts.projection_type, opts.norm_f, opts.norm_z, opts.norm_f0)
if opts.texture:
self.tex_renderer = NeuralRenderer(opts.img_size, opts.projection_type, opts.norm_f, opts.norm_z, opts.norm_f0)
# Only use ambient light for tex renderer
if opts.use_directional_light:
self.tex_renderer.directional_light_only()
else:
self.tex_renderer.ambient_light_only()
# For visualization
self.vis_rend = smal_vis.VisRenderer(opts.img_size, faces.data.cpu().numpy(), opts.projection_type, opts.norm_f, opts.norm_z, opts.norm_f0)
self.background_imgs = None
return
def init_dataset(self):
opts = self.opts
if opts.dataset == 'zebra':
self.data_module = zebra_data
else:
            print('Unknown dataset %s!' % opts.dataset)
self.dataloader = self.data_module.data_loader(opts)
self.resnet_transform = torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def define_criterion(self):
if opts.use_keypoints:
self.projection_loss = loss_utils.kp_l2_loss
if opts.use_mask:
self.mask_loss_fn = loss_utils.mask_loss
if opts.infer_vert2kp:
self.entropy_loss = loss_utils.entropy_loss
if self.opts.use_camera_loss:
self.camera_loss = loss_utils.camera_loss
if opts.use_smal_betas:
self.betas_loss_fn = loss_utils.betas_loss
self.delta_v_loss_fn = loss_utils.delta_v_loss
if self.opts.texture:
if self.opts.use_perceptual_loss:
if False:
self.texture_loss = loss_utils.MSE_texture_loss
else:
self.texture_loss = loss_utils.PerceptualTextureLoss()
else:
self.texture_loss = loss_utils.texture_loss
self.texture_dt_loss_fn = loss_utils.texture_dt_loss
if opts.texture_map:
self.texture_map_loss = loss_utils.texture_map_loss
if opts.uv_flow:
self.uv_flow_loss = loss_utils.uv_flow_loss
self.model_trans_loss_fn = loss_utils.model_trans_loss
self.model_pose_loss_fn = loss_utils.model_pose_loss
def set_optimization_input(self):
opts = self.opts
cams = np.zeros((self.scale_pred.shape[0], 3))
cams[:,0] = self.scale_pred.data
cams[:,1:] = 128
self.cams = Variable(torch.FloatTensor(cams).cuda(device=opts.gpu_id), requires_grad=False)
self.model_trans = Variable(self.trans_pred.cuda(device=opts.gpu_id), requires_grad=False)
def set_optimization_variables(self):
'''
        Sets the network predictions as the optimization variables.
'''
opts = self.opts
cams = np.zeros((self.scale_pred.shape[0], 3))
cams[:,0] = self.scale_pred.data
cams[:,1:] = 128
# Prediction is gt
self.cams = Variable(torch.FloatTensor(cams).cuda(device=opts.gpu_id), requires_grad=False)
self.model_pose = Variable(self.pose_pred.cuda(device=opts.gpu_id), requires_grad=False)
self.model_trans = Variable(self.trans_pred.cuda(device=opts.gpu_id), requires_grad=False)
self.delta_v= Variable(self.delta_v.cuda(device=opts.gpu_id), requires_grad=False)
def set_input(self, batch):
opts = self.opts
# Image with annotations.
input_img_tensor = batch['img'].type(torch.FloatTensor)
for b in range(input_img_tensor.size(0)):
input_img_tensor[b] = self.resnet_transform(input_img_tensor[b])
img_tensor = batch['img'].type(torch.FloatTensor)
self.input_imgs = Variable( input_img_tensor.cuda(device=opts.gpu_id), requires_grad=False)
self.imgs = Variable( img_tensor.cuda(device=opts.gpu_id), requires_grad=False)
#if opts.use_mask and 'mask' in batch.keys():
if 'mask' in batch.keys():
mask_tensor = batch['mask'].type(torch.FloatTensor)
self.masks = Variable( mask_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.masks = None
if opts.use_keypoints and 'kp' in batch.keys():
kp_tensor = batch['kp'].type(torch.FloatTensor)
self.kps = Variable( kp_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.kps = None
self.img_paths = batch['img_path']
if 'camera_params' in batch.keys():
cam_tensor = batch['camera_params'].type(torch.FloatTensor)
if opts.use_norm_f_and_z:
cam_tensor[:,0] = (cam_tensor[:,0]-opts.norm_f0)/opts.norm_f
self.cams = Variable( cam_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.cams = None
cam_c_tensor = batch['camera_params_c'].type(torch.FloatTensor)
self.cams_center = Variable(cam_c_tensor.cuda(device=opts.gpu_id), requires_grad=False)
if 'model_trans' in batch.keys():
model_trans_tensor = batch['model_trans'].type(torch.FloatTensor)
if opts.use_norm_f_and_z:
model_trans_tensor[:,2] = model_trans_tensor[:,2]-opts.norm_z +1.
self.model_trans = Variable(
model_trans_tensor.cuda(device=opts.gpu_id), requires_grad=False)
if 'model_pose' in batch.keys():
model_pose_tensor = batch['model_pose'].type(torch.FloatTensor)
self.model_pose = Variable(
model_pose_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.model_trans = None
self.model_pose = None
if 'model_betas' in batch.keys():
model_betas_tensor = batch['model_betas'][:,:self.opts.num_betas].type(torch.FloatTensor)
self.model_betas = Variable(
model_betas_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.model_betas = None
if 'model_delta_v' in batch.keys():
model_delta_v_tensor = batch['model_delta_v'].type(torch.FloatTensor)
self.model_delta_v = Variable(
model_delta_v_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.model_delta_v = None
if opts.texture_map:
assert('texture_map' in batch.keys())
texture_map_tensor = batch['texture_map'].type(torch.FloatTensor)
self.texture_map = Variable(texture_map_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.texture_map = None
if 'uv_flow' in batch.keys():
uv_flow_tensor = batch['uv_flow'].type(torch.FloatTensor).permute(0,3,1,2)
self.uv_flow_gt = Variable(uv_flow_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.uv_flow_gt = None
# Compute barrier distance transform.
#if opts.use_mask and self.masks is not None:
if self.masks is not None:
mask_dts = np.stack([image_utils.compute_dt_barrier(m) for m in batch['mask']])
dt_tensor = torch.FloatTensor(mask_dts).cuda(device=opts.gpu_id)
# B x 1 x N x N
self.dts_barrier = Variable(dt_tensor, requires_grad=False).unsqueeze(1)
def forward(self, opts_scale=None, opts_pose=None, opts_trans=None, opts_delta_v=None):
opts = self.opts
if opts.use_double_input:
masks = self.input_imgs*self.masks
else:
masks = None
if opts.texture:
pred_codes, self.textures = self.model.forward(self.input_imgs, masks)
else:
pred_codes = self.model.forward(self.input_imgs, masks)
self.delta_v, self.scale_pred, self.trans_pred, self.pose_pred, self.betas_pred, self.kp_2D_pred = pred_codes
if opts.fix_trans:
self.trans_pred[:,2] = self.model_trans[:,2]
if opts.use_gttrans:
print('Using gt trans')
self.trans_pred = self.model_trans
if opts.use_gtpose:
print('Using gt pose')
self.pose_pred = self.model_pose
if opts.use_gtcam:
print('Using gt cam')
self.scale_pred = self.cams[:,0,None]
if opts.use_gtbetas:
print('Using gt betas')
self.betas_pred = self.model_betas
if opts.use_gtdeltav:
print('Using gt delta_v')
self.delta_v = self.model_delta_v
if self.cams is not None:
            # The camera center does not change; here we are predicting the focal length
self.cam_pred = torch.cat([self.scale_pred, self.cams[:,1:]], 1)
else:
self.cam_pred = torch.cat([self.scale_pred, self.cams_center], 1)
if opts.only_mean_sym:
del_v = self.delta_v
else:
del_v = self.model.symmetrize(self.delta_v)
if opts.no_delta_v:
del_v[:] = 0
if opts.use_smal_pose:
self.pred_v = self.model.get_smal_verts(self.pose_pred, self.betas_pred, self.trans_pred, del_v)
else:
# TODO
self.mean_shape = self.model.get_mean_shape()
self.pred_v = self.mean_shape + del_v + self.trans_pred
# Compute keypoints.
if opts.infer_vert2kp:
self.vert2kp = torch.nn.functional.softmax(self.model.vert2kp, dim=1)
self.kp_verts = torch.matmul(self.vert2kp, self.pred_v)
# Set projection camera
proj_cam = self.cam_pred
# Project keypoints
if opts.use_keypoints:
self.kp_pred = self.renderer.project_points(self.kp_verts, proj_cam)
# Render mask.
self.mask_pred = self.renderer.forward(self.pred_v, self.faces, proj_cam)
if opts.texture:
self.texture_flow = self.textures
self.textures = geom_utils.sample_textures(self.texture_flow, self.imgs)
tex_size = self.textures.size(2)
self.textures = self.textures.unsqueeze(4).repeat(1, 1, 1, 1, tex_size, 1)
if opts.use_gttexture:
idx=0
from ..utils.obj2nmr import obj2nmr_uvmap
uv_map = obj2nmr_uvmap(self.model.ft, self.model.vt, tex_size=tex_size)
uv_img = self.texture_map[idx,:,:,:]
uv_img = uv_img.permute(1,2,0)
texture_t = sample_texture(uv_map, uv_img)
self.textures[0,:,:,:,:,:] = texture_t[0,:,:,:,:,:]
if opts.grad_v_in_tex_loss:
self.texture_pred = self.tex_renderer.forward(self.pred_v, self.faces, proj_cam.detach(), textures=self.textures)
else:
self.texture_pred = self.tex_renderer.forward(self.pred_v.detach(), self.faces, proj_cam.detach(), textures=self.textures)
else:
self.textures = None
if opts.save_training_imgs and opts.use_mask and self.masks is not None:
T = 255*self.mask_pred.cpu().detach().numpy()[0,:,:]
scipy.misc.imsave(opts.name + '_mask_pred.png', T)
T = 255*self.masks.cpu().detach().numpy()[0,:,:,:]
T = np.transpose(T,(1,2,0))[:,:,0]
scipy.misc.imsave(opts.name + '_mask_gt.png', T)
# Compute losses for this instance.
if self.opts.use_keypoints and self.kps is not None:
self.kp_loss = self.projection_loss(self.kp_pred, self.kps)
if self.opts.use_mask and self.masks is not None:
self.mask_loss = self.mask_loss_fn(self.mask_pred, self.masks[:,0,:,:])
if self.opts.use_camera_loss and self.cams is not None:
self.cam_loss = self.camera_loss(self.cam_pred, self.cams, 0, self.opts.use_norm_f_and_z)
if self.model_trans is not None:
self.mod_trans_loss = self.model_trans_loss_fn(self.trans_pred, self.model_trans)
if self.model_pose is not None:
self.mod_pose_loss = self.model_pose_loss_fn(self.pose_pred, self.model_pose, self.opts)
if opts.texture:
if opts.use_loss_on_whole_image:
if self.background_imgs is None:
print("SETTING BACKGROUND MODEL")
self.background_imgs = np.zeros(self.imgs.shape)
fg_mask = self.mask_pred.detach().cpu().numpy()
I = self.imgs.detach().cpu().numpy()
bg_mask = np.abs(fg_mask-1)
rgb = np.zeros((3))
n = np.sum(bg_mask)
for c in range(3):
I[:,c,:,:] = I[:,c,:,:] * bg_mask
rgb[c] = np.sum(I[0,c,:,:])/n
if self.background_model_top is not None:
N = 128
for c in range(3):
self.background_imgs[:,c,:N,:] = self.background_model_top[c]
self.background_imgs[:,c,N:,:] = self.background_model_bottom[c]
else:
# This is what we use for optimization
if opts.use_per_image_rgb_bg:
self.background_imgs[:,0,:,:] = rgb[0]
self.background_imgs[:,1,:,:] = rgb[1]
self.background_imgs[:,2,:,:] = rgb[2]
else:
self.background_imgs[:,0,:,:] = .6964
self.background_imgs[:,1,:,:] = .5806
self.background_imgs[:,2,:,:] = .4780
# Verification experiment: replace with image
if opts.use_img_as_background:
self.background_imgs[:,0,:,:] = self.imgs.data[:,0,:,:]
self.background_imgs[:,1,:,:] = self.imgs.data[:,1,:,:]
self.background_imgs[:,2,:,:] = self.imgs.data[:,2,:,:]
self.background_imgs = torch.Tensor(self.background_imgs).cuda(device=opts.gpu_id)
if self.masks is not None:
if opts.use_loss_on_whole_image:
self.tex_loss = self.texture_loss(self.texture_pred, self.imgs, self.mask_pred, None, self.background_imgs)
else:
self.tex_loss = self.texture_loss(self.texture_pred, self.imgs, self.mask_pred, self.masks[:,0,:,:])
if opts.use_tex_dt:
self.tex_dt_loss = self.texture_dt_loss_fn(self.texture_flow, self.dts_barrier[:,:,:,:,0])
else:
if opts.use_loss_on_whole_image:
self.tex_loss = self.texture_loss(self.texture_pred, self.imgs, self.mask_pred, None, self.background_imgs)
else:
self.tex_loss = self.texture_loss(self.texture_pred, self.imgs, self.mask_pred, None)
if opts.texture_map and self.texture_map is not None:
uv_flows = self.model.texture_predictor.uvimage_pred
uv_flows = uv_flows.permute(0, 2, 3, 1)
uv_images = torch.nn.functional.grid_sample(self.imgs, uv_flows)
self.tex_map_loss = self.texture_map_loss(uv_images, self.texture_map, self.texture_map_mask, self.opts)
if opts.uv_flow and self.uv_flow_gt is not None:
uv_flows = self.model.texture_predictor.uvimage_pred
self.uv_f_loss = self.uv_flow_loss(uv_flows, self.uv_flow_gt)
# Priors:
if opts.infer_vert2kp:
self.vert2kp_loss = self.entropy_loss(self.vert2kp)
if opts.use_smal_betas:
self.betas_loss = self.betas_loss_fn(self.betas_pred, self.model_betas, self.betas_prec)
if self.model_delta_v is not None:
self.delta_v_loss = self.delta_v_loss_fn(self.delta_v, self.model_delta_v)
# Finally sum up the loss.
# Instance loss:
if opts.use_keypoints and self.kps is not None:
self.total_loss = opts.kp_loss_wt * self.kp_loss
if opts.use_mask and self.masks is not None:
self.total_loss += opts.mask_loss_wt * self.mask_loss
else:
if opts.use_mask and self.masks is not None:
self.total_loss = opts.mask_loss_wt * self.mask_loss
else:
self.total_loss = 0
if not opts.use_gtcam and self.opts.use_camera_loss and self.cams is not None:
self.total_loss += opts.cam_loss_wt * self.cam_loss
if opts.texture:
self.total_loss += opts.tex_loss_wt * self.tex_loss
if opts.texture_map and self.texture_map is not None:
self.total_loss += opts.tex_map_loss_wt * self.tex_map_loss
if opts.uv_flow and self.uv_flow_gt is not None:
self.total_loss += opts.uv_flow_loss_wt * self.uv_f_loss
if self.model_trans is not None:
if not opts.use_gttrans:
self.total_loss += opts.mod_trans_loss_wt * self.mod_trans_loss
if self.model_pose is not None:
if not opts.use_gtpose:
self.total_loss += opts.mod_pose_loss_wt * self.mod_pose_loss
if self.model_delta_v is not None:
self.total_loss += opts.delta_v_loss_wt*self.delta_v_loss
# Priors:
if opts.infer_vert2kp:
self.total_loss += opts.vert2kp_loss_wt * self.vert2kp_loss
if opts.use_smal_betas:
self.total_loss += opts.betas_reg_wt * self.betas_loss
if opts.texture and self.masks is not None and opts.use_tex_dt:
self.total_loss += opts.tex_dt_loss_wt * self.tex_dt_loss
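        # In summary, the objective assembled above is a weighted sum of the
        # available terms (each included only when its data/flag is present):
        #   total_loss = kp_loss_wt * kp_loss + mask_loss_wt * mask_loss
        #              + cam_loss_wt * cam_loss + tex_loss_wt * tex_loss
        #              + tex_map_loss_wt * tex_map_loss + uv_flow_loss_wt * uv_f_loss
        #              + mod_trans_loss_wt * mod_trans_loss + mod_pose_loss_wt * mod_pose_loss
        #              + delta_v_loss_wt * delta_v_loss + vert2kp_loss_wt * vert2kp_loss
        #              + betas_reg_wt * betas_loss + tex_dt_loss_wt * tex_dt_loss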
def get_current_visuals(self):
vis_dict = {}
try:
mask_concat = torch.cat([self.masks[:,0,:,:], self.mask_pred], 2)
except:
import pdb; pdb.set_trace()
if self.opts.texture:
# B x 2 x H x W
uv_flows = self.model.texture_predictor.uvimage_pred
# B x H x W x 2
uv_flows = uv_flows.permute(0, 2, 3, 1)
uv_images = torch.nn.functional.grid_sample(self.imgs, uv_flows)
num_show = min(2, self.opts.batch_size)
show_uv_imgs = []
show_uv_flows = []
for i in range(num_show):
input_img = smal_vis.kp2im(self.kps[i].data, self.imgs[i].data)
pred_kp_img = smal_vis.kp2im(self.kp_pred[i].data, self.imgs[i].data)
masks = smal_vis.tensor2mask(mask_concat[i].data)
if self.opts.texture:
texture_here = self.textures[i]
else:
texture_here = None
rend_predcam = self.vis_rend(self.pred_v[i], self.cam_pred[i], texture=texture_here)
            # Render from frontal & top viewpoints:
rend_frontal = self.vis_rend.diff_vp(self.pred_v[i], self.cam_pred[i], texture=texture_here, kp_verts=self.kp_verts[i])
rend_top = self.vis_rend.diff_vp(self.pred_v[i], self.cam_pred[i], axis=[0, 1, 0], texture=texture_here, kp_verts=self.kp_verts[i])
diff_rends = np.hstack((rend_frontal, rend_top))
if self.opts.texture:
uv_img = smal_vis.tensor2im(uv_images[i].data)
show_uv_imgs.append(uv_img)
uv_flow = smal_vis.visflow(uv_flows[i].data)
show_uv_flows.append(uv_flow)
tex_img = smal_vis.tensor2im(self.texture_pred[i].data)
imgs = np.hstack((input_img, pred_kp_img, tex_img))
else:
imgs = np.hstack((input_img, pred_kp_img))
rend_gtcam = self.vis_rend(self.pred_v[i], self.cams[i], texture=texture_here)
rends = np.hstack((diff_rends, rend_predcam, rend_gtcam))
vis_dict['%d' % i] = np.hstack((imgs, rends, masks))
vis_dict['masked_img %d' % i] = smal_vis.tensor2im((self.imgs[i] * self.masks[i]).data)
if self.opts.texture:
vis_dict['uv_images'] = np.hstack(show_uv_imgs)
vis_dict['uv_flow_vis'] = np.hstack(show_uv_flows)
return vis_dict
def get_current_points(self):
return {
'mean_shape': visutil.tensor2verts(self.mean_shape.data),
'verts': visutil.tensor2verts(self.pred_v.data),
}
def get_current_scalars(self):
sc_dict = OrderedDict([
('smoothed_total_loss', self.smoothed_total_loss),
('total_loss', self.total_loss.item()),
])
if self.opts.use_smal_betas:
sc_dict['betas_reg'] = self.betas_loss.item()
if self.opts.use_mask and self.masks is not None:
sc_dict['mask_loss'] = self.mask_loss.item()
if self.opts.use_keypoints and self.kps is not None:
sc_dict['kp_loss'] = self.kp_loss.item()
if self.opts.use_camera_loss and self.cams is not None:
sc_dict['cam_loss'] = self.cam_loss.item()
if self.opts.texture:
sc_dict['tex_loss'] = self.tex_loss.item()
if self.opts.texture_map and self.opts.use_tex_dt and self.masks is not None:
sc_dict['tex_dt_loss'] = self.tex_dt_loss.item()
if self.opts.uv_flow and self.uv_flow_gt is not None:
sc_dict['uv_flow_loss'] = self.uv_f_loss.item()
if self.opts.texture_map and self.texture_map is not None:
sc_dict['tex_map_loss'] = self.tex_map_loss.item()
if self.model_trans is not None:
sc_dict['model_trans_loss'] = self.mod_trans_loss.item()
if self.model_pose is not None:
sc_dict['model_pose_loss'] = self.mod_pose_loss.item()
if opts.infer_vert2kp:
sc_dict['vert2kp_loss'] = self.vert2kp_loss.item()
if self.model_delta_v is not None:
sc_dict['model_delta_v_loss'] = self.delta_v_loss.item()
return sc_dict
def main(_):
torch.manual_seed(0)
np.random.seed(0)
trainer = ShapeTrainer(opts)
trainer.init_training()
trainer.train()
if __name__ == '__main__':
app.run(main)
|
everbug/utils/manager.py | everhide/everbug | 182 | 44854 | <reponame>everhide/everbug
class _Manager(type):
""" Singletone for cProfile manager """
_inst = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._inst:
cls._inst[cls] = super(_Manager, cls).__call__(*args, **kwargs)
return cls._inst[cls]
class ProfileManager(metaclass=_Manager):
def __init__(self):
self._profiles = list()
def clear(self):
self._profiles.clear()
def add(self, profile):
self._profiles.append(profile)
def profiles(self):
return self._profiles
@property
def count(self):
return len(self._profiles)
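# Usage sketch (illustrative only): the _Manager metaclass makes ProfileManager
# a process-wide singleton, so every call site shares one profile list:
#
#   manager_a = ProfileManager()
#   manager_b = ProfileManager()
#   assert manager_a is manager_b
#   manager_a.add(profile)      # `profile` is whatever cProfile result you collect
#   assert manager_b.count == 1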
|
examples/rl/train/play.py | ONLYA/RoboGrammar | 156 | 44902 | <reponame>ONLYA/RoboGrammar
import sys
import os
base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../')
sys.path.append(base_dir)
sys.path.append(os.path.join(base_dir, 'rl'))
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import gym
gym.logger.set_level(40)
import environments
from rl.train.evaluation import render, render_full
from rl.train.arguments import get_parser
from a2c_ppo_acktr import algo, utils
from a2c_ppo_acktr.envs import make_vec_envs, make_env
from a2c_ppo_acktr.model import Policy
parser = get_parser()
parser.add_argument('--model-path', type = str, required = True)
args = parser.parse_args()
if not os.path.isfile(args.model_path):
    print('Model file does not exist: %s' % args.model_path)
    sys.exit(1)
torch.manual_seed(0)
torch.set_num_threads(1)
device = torch.device('cpu')
render_env = gym.make(args.env_name, args = args)
render_env.seed(0)
envs = make_vec_envs(args.env_name, 0, 4, 0.995, None, device, False, args = args)
actor_critic = Policy(
envs.observation_space.shape,
envs.action_space,
base_kwargs={'recurrent': False})
actor_critic.to(device)
ob_rms = utils.get_vec_normalize(envs).ob_rms
actor_critic, ob_rms = torch.load(args.model_path)
actor_critic.eval()
envs.close()
render_full(render_env, actor_critic, ob_rms, deterministic = True, repeat = True)
|
learning_python/lesson7/exercise4.py | fallenfuzz/pynet | 528 | 44905 | <filename>learning_python/lesson7/exercise4.py
#!/usr/bin/env python
"""
Take the YAML file and corresponding data structure that you defined in exercise3b:
{'interfaces': {
'Ethernet1': {'mode': 'access', 'vlan': 10},
'Ethernet2': {'mode': 'access', 'vlan': 20},
'Ethernet3': {'mode': 'trunk',
'native_vlan': 1,
'trunk_vlans': 'all'}
}
}
From this YAML data input source, use Jinja templating to generate the following configuration
output:
----
interface Ethernet1
switchport mode access
switchport access vlan 10
interface Ethernet2
switchport mode access
switchport access vlan 20
interface Ethernet3
switchport mode trunk
switchport trunk native vlan 1
switchport trunk allowed vlan all
----
The following should all be variables in your Jinja template (the names may be different than
below, but they should be variabilized and not be hard-coded in your template).
----
interface_name
switchport_mode
access_vlan
native_vlan
trunk_vlans
----
All your Jinja2 variables should be retrieved from your YAML file.
This exercise might be challenging.
"""
from __future__ import print_function, unicode_literals
import yaml
import jinja2
yaml_file = "exercise3b.yml"
with open(yaml_file) as f:
    template_vars = yaml.safe_load(f)
template_file = "interface_config.j2"
with open(template_file) as f:
jinja_template = f.read()
template = jinja2.Template(jinja_template)
print(template.render(template_vars))
|
finance_ml/model_selection/__init__.py | BTETON/finance_ml | 446 | 44906 | from .kfold import PurgedKFold, CPKFold, generate_signals
from .score import cv_score
from .pipeline import Pipeline
from .hyper import clf_hyper_fit
from .distribution import LogUniformGen, log_uniform
from .utils import evaluate |
alipay/aop/api/domain/MiniAppDeployResponse.py | antopen/alipay-sdk-python-all | 213 | 44913 | <filename>alipay/aop/api/domain/MiniAppDeployResponse.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MiniAppDeployResponse(object):
def __init__(self):
self._android_client_max = None
self._android_client_min = None
self._app_version = None
self._bundle_id = None
self._deploy_version = None
self._gmt_create = None
self._gmt_modified = None
self._ios_client_max = None
self._ios_client_min = None
self._mini_app_id = None
self._package_size = None
self._status = None
@property
def android_client_max(self):
return self._android_client_max
@android_client_max.setter
def android_client_max(self, value):
self._android_client_max = value
@property
def android_client_min(self):
return self._android_client_min
@android_client_min.setter
def android_client_min(self, value):
self._android_client_min = value
@property
def app_version(self):
return self._app_version
@app_version.setter
def app_version(self, value):
self._app_version = value
@property
def bundle_id(self):
return self._bundle_id
@bundle_id.setter
def bundle_id(self, value):
self._bundle_id = value
@property
def deploy_version(self):
return self._deploy_version
@deploy_version.setter
def deploy_version(self, value):
self._deploy_version = value
@property
def gmt_create(self):
return self._gmt_create
@gmt_create.setter
def gmt_create(self, value):
self._gmt_create = value
@property
def gmt_modified(self):
return self._gmt_modified
@gmt_modified.setter
def gmt_modified(self, value):
self._gmt_modified = value
@property
def ios_client_max(self):
return self._ios_client_max
@ios_client_max.setter
def ios_client_max(self, value):
self._ios_client_max = value
@property
def ios_client_min(self):
return self._ios_client_min
@ios_client_min.setter
def ios_client_min(self, value):
self._ios_client_min = value
@property
def mini_app_id(self):
return self._mini_app_id
@mini_app_id.setter
def mini_app_id(self, value):
self._mini_app_id = value
@property
def package_size(self):
return self._package_size
@package_size.setter
def package_size(self, value):
self._package_size = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def to_alipay_dict(self):
params = dict()
if self.android_client_max:
if hasattr(self.android_client_max, 'to_alipay_dict'):
params['android_client_max'] = self.android_client_max.to_alipay_dict()
else:
params['android_client_max'] = self.android_client_max
if self.android_client_min:
if hasattr(self.android_client_min, 'to_alipay_dict'):
params['android_client_min'] = self.android_client_min.to_alipay_dict()
else:
params['android_client_min'] = self.android_client_min
if self.app_version:
if hasattr(self.app_version, 'to_alipay_dict'):
params['app_version'] = self.app_version.to_alipay_dict()
else:
params['app_version'] = self.app_version
if self.bundle_id:
if hasattr(self.bundle_id, 'to_alipay_dict'):
params['bundle_id'] = self.bundle_id.to_alipay_dict()
else:
params['bundle_id'] = self.bundle_id
if self.deploy_version:
if hasattr(self.deploy_version, 'to_alipay_dict'):
params['deploy_version'] = self.deploy_version.to_alipay_dict()
else:
params['deploy_version'] = self.deploy_version
if self.gmt_create:
if hasattr(self.gmt_create, 'to_alipay_dict'):
params['gmt_create'] = self.gmt_create.to_alipay_dict()
else:
params['gmt_create'] = self.gmt_create
if self.gmt_modified:
if hasattr(self.gmt_modified, 'to_alipay_dict'):
params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
else:
params['gmt_modified'] = self.gmt_modified
if self.ios_client_max:
if hasattr(self.ios_client_max, 'to_alipay_dict'):
params['ios_client_max'] = self.ios_client_max.to_alipay_dict()
else:
params['ios_client_max'] = self.ios_client_max
if self.ios_client_min:
if hasattr(self.ios_client_min, 'to_alipay_dict'):
params['ios_client_min'] = self.ios_client_min.to_alipay_dict()
else:
params['ios_client_min'] = self.ios_client_min
if self.mini_app_id:
if hasattr(self.mini_app_id, 'to_alipay_dict'):
params['mini_app_id'] = self.mini_app_id.to_alipay_dict()
else:
params['mini_app_id'] = self.mini_app_id
if self.package_size:
if hasattr(self.package_size, 'to_alipay_dict'):
params['package_size'] = self.package_size.to_alipay_dict()
else:
params['package_size'] = self.package_size
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MiniAppDeployResponse()
if 'android_client_max' in d:
o.android_client_max = d['android_client_max']
if 'android_client_min' in d:
o.android_client_min = d['android_client_min']
if 'app_version' in d:
o.app_version = d['app_version']
if 'bundle_id' in d:
o.bundle_id = d['bundle_id']
if 'deploy_version' in d:
o.deploy_version = d['deploy_version']
if 'gmt_create' in d:
o.gmt_create = d['gmt_create']
if 'gmt_modified' in d:
o.gmt_modified = d['gmt_modified']
if 'ios_client_max' in d:
o.ios_client_max = d['ios_client_max']
if 'ios_client_min' in d:
o.ios_client_min = d['ios_client_min']
if 'mini_app_id' in d:
o.mini_app_id = d['mini_app_id']
if 'package_size' in d:
o.package_size = d['package_size']
if 'status' in d:
o.status = d['status']
return o
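# Illustrative round trip (field values are made up):
#   resp = MiniAppDeployResponse.from_alipay_dict({'mini_app_id': '2021001234', 'status': 'ONLINE'})
#   resp.status            -> 'ONLINE'
#   resp.to_alipay_dict()  -> {'mini_app_id': '2021001234', 'status': 'ONLINE'}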
|
test/util_test.py | sbienkow/eg | 1,389 | 44966 | import json
import os
from eg import config
from eg import substitute
from eg import util
from mock import Mock
from mock import patch
PATH_UNSQUEEZED_FILE = os.path.join(
'test',
'assets',
'pwd_unsqueezed.md'
)
PATH_SQUEEZED_FILE = os.path.join(
'test',
'assets',
'pwd_squeezed.md'
)
def _create_config(
examples_dir=None,
custom_dir=None,
color_config=None,
use_color=True,
pager_cmd=None,
editor_cmd=None,
squeeze=False,
subs=None
):
"""
Create a config.Config object with default values for expediency in
testing.
"""
return config.Config(
examples_dir=examples_dir,
custom_dir=custom_dir,
color_config=color_config,
use_color=use_color,
pager_cmd=pager_cmd,
editor_cmd=editor_cmd,
squeeze=squeeze,
subs=subs
)
@patch('os.walk')
def test_get_file_paths_for_program_with_single(mock_walk):
program = 'cp'
examples_dir = '/Users/tyrion'
program_file = program + util.EXAMPLE_FILE_SUFFIX
expected = ['/Users/tyrion/cp.md']
mock_walk.return_value = [
[examples_dir, [], [program_file, 'cp.txt', 'other_file.md']],
]
actual = util.get_file_paths_for_program(program, examples_dir)
assert actual == expected
mock_walk.assert_called_once_with(examples_dir)
@patch('os.walk')
def test_get_file_paths_for_program_with_nested(mock_walk):
program = 'cp'
examples_dir = '/Users/tyrion'
program_file = 'cp.md'
mock_walk.return_value = [
[
examples_dir,
['dirA', 'dirB'],
[program_file, 'cp.txt', 'other_file.md'],
],
[
examples_dir + '/dirA',
['dirA-child'],
[program_file, 'bad.md'],
],
[
examples_dir + '/dirA/dirA-child',
[],
['bad.md', program_file, 'wtf.md'],
],
[
examples_dir + '/dirB',
[],
['foo.md', program_file],
],
]
expected = [
'/Users/tyrion/cp.md',
'/Users/tyrion/dirA/cp.md',
'/Users/tyrion/dirA/dirA-child/cp.md',
'/Users/tyrion/dirB/cp.md',
]
actual = util.get_file_paths_for_program(program, examples_dir)
assert actual == expected
mock_walk.assert_called_once_with(examples_dir)
@patch('os.walk')
def test_get_file_paths_for_program_with_none(mock_walk):
expected = []
mock_walk.return_value = []
actual = util.get_file_paths_for_program('cp', '/Users/tyrion')
assert actual == expected
mock_walk.assert_called_once_with('/Users/tyrion')
@patch('os.walk')
def test_get_file_paths_for_program_with_no_dir(mock_walk):
assert util.get_file_paths_for_program('cp', None) == []
@patch('eg.util.page_string')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_resolved_program')
def test_handle_program_no_entries(
mock_resolve_program,
mock_get_contents,
mock_format,
mock_page_string,
):
"""
We should do the right thing if there are no entries for a given program.
"""
program = 'cp'
test_config = _create_config()
mock_resolve_program.return_value = program
util.handle_program(program, test_config)
mock_resolve_program.assert_called_once_with(
program,
test_config
)
# We should have aborted and not called any of the
# other methods.
assert mock_get_contents.call_count == 0
assert mock_format.call_count == 0
assert mock_page_string.call_count == 0
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_file_paths_for_program')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.page_string')
def test_handle_program_finds_paths_and_calls_open_pager_no_alias(
mock_page,
mock_format,
mock_get_paths,
mock_get_contents,
mock_resolve,
):
"""
If there are entries for the program, handle_program needs to get the
paths, get the contents, format the contents, and page the resulting
string.
"""
program = 'mv'
examples_dir = 'test-eg-dir'
custom_dir = 'test-custom-dir'
color_config = None
use_color = False
pager_cmd = 'foo bar'
squeeze = False
subs = ['foo', 'bar']
file_contents = 'I am the contents of mv.md.'
formatted_contents = 'and I am the formatted contents of mv.md.'
test_config = _create_config(
examples_dir=examples_dir,
custom_dir=custom_dir,
color_config=color_config,
use_color=use_color,
pager_cmd=pager_cmd,
squeeze=squeeze,
subs=subs
)
default_paths = ['test-eg-dir/mv.md', 'test-eg-dir/foo/mv.md']
custom_paths = ['test-custom-dir/mv.md', 'test-custom-dir/bar.md']
def return_correct_path(*args, **kwargs):
program_param = args[0]
dir_param = args[1]
if program_param != program:
raise NameError('expected ' + program + ', got ' + program_param)
if dir_param == examples_dir:
return default_paths
elif dir_param == custom_dir:
return custom_paths
else:
raise NameError(
'got ' +
dir_param +
', expected ' +
examples_dir +
' or ' +
custom_dir)
mock_format.return_value = formatted_contents
mock_get_paths.side_effect=return_correct_path
mock_get_contents.return_value = file_contents
mock_resolve.return_value = program
util.handle_program(program, test_config)
mock_resolve.assert_called_once_with(
program,
test_config
)
mock_get_paths.assert_any_call(
program,
examples_dir
)
mock_get_paths.assert_any_call(
program,
custom_dir,
)
mock_get_contents.assert_called_once_with(
custom_paths[0],
custom_paths[1],
default_paths[0],
default_paths[1],
)
mock_format.assert_called_once_with(
file_contents,
use_color=test_config.use_color,
color_config=test_config.color_config,
squeeze=test_config.squeeze,
subs=test_config.subs
)
mock_page.assert_called_once_with(
formatted_contents,
test_config.pager_cmd
)
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_file_paths_for_program')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.page_string')
def test_handle_program_finds_paths_and_calls_open_pager_with_alias(
mock_page,
mock_format,
mock_get_paths,
mock_get_contents,
mock_resolve,
):
"""
If there are entries for the program, handle_program needs to get the
paths, get the contents, format the contents, and page the resulting
string.
"""
alias_for_program = 'link'
resolved_program = 'ln'
examples_dir = 'test-eg-dir'
custom_dir = 'test-custom-dir'
color_config = None
use_color = False
pager_cmd = 'foo bar'
squeeze = False
subs = ['foo', 'bar']
file_contents = 'I am the contents of ln.md.'
formatted_contents = 'and I am the formatted contents of ln.md.'
test_config = _create_config(
examples_dir=examples_dir,
custom_dir=custom_dir,
color_config=color_config,
use_color=use_color,
pager_cmd=pager_cmd,
squeeze=squeeze,
subs=subs
)
default_paths = ['test-eg-dir/ln.md']
custom_paths = ['test-custom-dir/ln.md']
def return_correct_path(*args, **kwargs):
program_param = args[0]
dir_param = args[1]
if program_param != resolved_program:
raise NameError(
'expected ' +
resolved_program +
', got ' +
program_param
)
if dir_param == examples_dir:
return default_paths
elif dir_param == custom_dir:
return custom_paths
else:
raise NameError(
'got ' +
dir_param +
', expected ' +
examples_dir +
' or ' +
custom_dir)
mock_format.return_value = formatted_contents
mock_get_paths.side_effect = return_correct_path
mock_get_contents.return_value = file_contents
mock_resolve.return_value = resolved_program
util.handle_program(
alias_for_program,
test_config
)
mock_resolve.assert_called_once_with(
alias_for_program,
test_config
)
mock_get_paths.assert_any_call(
resolved_program,
examples_dir
)
mock_get_paths.assert_any_call(
resolved_program,
custom_dir,
)
mock_get_contents.assert_called_once_with(
custom_paths[0],
default_paths[0]
)
mock_format.assert_called_once_with(
file_contents,
use_color=test_config.use_color,
color_config=test_config.color_config,
squeeze=test_config.squeeze,
subs=test_config.subs
)
mock_page.assert_called_once_with(
formatted_contents,
test_config.pager_cmd
)
def test_get_list_of_all_supported_commands(tmpdir):
dir_example = tmpdir.mkdir('examples')
dir_custom = tmpdir.mkdir('custom')
config = _create_config(
examples_dir=str(dir_example),
custom_dir=str(dir_custom),
)
expected = [
'a-only-default',
'b-both *',
'c-only-custom +',
'd-only-custom-nested +',
'e-only-default-nested',
'f-default-custom-nested',
'g-both-different-levels *',
't-a-only-default-alias -> a-only-default',
'u-b-both-alias -> b-both *',
'v-c-only-custom-alias -> c-only-custom +'
]
aliases = {
't-a-only-default-alias': 'a-only-default',
'u-b-both-alias': 'b-both',
'v-c-only-custom-alias': 'c-only-custom'
}
# Make the directory structure we expect.
dir_example_nested = dir_example.mkdir('default-nested')
dir_custom_nested = dir_custom.mkdir('custom-nested')
dir_example.join('a-only-default.md').write('foo')
dir_example.join('b-both.md').write('foo')
dir_custom.join('b-both.md').write('foo')
dir_custom.join('c-only-custom.md').write('foo')
dir_custom_nested.join('d-only-custom-nested.md').write('foo')
dir_example_nested.join('e-only-default-nested.md').write('foo')
dir_example_nested.join('f-default-custom-nested.md').write('foo')
dir_example.join('g-both-different-levels.md').write('foo')
dir_custom_nested.join('g-both-different-levels.md').write('foo')
# Use the 'with' context manager rather than the @decorator, because the
# tmpdir fixture doesn't play nice with the decorator.
with patch('eg.util.get_alias_dict') as mock_get_alias:
mock_get_alias.return_value = aliases
actual = util.get_list_of_all_supported_commands(config)
assert actual == expected
mock_get_alias.assert_called_once_with(config)
def test_list_supported_programs_fails_gracefully_if_no_dirs():
test_config = _create_config()
actual = util.get_list_of_all_supported_commands(test_config)
target = []
assert actual == target
def test_calls_pipepager_if_not_less():
"""
We're special casing less a bit, as it is the default value, so if a custom
command has been set that is NOT less, we should call pipepager straight
away.
"""
_helper_assert_about_pager('page me plz', 'cat', False)
def test_calls_fallback_pager_if_none():
"""
If pager_cmd is None, we should just use the fallback pager.
"""
_helper_assert_about_pager('page me plz', None, True)
def test_calls_pipepager_if_less():
"""
We should call pipepager if we ask to use less and less is installed on the
machine.
"""
_helper_assert_about_pager('a fancy value to page', 'less -R', False)
def test_calls_fallback_if_cmd_is_flag_string():
"""
We are using a flag string to indicate if we should use the fallback pager.
"""
_helper_assert_about_pager(
'page via fallback',
util.FLAG_FALLBACK,
True
)
@patch('pydoc.pager')
@patch('pydoc.pipepager')
def _helper_assert_about_pager(
str_to_page,
pager_cmd,
use_fallback,
pipepager,
default_pager,
):
"""
Help with asserting about pager.
str_to_page: what you're paging
pager_cmd: the string you're passing to pipepager (or None)
    use_fallback: false if we should actually use pydoc.pipepager, true if we
        instead are going to fall back to pydoc.pager
"""
util.page_string(str_to_page, pager_cmd)
if use_fallback:
default_pager.assert_called_once_with(str_to_page)
assert pipepager.call_count == 0
else:
assert default_pager.call_count == 0
pipepager.assert_called_once_with(
str_to_page,
cmd=pager_cmd
)
@patch('eg.util.pydoc.pipepager', side_effect=KeyboardInterrupt)
def test_page_string_excepts_keyboard_interrupt_if_not_less(pipepager_mock):
"""
Do not fail when user hits ctrl-c while in pager.
"""
try:
util.page_string('page me plz', 'cat')
except KeyboardInterrupt:
raise AssertionError('Should not have got this far')
pipepager_mock.assert_called_once_with('page me plz', cmd='cat')
@patch('eg.util.pydoc.pager', side_effect=KeyboardInterrupt)
def test_page_string_excepts_keyboard_interrupt_if_none(pager_mock):
"""
Do not fail when user hits ctrl-c while in pipepager.
"""
try:
util.page_string('page me plz', None)
except KeyboardInterrupt:
raise AssertionError('Should not have got this far')
pager_mock.assert_called_once_with('page me plz')
def test_get_contents_from_files_handles_none():
"""
Empty string if no files.
"""
_helper_assert_file_contents(
[],
''
)
def test_get_contents_from_files_handles_one():
file_infos = [
{
'path': 'test/path',
'contents': 'contents of file'
}
]
combined_contents = 'contents of file'
_helper_assert_file_contents(
file_infos,
combined_contents
)
def test_get_contents_from_files_handles_multiple():
file_infos = [
{
'path': 'path/1',
'contents': 'foo\n'
},
{
'path': 'path/2/foo',
'contents': 'bar\n'
},
{
'path': 'another/path',
'contents': 'baz'
}
]
combined_contents = 'foo\nbar\nbaz'
_helper_assert_file_contents(
file_infos,
combined_contents
)
@patch('eg.util._get_contents_of_file')
def _helper_assert_file_contents(
file_infos,
target_contents,
get_contents_mock,
):
"""
Helper method to assert things about the get_contents_from_files method.
Does not actually hit the disk.
    file_infos: array of { path, contents } dicts representing files. Array so
        that we can assert the proper calling order
target_contents: the final combined contents that should be returned by the
get_contents_from_files method.
"""
# This method will be used by the mock framework to return the right file
# contents based on the file name.
def return_file_contents(*args, **kwargs):
for file_info in file_infos:
if file_info['path'] == args[0]:
return file_info['contents']
raise TypeError('did not find path in test obj')
get_contents_mock.side_effect = return_file_contents
paths = [el['path'] for el in file_infos]
actual = util.get_contents_from_files(*paths)
assert actual == target_contents
@patch('eg.util.get_colorized_contents')
@patch('eg.util.get_squeezed_contents')
@patch('eg.util.get_substituted_contents')
def _helper_assert_formatted_contents(
starting_contents,
use_color,
color_config,
squeeze,
subs,
colorized_contents,
squeezed_contents,
subbed_contents,
formatted_result,
sub_method,
squeeze_method,
color_method,
):
"""
Helper method to assist in asserting things about the
get_formatted_contents method.
starting_contents: the starting string that we are working with
use_color: True if we should use color
color_config: the color config to be passed to get_colorized_contents
squeeze: True if we should squeeze
subs: the list of Substitutions that we should pass to
get_substituted_contents
    colorized_contents: the result of get_colorized_contents
    squeezed_contents: the result of get_squeezed_contents
    subbed_contents: the result of get_substituted_contents
formatted_result: the final, formatted string that should be returned
"""
sub_method.return_value = subbed_contents
squeeze_method.return_value = squeezed_contents
color_method.return_value = colorized_contents
actual = util.get_formatted_contents(
starting_contents,
use_color,
color_config,
squeeze,
subs
)
# We'll update the contents as they get formatted to make sure
# we pass the right thing to the various methods.
contents_thus_far = starting_contents
if use_color:
color_method.assert_called_once_with(
contents_thus_far,
color_config
)
contents_thus_far = colorized_contents
else:
assert color_method.call_count == 0
if squeeze:
squeeze_method.assert_called_once_with(contents_thus_far)
contents_thus_far = squeezed_contents
else:
assert squeeze_method.call_count == 0
if subs:
sub_method.assert_called_once_with(
contents_thus_far,
subs
)
contents_thus_far = subbed_contents
else:
assert sub_method.call_count == 0
assert actual == formatted_result
def test_get_formatted_contents_does_not_format_methods_if_all_falsey():
"""
We should invoke none of the formatter methods if the flags are false and
subs is not truthy.
"""
starting_contents = 'this is where we start'
_helper_assert_formatted_contents(
starting_contents,
False,
'some color config',
False,
None,
'this was colored',
'this was squeezed',
'these contents were subbed',
starting_contents
)
def test_get_formatted_contents_calls_colorize_if_use_color():
"""
Colorize the contents if use_color = True.
"""
starting_contents = 'this is where we start'
colorized_contents = 'COLORIZED: this is where we start'
_helper_assert_formatted_contents(
starting_contents,
True,
'some color config',
False,
None,
colorized_contents,
'this was squeezed',
'these contents were subbed',
colorized_contents
)
def test_get_formatted_contents_squeezes():
"""If squeeze, we need to squeeze."""
starting_contents = 'this is where we start'
squeezed_contents = 'this is the result of a squeezing'
_helper_assert_formatted_contents(
starting_contents,
False,
'some color config',
True,
None,
'this was colored',
squeezed_contents,
'these contents were subbed',
squeezed_contents
)
def test_get_formatted_contents_subsitutes():
"""If subs is truthy, get_substituted contents should be called."""
starting_contents = 'this is where we start'
subbed_contents = 'substituted like a teacher'
_helper_assert_formatted_contents(
starting_contents,
False,
'some color config',
False,
['truthy', 'list'],
'this was colored',
'this was squeezed',
subbed_contents,
subbed_contents
)
def test_perform_all_formatting():
"""
When use_color, squeeze, and subs are all truthy, all the formatting
should be applied in that order.
"""
starting_contents = 'the starting point for grand formatting'
subbed_contents = 'subbed is the last thing called so should be the result'
_helper_assert_formatted_contents(
starting_contents,
True,
'some color config',
True,
['truthy', 'list'],
'this was colored',
'this was squeezed',
subbed_contents,
subbed_contents
)
def _get_file_as_string(path):
"""Get the contents of the file as a string."""
with open(path, 'r') as f:
data = f.read()
return data
def test_get_squeezed_contents_correctly_squeezes():
"""
Our squeeze method should follow our convention, which is to remove the
blank line between a description and an example, to keep two blank lines
between sections, and otherwise have only single blank lines.
"""
unsqueezed = _get_file_as_string(PATH_UNSQUEEZED_FILE)
# the target squeezed output is a reference implementation in
# pwd_squeezed.md.
target = _get_file_as_string(PATH_SQUEEZED_FILE)
actual = util.get_squeezed_contents(unsqueezed)
assert actual == target
def test_get_substituted_contents_handles_empty_subs():
"""Nothing should be formatted if there are no substitutions."""
raw_contents = 'this should not be subbed'
actual = util.get_substituted_contents(raw_contents, [])
assert actual == raw_contents
def test_get_substituted_contents_substitutes_calls_correct_methods():
"""
The get_substituted_contents method calls things in the correct order.
"""
sub_one = Mock(auto_spec=substitute.Substitution)
sub_one_result = 'result of sub one'
sub_one.apply_and_get_result.return_value = sub_one_result
sub_two = Mock(auto_spec=substitute.Substitution)
sub_two_result = 'result of sub two'
sub_two.apply_and_get_result.return_value = sub_two_result
starting_contents = 'the string we should be substituting into'
target = sub_two_result
subs = [sub_one, sub_two]
actual = util.get_substituted_contents(starting_contents, subs)
sub_one.apply_and_get_result.assert_called_once_with(starting_contents)
sub_two.apply_and_get_result.assert_called_once_with(sub_one_result)
assert actual == target
def test_get_substituted_contents_substitutes_correctly():
"""
Basic test to make sure Substitutions can get applied correctly.
"""
sub_one = substitute.Substitution('foo', 'bar', False)
sub_two = substitute.Substitution('bar\n\n', 'baz\n', True)
start = 'foo\n\n something else\n\n bar\n\n'
target = 'baz\n something else\n\n baz\n'
subs = [sub_one, sub_two]
actual = util.get_substituted_contents(start, subs)
assert actual == target
@patch('eg.color.EgColorizer')
def test_get_colorized_contents_calls_methods(patched_colorizer_class):
"""
We should call the correct methods on the EgColorizer objects when we color
a file.
"""
raw_contents = 'these are uncolored contents'
colored_contents = 'COLORED: ' + raw_contents
color_config = 'some color config'
# The actual instance created by these calls is stored at return_value.
colorizer_instance = patched_colorizer_class.return_value
colorizer_instance.colorize_text.return_value = colored_contents
actual = util.get_colorized_contents(raw_contents, color_config)
assert actual == colored_contents
colorizer_instance.colorize_text.assert_called_once_with(raw_contents)
@patch('eg.util.get_alias_dict')
def _helper_assert_get_resolved_program(
program,
resolved_program,
config_obj,
alias_dict,
mock_dict,
):
"""
    program: the program to resolve as an alias
    resolved_program: the result of the resolution.
    config_obj: the config_obj to use to resolve the alias path
alias_dict: the dict of aliases to be returned
"""
mock_dict.return_value = alias_dict
actual = util.get_resolved_program(program, config_obj)
assert actual == resolved_program
mock_dict.assert_called_once_with(config_obj)
def test_get_resolved_program_no_alias():
"""
A program that is not an alias should return itself.
"""
alias_dict = {
'link': 'ln',
'nc': 'netcat'
}
config_obj = 'a config'
    _helper_assert_get_resolved_program('cp', 'cp', config_obj, alias_dict)
def test_get_resolved_program_is_alias():
"""
A program that is an alias should return the resolved value.
"""
alias_dict = {
'link': 'ln',
'nc': 'netcat'
}
config_obj = 'some new config'
    _helper_assert_get_resolved_program('link', 'ln', config_obj, alias_dict)
def test_get_alias_dict_returns_contents_of_correct_file():
"""
get_alias_dict should read data from the file at the default path.
"""
alias_dict = {
'link': 'ln',
'nc': 'netcat'
}
config_obj = _create_config(
examples_dir='path/to/examples/dir',
)
alias_file_path = 'path/to/alias/file'
alias_dict_str = json.dumps(alias_dict)
_helper_assert_get_alias_dict(
alias_dict_str,
alias_dict,
config_obj,
alias_file_path,
True
)
def test_get_alias_dict_fails_gracefully_if_not_file():
"""
Since users can specify a directory for examples that might not contain the
aliases file, we want to fail gracefully if the file doesn't exist.
"""
contents_of_alias_dict_file = 'should never be reached'
config_obj = _create_config(
examples_dir='path/to/examples/dir',
)
alias_file_path = 'path/to/the/alias/file'
_helper_assert_get_alias_dict(
contents_of_alias_dict_file,
{},
config_obj,
alias_file_path,
False
)
@patch('eg.util._get_contents_of_file')
@patch('eg.util._get_alias_file_path')
@patch('os.path.isfile')
def _helper_assert_get_alias_dict(
contents_of_alias_dict_file,
target_alias_dict,
config_obj,
alias_file_path,
alias_file_path_is_file,
mock_is_file,
mock_get_alias_file_path,
mock_get_contents,
):
"""
contents_of_alias_dict_file: the string contents of the file storing the
dictionary of aliases
target_alias_dict: the target result of get_alias_dict
config_obj: the Config object
alias_file_path: the path to be returned by _get_alias_file_path
alias_file_path_is_file: True if the alias path is a file, else False
"""
mock_is_file.return_value = alias_file_path_is_file
mock_get_alias_file_path.return_value = alias_file_path
mock_get_contents.return_value = contents_of_alias_dict_file
actual = util.get_alias_dict(config_obj)
assert actual == target_alias_dict
mock_get_alias_file_path.assert_called_once_with(config_obj)
mock_is_file.assert_called_once_with(alias_file_path)
if alias_file_path_is_file:
mock_get_contents.assert_called_once_with(alias_file_path)
else:
assert mock_get_contents.call_count == 0
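# Note on the helper above: stacked @patch decorators are applied bottom-up, so
# the generated mocks are appended after the explicit arguments with the
# innermost patch first: os.path.isfile -> mock_is_file,
# eg.util._get_alias_file_path -> mock_get_alias_file_path,
# eg.util._get_contents_of_file -> mock_get_contents.
# A standalone illustration of the same mechanism:
#
#     from unittest.mock import patch
#
#     @patch('os.path.exists')    # outermost decorator -> last mock argument
#     @patch('os.path.isfile')    # innermost decorator -> first mock argument
#     def demo(mock_isfile, mock_exists):
#         ...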
@patch('os.path.join')
def test_get_alias_file_path(mock_join):
"""
_get_alias_file_path should just join the example dir and the alias file
name, to make sure we look in the right place for the file.
"""
config_obj = _create_config(
examples_dir='handy/dandy/examples/dir',
)
join_result = 'joined path'
mock_join.return_value = join_result
actual = util._get_alias_file_path(config_obj)
assert actual == join_result
mock_join.assert_called_once_with(
config_obj.examples_dir,
util.ALIAS_FILE_NAME
)
def test_is_example_file_true_if_has_suffix():
"""
Should be true if ends in EXAMPLE_FILE_SUFFIX.
"""
file_name = 'find.md'
actual = util._is_example_file(file_name)
assert actual == True
def test_is_example_file_false_if_not_suffix():
"""
Should be false if the file does not end in EXAMPLE_FILE_SUFFIX.
"""
file_name = 'aliases.json'
actual = util._is_example_file(file_name)
assert actual == False
def test_can_parse_alias_file():
"""
Make sure aliases.json file can be parsed.
This is to make sure an edit doesn't accidentally corrupt it.
"""
# We'll have to hardcode this.
alias_file_path = os.path.join(
config.DEFAULT_EXAMPLES_DIR,
util.ALIAS_FILE_NAME
)
alias_file_contents = util._get_contents_of_file(alias_file_path)
alias_dict = json.loads(alias_file_contents)
# We'll check that link goes to ln, as we know that one will be present.
assert alias_dict['link'] == 'ln'
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_correct_with_custom_dir(
mock_call,
mock_get_paths,
mock_get_program,
mock_inform,
mock_exists,
):
"""
We should resolve aliases, get the custom file path, and call subprocess.
"""
program = 'du'
resolved_program = 'alias for du'
config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
paths = ['path/to/custom/du.md', 'foo.md']
mock_get_program.return_value = resolved_program
mock_get_paths.return_value = paths
mock_exists.return_value = True
util.edit_custom_examples(program, config)
mock_get_program.assert_called_once_with(program, config)
mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
mock_call.assert_called_once_with([config.editor_cmd, paths[0]])
assert mock_inform.call_count == 0
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_creates_file_if_none_exist(
mock_call,
mock_get_paths,
mock_get_program,
mock_inform,
mock_exists,
):
program = 'du'
resolved_program = 'alias-for-du'
config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
paths = []
mock_get_program.return_value = resolved_program
mock_get_paths.return_value = paths
mock_exists.return_value = True
util.edit_custom_examples(program, config)
mock_get_program.assert_called_once_with(program, config)
mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
mock_call.assert_called_once_with(
[config.editor_cmd, 'path/to/custom/alias-for-du.md'])
assert mock_inform.call_count == 0
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_informs_if_no_custom_dir(
mock_call,
mock_get_paths,
mock_get_program,
mock_inform,
mock_exists,
):
"""
We should inform the user if they are trying to edit with no custom dir.
This should happen both when the custom dir is not set and when its path does not exist.
"""
program = 'awk'
# First with no custom dir set.
config = _create_config(editor_cmd='vi -e')
mock_exists.return_value = True
util.edit_custom_examples(program, config)
assert mock_inform.call_count == 1
# And now with it set but a nonexistent path.
config = _create_config(custom_dir='/path/to/custom', editor_cmd='vi -e')
mock_exists.return_value = False
util.edit_custom_examples(program, config)
assert mock_inform.call_count == 2
assert mock_call.call_count == 0
assert mock_get_paths.call_count == 0
assert mock_get_program.call_count == 0
|
src/sage/symbolic/constant.py | UCD4IDS/sage | 1,742 | 44969 | <reponame>UCD4IDS/sage<filename>src/sage/symbolic/constant.py<gh_stars>1000+
from sage.misc.lazy_import import lazy_import
lazy_import('sage.symbolic.expression', 'PynacConstant', deprecation=32386)
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test23.py | YangHao666666/hawq | 450 | 44991 | 'doc'
class X:
'doc'
def __init__(self):
self.fff = 0
def x(self):
pass
def y(self):
'should generate a warning'
if self.x:
pass
if self.x and globals():
pass
if globals() and self.x:
pass
def z(self):
'should NOT generate a warning'
if globals() :
pass
if self.x() and self.fff:
pass
if self.x() and globals():
pass
if globals() and self.x():
pass
if self.fff:
pass
print self.x
print self.fff
class Y(X):
'doc'
def j(self):
'should generate a warning'
if self.x:
pass
print self.fff
def h(self):
'should NOT generate a warning'
if self.x():
pass
print self.x
print self.fff
|
deepy/layers/prelu.py | uaca/deepy | 260 | 44993 | <reponame>uaca/deepy<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import NeuralLayer
from conv import Convolution
class PRelu(NeuralLayer):
"""
Probabilistic ReLU.
- http://arxiv.org/pdf/1502.01852v1.pdf
"""
def __init__(self, input_tensor=2):
super(PRelu, self).__init__("prelu")
self.input_tensor = input_tensor
def prepare(self):
self.alphas = self.create_bias(self.output_dim, "alphas")
self.register_parameters(self.alphas)
if self.input_tensor == 3:
self.alphas = self.alphas.dimshuffle('x', 0, 'x')
elif self.input_tensor == 4:
self.alphas = self.alphas.dimshuffle('x', 0, 'x', 'x')
def compute_tensor(self, x):
positive_vector = x * (x >= 0)
negative_vector = self.alphas * (x * (x < 0))
return positive_vector + negative_vector
|
tools/hippydebug.py | jweinraub/hippyvm | 289 | 45016 | #!/usr/bin/env python
"""Hippy debugger.
Usage: hippydebug.py [debugger_options] ../hippy-c args...
(There are no debugger_options so far.)
"""
import sys, os, signal
import getopt
import subprocess
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from hippy.debugger import Connection, Message
def run_interactive(read_fd, write_fd):
import readline # for raw_input() below
con = Connection(read_fd, write_fd)
last_command = ''
while True:
try:
msg = con.read()
except EOFError:
break
if msg.command == '>':
line = raw_input('> ')
if not line: # Ctrl-D
break
line = line.strip()
if not line:
line = last_command
else:
last_command = line
lst = line.split(" ", 1)
if len(lst) == 1:
con.write(Message(lst[0], None))
else:
con.write(Message(lst[0], [lst[1]]))
else:
print msg.command, " ".join(msg.args)
con.write(Message(".", None))
def reopen_terminal():
f = open("/dev/tty", "r+", 0)
sys.stdin = sys.stdout = sys.stderr = f
os.dup2(f.fileno(), 0)
os.dup2(f.fileno(), 1)
os.dup2(f.fileno(), 2)
def printable_process_status(status):
if os.WIFEXITED(status):
return 'exit code %s' % (os.WEXITSTATUS(status),)
elif os.WIFSIGNALED(status):
return 'terminated by signal %s' % (os.WTERMSIG(status),)
else:
return 'unknown exit status 0x%x' % (status,)
def main(hippy_command, *hippy_args):
read_fd1, write_fd1 = os.pipe()
read_fd2, write_fd2 = os.pipe()
child_pid = os.fork()
if child_pid == 0: # in the child
os.close(read_fd1)
os.close(write_fd2)
hippy_command_list = [
hippy_command,
'--debugger_pipes', str(read_fd2), str(write_fd1),
] + list(hippy_args)
os.execvp(hippy_command, hippy_command_list)
# this point never reached
os.close(read_fd2)
os.close(write_fd1)
try:
reopen_terminal()
print >> sys.stderr, 'Hippy Debugger'
run_interactive(read_fd1, write_fd2)
finally:
os.kill(child_pid, signal.SIGQUIT)
print >> sys.stderr, 'Hippy finished:',
_, status = os.waitpid(child_pid, 0)
print >> sys.stderr, printable_process_status(status)
if __name__ == '__main__':
options, args = getopt.getopt(sys.argv[1:], '', [])
if not args:
print >> sys.stderr, __doc__
sys.exit(1)
if not os.path.isfile(args[0]):
print >> sys.stderr, '%s: No such file' % (args[0],)
sys.exit(1)
main(*args)
|
RecoBTag/PerformanceDB/python/measure/Btag_btagTtbarWp0612.py | ckamtsikis/cmssw | 852 | 45039 | import FWCore.ParameterSet.Config as cms
BtagPerformanceESProducer_TTBARWPBTAGCSVL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGCSVL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGCSVLtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGCSVLwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGCSVM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGCSVM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGCSVMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGCSVMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGCSVT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGCSVT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGCSVTtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGCSVTwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJPL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJPLtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJPLwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJPM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJPMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJPMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJPTtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJPTwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJBPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJBPL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJBPLtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJBPLwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJBPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJBPM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJBPMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJBPMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGJBPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGJBPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGJBPTtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGJBPTwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGSSVHEM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGSSVHEM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGSSVHEMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGSSVHEMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGSSVHET = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGSSVHET'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGSSVHETtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGSSVHETwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGSSVHPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGSSVHPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGSSVHPTtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGSSVHPTwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHEL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHEL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHELtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHELwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHEM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHEM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHEMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHEMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHET = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHET'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHETtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHETwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHPL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHPLtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHPLwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHPM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHPMtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHPMwp_v8_offline')
)
BtagPerformanceESProducer_TTBARWPBTAGTCHPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('TTBARWPBTAGTCHPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagTTBARWPBTAGTCHPTtable_v8_offline'),
WorkingPointName = cms.string('BTagTTBARWPBTAGTCHPTwp_v8_offline')
)
|
pycg/machinery/imports.py | WenJinfeng/PyCG | 121 | 45068 | #
# Copyright (c) 2020 <NAME>.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import ast
import os
import importlib
import copy
from pycg import utils
def get_custom_loader(ig_obj):
"""
Closure which returns a custom loader
that modifies an ImportManager object
"""
class CustomLoader(importlib.abc.SourceLoader):
def __init__(self, fullname, path):
self.fullname = fullname
self.path = path
ig_obj.create_edge(self.fullname)
if not ig_obj.get_node(self.fullname):
ig_obj.create_node(self.fullname)
ig_obj.set_filepath(self.fullname, self.path)
def get_filename(self, fullname):
return self.path
def get_data(self, filename):
return ""
return CustomLoader
class ImportManager(object):
def __init__(self):
self.import_graph = dict()
self.current_module = ""
self.input_file = ""
self.mod_dir = None
self.old_path_hooks = None
self.old_path = None
def set_pkg(self, input_pkg):
self.mod_dir = input_pkg
def get_mod_dir(self):
return self.mod_dir
def get_node(self, name):
if name in self.import_graph:
return self.import_graph[name]
def create_node(self, name):
if not name or not isinstance(name, str):
raise ImportManagerError("Invalid node name")
if self.get_node(name):
raise ImportManagerError("Can't create a node a second time")
self.import_graph[name] = {"filename": "", "imports": set()}
return self.import_graph[name]
def create_edge(self, dest):
if not dest or not isinstance(dest, str):
raise ImportManagerError("Invalid node name")
node = self.get_node(self._get_module_path())
if not node:
raise ImportManagerError("Can't add edge to a non existing node")
node["imports"].add(dest)
def _clear_caches(self):
importlib.invalidate_caches()
sys.path_importer_cache.clear()
# TODO: maybe not do that since it empties the whole cache
for name in self.import_graph:
if name in sys.modules:
del sys.modules[name]
def _get_module_path(self):
return self.current_module
def set_current_mod(self, name, fname):
self.current_module = name
self.input_file = os.path.abspath(fname)
def get_filepath(self, modname):
if modname in self.import_graph:
return self.import_graph[modname]["filename"]
def set_filepath(self, node_name, filename):
if not filename or not isinstance(filename, str):
raise ImportManagerError("Invalid node name")
node = self.get_node(node_name)
if not node:
raise ImportManagerError("Node does not exist")
node["filename"] = os.path.abspath(filename)
def get_imports(self, modname):
if modname not in self.import_graph:
return []
return self.import_graph[modname]["imports"]
def _is_init_file(self):
return self.input_file.endswith("__init__.py")
def _handle_import_level(self, name, level):
# add a dot for each level
package = self._get_module_path().split(".")
if level > len(package):
raise ImportError("Attempting import beyond top level package")
mod_name = ("." * level) + name
# When an __init__ file is analyzed, then the module name doesn't contain
# the __init__ part in it, so special care must be taken for levels.
if self._is_init_file() and level >= 1:
if level != 1:
level -= 1
package = package[:-level]
else:
package = package[:-level]
return mod_name, ".".join(package)
def _do_import(self, mod_name, package):
if mod_name in sys.modules:
self.create_edge(mod_name)
return sys.modules[mod_name]
return importlib.import_module(mod_name, package=package)
def handle_import(self, name, level):
# We currently don't support builtin modules because they're frozen.
# Add an edge and continue.
# TODO: identify a way to include frozen modules
root = name.split(".")[0]
if root in sys.builtin_module_names:
self.create_edge(root)
return
# Import the module
try:
mod_name, package = self._handle_import_level(name, level)
except ImportError:
return
parent = ".".join(mod_name.split(".")[:-1])
parent_name = ".".join(name.split(".")[:-1])
combos = [(mod_name, package),
(parent, package),
(utils.join_ns(package, name), ""),
(utils.join_ns(package, parent_name), "")]
mod = None
for mn, pkg in combos:
try:
mod = self._do_import(mn, pkg)
break
except:
continue
if not mod:
return
if not hasattr(mod, "__file__") or not mod.__file__:
return
if self.mod_dir not in mod.__file__:
return
fname = mod.__file__
if fname.endswith("__init__.py"):
fname = os.path.split(fname)[0]
return utils.to_mod_name(
os.path.relpath(fname, self.mod_dir))
def get_import_graph(self):
return self.import_graph
def install_hooks(self):
loader = get_custom_loader(self)
self.old_path_hooks = copy.deepcopy(sys.path_hooks)
self.old_path = copy.deepcopy(sys.path)
loader_details = loader, importlib.machinery.all_suffixes()
sys.path_hooks.insert(0, importlib.machinery.FileFinder.path_hook(loader_details))
sys.path.insert(0, os.path.abspath(self.mod_dir))
self._clear_caches()
def remove_hooks(self):
sys.path_hooks = self.old_path_hooks
sys.path = self.old_path
self._clear_caches()
class ImportManagerError(Exception):
pass
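# Minimal usage sketch of ImportManager. The package path and module name below
# are hypothetical; PyCG's real driver wires this up differently, so treat this
# only as an illustration of the call order implied by the methods above.
if __name__ == "__main__":
    manager = ImportManager()
    manager.set_pkg("/path/to/package")  # hypothetical package directory
    manager.set_current_mod("pkg.module", "/path/to/package/pkg/module.py")
    manager.create_node("pkg.module")
    manager.install_hooks()
    try:
        # "sys" is a builtin module, so only an edge is recorded for it.
        manager.handle_import("sys", level=0)
    finally:
        manager.remove_hooks()
    print(manager.get_import_graph())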
|
src/products/migrations/0006_ShippableFullName.py | denkasyanov/education-backend | 151 | 45086 | <gh_stars>100-1000
# Generated by Django 2.2.7 on 2019-11-15 21:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0005_ClickMeeetingRoomURL'),
]
operations = [
migrations.AddField(
model_name='course',
name='full_name',
field=models.CharField(default='', help_text='Билет на мастер-класс о TDD или «запись курсов кройки и шитья»', max_length=255, verbose_name='Full name for letters'),
preserve_default=False,
),
migrations.AddField(
model_name='record',
name='full_name',
field=models.CharField(default='', help_text='«Запись мастер-класса о TDD»', max_length=255, verbose_name='Full name for letters'),
preserve_default=False,
),
migrations.AlterField(
model_name='course',
name='name_genitive',
field=models.CharField(help_text='«мастер-класса о TDD». К примеру для записей.', max_length=255, verbose_name='Genitive name'),
),
migrations.AlterField(
model_name='course',
name='name_receipt',
field=models.CharField(help_text='«посещение мастер-класса по TDD» или «Доступ к записи курсов кройки и шитья»', max_length=255, verbose_name='Name for receipts'),
),
migrations.AlterField(
model_name='record',
name='name_receipt',
field=models.CharField(help_text='«Доступ к записи курсов кройки и шитья»', max_length=255, verbose_name='Name for receipts'),
),
]
|
samsungctl/upnp/UPNP_Device/upnp_class.py | p3g4asus/samsungctl | 135 | 45093 | # -*- coding: utf-8 -*-
import requests
import os
from lxml import etree
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from .xmlns import strip_xmlns
from .service import Service
from .embedded_device import EmbeddedDevice
from .instance_singleton import InstanceSingleton
except ImportError:
from xmlns import strip_xmlns
from service import Service
from embedded_device import EmbeddedDevice
from instance_singleton import InstanceSingleton
class UPNPObject(object):
def __init__(self, ip, locations, dump=''):
self.ip_address = ip
self._devices = {}
self._services = {}
for location in locations:
parsed_url = urlparse(location)
url = parsed_url.scheme + '://' + parsed_url.netloc
response = requests.get(location)
content = response.content.decode('utf-8')
if dump:
path = location
if path.startswith('/'):
path = path[1:]
if '/' in path:
path, file_name = path.rsplit('/', 1)
path = os.path.join(dump, path)
else:
file_name = path
path = dump
if not os.path.exists(path):
os.makedirs(path)
if not file_name.endswith('.xml'):
file_name += '.xml'
with open(os.path.join(path, file_name), 'w') as f:
f.write(content)
try:
root = etree.fromstring(content)
except etree.XMLSyntaxError:
continue
root = strip_xmlns(root)
node = root.find('device')
services = node.find('serviceList')
if services is None:
services = []
devices = node.find('deviceList')
if devices is None:
devices = []
for service in services:
scpdurl = service.find('SCPDURL').text.replace(url, '')
control_url = service.find('controlURL').text
if control_url is None:
if scpdurl.endswith('.xml'):
control_url = scpdurl.rsplit('/', 1)[0]
if control_url == scpdurl:
control_url = ''
else:
control_url = scpdurl
else:
control_url = control_url.replace(url, '')
service_id = service.find('serviceId').text
service_type = service.find('serviceType').text
service = Service(
self,
url,
scpdurl,
service_type,
control_url,
node,
dump=dump
)
name = service_id.split(':')[-1]
service.__name__ = name
self._services[name] = service
for device in devices:
device = EmbeddedDevice(
url,
node=device,
parent=self,
dump=dump
)
self._devices[device.__name__] = device
def __getattr__(self, item):
if item in self.__dict__:
return self.__dict__[item]
if item in self._devices:
return self._devices[item]
if item in self._services:
return self._services[item]
if item in self.__class__.__dict__:
if hasattr(self.__class__.__dict__[item], 'fget'):
return self.__class__.__dict__[item].fget(self)
raise AttributeError(item)
@property
def as_dict(self):
res = dict(
services=list(service.as_dict for service in self.services),
devices=list(device.as_dict for device in self.devices)
)
return res
@property
def access_point(self):
return self.__class__.__name__
@property
def services(self):
return list(self._services.values())[:]
@property
def devices(self):
return list(self._devices.values())[:]
def __str__(self):
output = '\n\n' + str(self.__name__) + '\n'
output += 'IP Address: ' + self.ip_address + '\n'
output += '==============================================\n'
if self.services:
output += 'Services:\n'
for cls in self.services:
output += cls.__str__(indent=' ').rstrip() + '\n'
else:
output += 'Services: None\n'
if self.devices:
output += 'Devices:\n'
for cls in self.devices:
output += cls.__str__(indent=' ').rstrip() + '\n'
else:
output += 'Devices: None\n'
return output
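# Hedged usage sketch: the constructor above only needs an IP address and one
# or more device-description URLs. The values below are placeholders and must
# point at a reachable UPnP device for the HTTP fetch in __init__ to succeed.
#
#     device = UPNPObject('192.168.1.50', ['http://192.168.1.50:9197/dmr'])
#     print(device.as_dict)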
|
src/condor_tests/ornithology/io.py | sridish123/htcondor | 217 | 45102 | <reponame>sridish123/htcondor
# Copyright 2019 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import textwrap
from pathlib import Path
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# TODO: does this way of doing permissions work on Windows?
def write_file(path: Path, text: str, permissions: int = 0o777) -> Path:
"""
Write the given ``text`` to a new file at the given ``path``, stomping
anything that might exist there.
Parameters
----------
path
The path to write to.
text
The text to write.
permissions
The permissions to give the file.
Returns
-------
path : pathlib.Path
The path the file was written to (as an absolute path).
"""
path = Path(path).absolute()
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(textwrap.dedent(text))
path.chmod(permissions)
return path
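# Quick illustration of the helper above; the path, script body, and mode are
# arbitrary examples.
#
#     script = write_file(
#         Path("test_dir") / "noop.sh",
#         """
#         #!/bin/bash
#         exit 0
#         """,
#         permissions=0o755,
#     )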
|
magma/t.py | leonardt/magma | 167 | 45201 | <filename>magma/t.py
import abc
import enum
from magma.common import deprecated
from magma.compatibility import IntegerTypes, StringTypes
from magma.ref import AnonRef, NamedRef, TempNamedRef, DefnRef, InstRef
from magma.protocol_type import magma_value
from magma.wire import wire
class Direction(enum.Enum):
In = 0
Out = 1
InOut = 2
Undirected = 3
class Type(object):
def __init__(self, name=None):
if name is None:
name = AnonRef()
elif isinstance(name, str):
name = TempNamedRef(name=name, value=self)
self.name = name
__hash__ = object.__hash__
def __repr__(self):
if self.name.anon():
return f"{type(self)}()"
has_name = (isinstance(self.name, NamedRef) and
not isinstance(self.name, (InstRef, DefnRef)))
if has_name:
return f"{type(self)}(name=\"{repr(self.name)}\")"
return repr(self.name)
def __str__(self):
if self.name.anon():
# Anon names aren't very useful, so just return a repr instead so
# it's easier to find the value
return repr(self)
return str(self.name)
# An instance has an anon name.
def anon(self):
return self.name.anon()
# Abstract method to be implemented by subclasses.
@classmethod
def is_oriented(cls, direction):
raise NotImplementedError()
@classmethod
def is_clock(cls):
raise NotImplementedError()
@classmethod
def is_input(cls):
return cls.is_oriented(Direction.In)
@classmethod
def is_output(cls):
return cls.is_oriented(Direction.Out)
@classmethod
def is_inout(cls):
return cls.is_oriented(Direction.InOut)
@classmethod
@deprecated
def isoriented(cls, direction):
return cls.is_oriented(direction)
@classmethod
@deprecated
def isinput(cls):
return cls.is_input()
@classmethod
@deprecated
def isoutput(cls):
return cls.is_output()
@classmethod
@deprecated
def isinout(cls):
return cls.is_inout()
@property
def debug_name(self):
defn_str = ""
inst_str = ""
if isinstance(self.name, DefnRef):
defn_str = str(self.name.defn.name) + "."
elif isinstance(self.name, InstRef):
inst_str = str(self.name.inst.name) + "."
defn_str = str(self.name.inst.defn.name) + "."
return f"{defn_str}{inst_str}{str(self)}"
def __le__(self, other):
if self.is_output():
raise TypeError(f"Cannot use <= to assign to output: "
f"{self.debug_name} (trying to assign "
f"{other.debug_name})")
wire(other, self)
def __imatmul__(self, other):
other = magma_value(other)
if self.is_output():
raise TypeError(f"Cannot use @= to assign to output: {self} "
f"(trying to assign {other})")
wire(other, self)
return self
@abc.abstractmethod
def unused(self):
# Mark value is unused by calling unused on the underlying magma
# elements. For example, m.Bit is wired up to a coreir term primitive. A
# general m.Array and m.Tuple will recursively call `unused` on its
# members.
raise NotImplementedError()
@abc.abstractmethod
def undriven(self):
# Mark value is undriven by calling undriven on the underlying magma
# elements. For example, m.Bit is wired up to a coreir undriven
# primitive. A general m.Array and m.Tuple will recursively call
# `undriven` on its members.
raise NotImplementedError()
def is_driven_anon_temporary(self):
"""
Returns true if this is an anonymous temporary value (not an output)
that is driven
"""
return self.name.anon() and not self.is_output() and self.driven()
class Kind(type):
# Subclasses only need to implement one of these methods.
def __eq__(cls, rhs):
return cls is rhs
__hash__ = type.__hash__
def __repr__(cls):
return cls.__name__
def __str__(cls):
return cls.__name__
@abc.abstractmethod
def qualify(cls, direction):
raise NotImplementedError()
def flip(cls):
if cls.direction == Direction.In:
return cls[Direction.Out]
if cls.direction == Direction.Out:
return cls[Direction.In]
# Flip of inout is inout, and flip of undirected is undirected.
return cls
@property
def undirected_t(cls):
return cls.qualify(Direction.Undirected)
@property
def is_directed(cls):
return cls is not cls.qualify(Direction.Undirected)
def In(T):
return T.qualify(Direction.In)
def Out(T):
return T.qualify(Direction.Out)
def InOut(T):
return T.qualify(Direction.InOut)
def Flip(T):
return T.flip()
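# Direction qualification in practice (hedged sketch; concrete kinds such as
# m.Bit / m.Bits are defined elsewhere in magma, not in this module):
#
#     import magma as m
#     DataIn = m.In(m.Bits[8])    # qualify a kind as an input
#     DataOut = m.Flip(DataIn)    # flipping an input yields the output kind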
|
tests/fixtures/script-files/sample_script.py | avoltz/poetry-core | 205 | 45209 | <gh_stars>100-1000
#!/usr/bin/env python
hello = "Hello World!"
|
hvplot/tests/testgridplots.py | lhoupert/hvplot | 338 | 45224 | <reponame>lhoupert/hvplot
from unittest import SkipTest
from collections import OrderedDict
import numpy as np
from holoviews import Store
from holoviews.element import RGB, Image
from holoviews.element.comparison import ComparisonTestCase
try:
import xarray as xr
except:
raise SkipTest('XArray not available')
else:
import hvplot.xarray # noqa
class TestGridPlots(ComparisonTestCase):
def setUp(self):
coords = OrderedDict([('band', [1, 2, 3]), ('y', [0, 1]), ('x', [0, 1])])
self.da_rgb = xr.DataArray(np.arange(12).reshape((3, 2, 2)),
coords, ['band', 'y', 'x'])
coords = OrderedDict([('time', [0, 1]), ('band', [1, 2, 3]), ('y', [0, 1]), ('x', [0, 1])])
self.da_rgb_by_time = xr.DataArray(np.arange(24).reshape((2, 3, 2, 2)),
coords, ['time', 'band', 'y', 'x'])
coords = OrderedDict([('time', [0, 1]), ('lat', [0, 1]), ('lon', [0, 1])])
self.da_img_by_time = xr.DataArray(np.arange(8).reshape((2, 2, 2)),
coords, ['time', 'lat', 'lon']).assign_coords(
lat1=xr.DataArray([2,3], dims=['lat']))
self.xarr_with_attrs = xr.DataArray(
np.random.rand(10, 10), coords=[('x', range(10)), ('y', range(10))],
dims=['y', 'x'], attrs={'long_name': 'luminosity', 'units': 'lm'})
self.xarr_with_attrs.x.attrs['long_name'] = 'Declination'
self.xarr_with_attrs.y.attrs['long_name'] = 'Right Ascension'
self.xds_with_attrs = xr.Dataset({'light': self.xarr_with_attrs })
self.da_img = xr.DataArray(np.arange(-2, 2).reshape((2, 2)), name='foo')
self.big_img = xr.DataArray(np.arange(-1e6, 1e6).reshape(1000, 2000))
self.ds = xr.Dataset({
'temp': (('lon', 'lat'), 15 + 8 * np.random.randn(2, 2)),
'precip': (('lon', 'lat'), 10 * np.random.rand(2, 2))},
coords={'lon': [-99.83, -99.32],'lat': [42.25, 42.21]})
xs = np.linspace(0, 10, 5)
lon = xs*xs[np.newaxis, :].T
lat = xs+xs[:, np.newaxis]
coords = {
'lon': (('ny', 'nx'), lon),
'lat': (('ny', 'nx'), lat),
'time': [1, 2, 3],
'samples': ('nsamples', [0, 1, 2, 3])
}
self.ds_unindexed = xr.DataArray(
np.random.rand(5, 5, 3, 4), coords=coords, dims=('nx', 'ny', 'time', 'nsamples')
)
def test_rgb_dataarray_no_args(self):
rgb = self.da_rgb.hvplot()
self.assertEqual(rgb, RGB(([0, 1], [0, 1])+tuple(self.da_rgb.values)))
def test_rgb_dataarray_explicit_args(self):
rgb = self.da_rgb.hvplot('x', 'y')
self.assertEqual(rgb, RGB(([0, 1], [0, 1])+tuple(self.da_rgb.values)))
def test_rgb_dataarray_explicit_args_and_kind(self):
rgb = self.da_rgb.hvplot.rgb('x', 'y')
self.assertEqual(rgb, RGB(([0, 1], [0, 1])+tuple(self.da_rgb.values)))
def test_rgb_dataset(self):
rgb = self.da_rgb.to_dataset(name='z').hvplot.rgb()
self.assertEqual(rgb, RGB(([0, 1], [0, 1])+tuple(self.da_rgb.values)))
def test_rgb_dataset_explicit_z(self):
rgb = self.da_rgb.to_dataset(name='z').hvplot.rgb(z='z')
self.assertEqual(rgb, RGB(([0, 1], [0, 1])+tuple(self.da_rgb.values)))
def test_rgb_dataarray_groupby_explicit(self):
rgb = self.da_rgb_by_time.hvplot.rgb('x', 'y', groupby='time')
self.assertEqual(rgb[0], RGB(([0, 1], [0, 1])+tuple(self.da_rgb_by_time.values[0])))
self.assertEqual(rgb[1], RGB(([0, 1], [0, 1])+tuple(self.da_rgb_by_time.values[1])))
def test_rgb_dataarray_groupby_infer(self):
rgb = self.da_rgb_by_time.hvplot.rgb('x', 'y', bands='band')
self.assertEqual(rgb[0], RGB(([0, 1], [0, 1])+tuple(self.da_rgb_by_time.values[0])))
self.assertEqual(rgb[1], RGB(([0, 1], [0, 1])+tuple(self.da_rgb_by_time.values[1])))
def test_img_dataarray_infers_correct_other_dims(self):
img = self.da_img_by_time[0].hvplot()
self.assertEqual(img, Image(self.da_img_by_time[0], ['lon', 'lat'], ['value']))
def test_img_dataarray_groupby_infers_correct_other_dims(self):
img = self.da_img_by_time.hvplot(groupby='time')
self.assertEqual(img[0], Image(self.da_img_by_time[0], ['lon', 'lat'], ['value']))
self.assertEqual(img[1], Image(self.da_img_by_time[1], ['lon', 'lat'], ['value']))
def test_line_infer_dimension_params_from_xarray_attrs(self):
hmap = self.xarr_with_attrs.hvplot.line(groupby='x', dynamic=False)
self.assertEqual(hmap.kdims[0].label, 'Declination')
self.assertEqual(hmap.last.kdims[0].label, 'Right Ascension')
self.assertEqual(hmap.last.vdims[0].label, 'luminosity')
self.assertEqual(hmap.last.vdims[0].unit, 'lm')
def test_img_infer_dimension_params_from_xarray_attrs(self):
img = self.xarr_with_attrs.hvplot.image(clim=(0, 2))
self.assertEqual(img.kdims[0].label, 'Declination')
self.assertEqual(img.kdims[1].label, 'Right Ascension')
self.assertEqual(img.vdims[0].label, 'luminosity')
self.assertEqual(img.vdims[0].unit, 'lm')
self.assertEqual(img.vdims[0].range, (0, 2))
def test_table_infer_dimension_params_from_xarray_ds_attrs(self):
table = self.xds_with_attrs.hvplot.dataset()
self.assertEqual(table.kdims[0].label, 'Declination')
self.assertEqual(table.kdims[1].label, 'Right Ascension')
self.assertEqual(table.vdims[0].label, 'luminosity')
self.assertEqual(table.vdims[0].unit, 'lm')
def test_points_infer_dimension_params_from_xarray_attrs(self):
points = self.xarr_with_attrs.hvplot.points(c='value', clim=(0, 2))
self.assertEqual(points.kdims[0].label, 'Declination')
self.assertEqual(points.kdims[1].label, 'Right Ascension')
self.assertEqual(points.vdims[0].label, 'luminosity')
self.assertEqual(points.vdims[0].unit, 'lm')
self.assertEqual(points.vdims[0].range, (0, 2))
def test_dataset_infer_dimension_params_from_xarray_attrs(self):
ds = self.xarr_with_attrs.hvplot.dataset()
self.assertEqual(ds.kdims[0].label, 'Declination')
self.assertEqual(ds.kdims[1].label, 'Right Ascension')
self.assertEqual(ds.vdims[0].label, 'luminosity')
self.assertEqual(ds.vdims[0].unit, 'lm')
def test_table_infer_dimension_params_from_xarray_attrs(self):
table = self.xarr_with_attrs.hvplot.dataset()
self.assertEqual(table.kdims[0].label, 'Declination')
self.assertEqual(table.kdims[1].label, 'Right Ascension')
self.assertEqual(table.vdims[0].label, 'luminosity')
self.assertEqual(table.vdims[0].unit, 'lm')
def test_symmetric_img_deduces_symmetric(self):
plot = self.da_img.hvplot.image()
plot_opts = Store.lookup_options('bokeh', plot, 'plot')
self.assertEqual(plot_opts.kwargs.get('symmetric'), True)
style_opts = Store.lookup_options('bokeh', plot, 'style')
self.assertEqual(style_opts.kwargs['cmap'], 'coolwarm')
def test_symmetric_img_with_symmetric_set_to_false(self):
plot = self.da_img.hvplot.image(symmetric=False)
plot_opts = Store.lookup_options('bokeh', plot, 'plot')
self.assertEqual(plot_opts.kwargs.get('symmetric'), False)
style_opts = Store.lookup_options('bokeh', plot, 'style')
self.assertEqual(style_opts.kwargs['cmap'], 'kbc_r')
def test_symmetric_img_with_cmap_set(self):
plot = self.da_img.hvplot.image(cmap='fire')
plot_opts = Store.lookup_options('bokeh', plot, 'plot')
self.assertEqual(plot_opts.kwargs.get('symmetric'), True)
style_opts = Store.lookup_options('bokeh', plot, 'style')
self.assertEqual(style_opts.kwargs['cmap'], 'fire')
def test_symmetric_with_big_img_sets_symmetric_to_false_without_calculating(self):
plot = self.big_img.hvplot.image()
plot_opts = Store.lookup_options('bokeh', plot, 'plot')
self.assertEqual(plot_opts.kwargs.get('symmetric'), False)
style_opts = Store.lookup_options('bokeh', plot, 'style')
self.assertEqual(style_opts.kwargs['cmap'], 'kbc_r')
def test_symmetric_with_big_img_and_check_symmetric_max_calculates_symmetric(self):
plot = self.big_img.hvplot.image(check_symmetric_max=int(1e7))
plot_opts = Store.lookup_options('bokeh', plot, 'plot')
self.assertEqual(plot_opts.kwargs.get('symmetric'), True)
style_opts = Store.lookup_options('bokeh', plot, 'style')
self.assertEqual(style_opts.kwargs['cmap'], 'coolwarm')
def test_multiple_zs(self):
plot = self.ds.hvplot(x='lat', y='lon', z=['temp', 'precip'], dynamic=False)
assert 'temp' in plot.keys()
assert 'precip' in plot.keys()
assert plot['temp'].kdims == ['lat', 'lon']
assert plot['precip'].kdims == ['lat', 'lon']
def test_unindexed_quadmesh(self):
plot = self.ds_unindexed.hvplot.quadmesh(x='lon', y='lat')
assert len(plot.kdims) == 2
assert plot.kdims[0].name == 'time'
assert plot.kdims[1].name == 'nsamples'
p = plot[1, 0]
assert len(p.kdims) == 2
assert p.kdims[0].name == 'lon'
assert p.kdims[1].name == 'lat'
|
src/karta_manual_anchor.py | CrackerCat/Karta | 716 | 45233 | #!/usr/bin/python
from config.utils import *
from elementals import Prompter
from function_context import SourceContext, BinaryContext, IslandContext
import os
import sys
import argparse
import logging
from collections import defaultdict
def recordManualAnchors(library_config, knowledge_config, lib_name, prompter):
"""Record the list of user defined manual anchor matches.
Args:
library_config (json): json loaded data from the library's configuration
knowledge_config (dict): a mapping of all of the accumulated knowledge for the currently analysed binary
lib_name (str): name of the open source library that will contain these manual anchors
prompter (prompter): prompter instance
Return Value:
Updated knowledge mapping (to be stored back as a *json file)
"""
# Prepare & load the stats from each file (using the functions file)
src_file_names = []
prompter.info("Loading the information regarding the compiled source files")
prompter.addIndent()
files_config = library_config[JSON_TAG_FILES]
for full_file_path in files_config:
prompter.debug(f"Parsing the canonical representation of file: {full_file_path.split(os.path.sep)[-1]}")
src_file_names.append(full_file_path)
parseFileStats(full_file_path, files_config[full_file_path])
prompter.removeIndent()
# get the variables from the utils file
src_functions_list, src_functions_ctx, src_file_mappings = getSourceFunctions()
# pre-processed list indices (efficiency improvement)
func_indices = defaultdict(list)
for func_idx, func_name in enumerate(src_functions_list):
func_indices[func_name].append(func_idx)
# Start requesting the user to add his manual records
manual_anchors = {}
prompter.info("Starting the input loop")
prompter.addIndent()
finished = False
while not finished:
prompter.info("Enter the details for the current manual anchor:")
parsed_correctly = True
while parsed_correctly:
function_name = prompter.input("Function Name (case sensitive): ")
# check existence
if src_functions_list.count(function_name) == 0:
prompter.error(f"Function \"{function_name}\" does not exist")
parsed_correctly = False
break
# check uniqueness
if src_functions_list.count(function_name) > 1:
file_name = prompter.input("File Name (case sensitive): ")
src_indices = list(filter(lambda x: src_functions_ctx[x].file == file_name, func_indices[function_name]))
if len(src_indices) == 0:
prompter.error(f"Function \"{function_name}\" does not exist in file \"{file_name}\"")
parsed_correctly = False
break
src_index = src_indices[0]
else:
src_index = func_indices[function_name][0]
# get the binary address
bin_ea_str_raw = prompter.input("Function Address (ea in the form: 0x12345678): ")
if bin_ea_str_raw.startswith("0x"):
bin_ea_str = bin_ea_str_raw[2:]
else:
bin_ea_str = bin_ea_str_raw
try:
bin_ea = int(bin_ea_str, 16)
except ValueError:
prompter.error(f"Illegal hexa address: \"{bin_ea_str_raw}\"")
parsed_correctly = False
break
# finished successfully :)
manual_anchors[src_index] = bin_ea
break
should_continue = prompter.input("Do you want to add another manual anchor? <Y/N>: ")
finished = should_continue.lower() != "y"
prompter.removeIndent()
# add the info to the json
if len(manual_anchors) > 0:
if JSON_TAG_MANUAL_ANCHORS not in knowledge_config:
knowledge_config[JSON_TAG_MANUAL_ANCHORS] = {}
all_manual_anchors = knowledge_config[JSON_TAG_MANUAL_ANCHORS]
if lib_name not in all_manual_anchors:
all_manual_anchors[lib_name] = {}
cur_manual_anchors = all_manual_anchors[lib_name]
# merge the results
for new_index in manual_anchors:
src_ctx = src_functions_ctx[new_index]
cur_manual_anchors[str(new_index)] = [src_ctx.file, src_ctx.name, hex(manual_anchors[new_index]), manual_anchors[new_index]]
# return back the data
return knowledge_config
def main(args):
"""Run the manual anchors script.
Args:
args (list): list of command line arguments
"""
global disas_cmd
# argument parser
parser = argparse.ArgumentParser(description=f"Enables the user to manually defined matches, acting as manual anchors, later to be used by {LIBRARY_NAME}'s Matcher.")
parser.add_argument("bin", metavar="bin", type=str,
help="path to the disassembler's database for the wanted binary")
parser.add_argument("name", metavar="lib-name", type=str,
help="name (case sensitive) of the relevant open source library")
parser.add_argument("version", metavar="lib-version", type=str,
help="version string (case sensitive) as used by the identifier")
parser.add_argument("config", metavar="configs", type=str,
help="path to the *.json \"configs\" directory")
parser.add_argument("-D", "--debug", action="store_true", help="set logging level to logging.DEBUG")
parser.add_argument("-W", "--windows", action="store_true", help="signals that the binary was compiled for Windows")
# parse the args
args = parser.parse_args(args)
library_name = args.name
library_version = args.version
bin_path = args.bin
config_path = args.config
is_debug = args.debug
is_windows = args.windows
# open the log
prompter = Prompter(min_log_level=logging.INFO if not is_debug else logging.DEBUG)
prompter.info("Starting the Script")
# use the user supplied flag
if is_windows:
setWindowsMode()
# always init the utils before we start
initUtils(prompter, None, invoked_before=True)
# register our contexts
registerContexts(SourceContext, BinaryContext, IslandContext)
# Load the information from the relevant library
lib_config_file = constructConfigPath(library_name, library_version)
prompter.debug(f"Loading the configuration file for library: {library_name}")
prompter.addIndent()
cur_config_path = os.path.join(config_path, lib_config_file)
if not os.path.exists(cur_config_path):
prompter.error(f"Missing configuration file ({lib_config_file}) for \"{library_name}\" Version: \"{library_version}\"")
return
# Load the configuration file
fd = open(cur_config_path, "r")
library_config = json.load(fd)
fd.close()
prompter.removeIndent()
# Load the existing knowledge config, if exists
prompter.debug(f"Opening knowledge configuration file from path: {accumulatedKnowledgePath(bin_path)}")
prompter.addIndent()
knowledge_config = loadKnowledge(bin_path)
if knowledge_config is None:
prompter.debug("Failed to find an existing configuration file")
knowledge_config = {}
prompter.removeIndent()
# receive all of the couples from the user
knowledge_config = recordManualAnchors(library_config, knowledge_config, library_name, prompter)
prompter.info("Storing the data to the knowledge configuration file")
storeKnowledge(knowledge_config, bin_path)
# finished
prompter.info("Finished Successfully")
if __name__ == "__main__":
main(sys.argv[1:])
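# Example invocation (all paths, the library name, and the version string are
# placeholders; adapt them to your own setup):
#
#   python karta_manual_anchor.py /path/to/analyzed_binary_db libpng 1.6.37 \
#       /path/to/karta/configs --debug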
|
test/com/facebook/buck/parser/testdata/python_user_defined_rules/errors/invalid_attr_name/bad_attrs.bzl | Unknoob/buck | 8,027 | 45276 | <reponame>Unknoob/buck
def _impl(_ctx):
pass
bad_attrs = rule(implementation = _impl, attrs = {"1234isntvalid": attr.int()})
|
scripts/misc/redis_test.py | cclauss/archai | 344 | 45288 | import redis
redis_client = redis.StrictRedis(host="127.0.0.1", port=6379)
input("") |
tests/test_analytics.py | lakshyaag/csgo | 118 | 45292 | import pytest
import numpy as np
from csgo.analytics.distance import (
bombsite_distance,
point_distance,
polygon_area,
area_distance,
)
from csgo.analytics.coords import Encoder
class TestCSGOAnalytics:
"""Class to test CSGO analytics"""
def test_bombsite_distance(self):
"""Test bombsite distance function."""
assert bombsite_distance([0, 0, 0]) == 35
assert bombsite_distance([0, 0, 0], bombsite="B") == 38
assert bombsite_distance([0, 0, 0], bombsite="A", map="de_inferno") == 30
def test_point_distance(self):
"""Test point distance function"""
assert point_distance([0, 0], [1, 1], type="euclidean") == 1.4142135623730951
assert point_distance([0, 0], [1, 1], type="manhattan") == 2
assert point_distance([0, 0], [1, 1], type="canberra") == 2.0
assert point_distance([-1, 5], [2, 1], type="cosine") == 0.7368825942078912
assert point_distance([0, 0, 0], [100, 100, 100]) == 4
assert point_distance([0, 0, 0], [100, 100, 100], map="de_vertigo") == 1
def test_polygon_area(self):
"""Test polygon area function"""
assert polygon_area([0, 1, 2], [0, 1, 0]) == 1.0
def test_bombsite_invalid_map(self):
"""
Test bombsite function with an invalid map.
"""
with pytest.raises(ValueError):
bombsite_distance([0, 0, 0], map="dust2")
def test_point_invalid_map(self):
"""
Test point distance function with an invalid map.
"""
with pytest.raises(ValueError):
point_distance([0, 0, 0], [1, 1, 1], map="dust2")
def test_area_invalid_map(self):
"""
Test area distance function with an invalid map.
"""
with pytest.raises(ValueError):
area_distance(26, 42, map="dust2")
def test_area_dist(self):
"""
Tests that area distance returns correct value.
"""
assert area_distance(26, 42, map="de_mirage") == 26
def test_place_encode(self):
"""
Tests that place encoding works for correct values
"""
e = Encoder()
assert np.sum(e.encode("place", "TSpawn")) == 1
assert np.sum(e.encode("place", "TSpawnnn")) == 0
assert np.sum(e.encode("map", "de_dust2")) == 1
assert np.sum(e.encode("map", "de_dust0")) == 0
|
text/src/autogluon/text/text_prediction/infer_types.py | zhiqiangdon/autogluon | 4,462 | 45305 | <gh_stars>1000+
import collections
import pandas as pd
import warnings
from typing import Union, Optional, List, Dict, Tuple
from autogluon.core.constants import MULTICLASS, BINARY, REGRESSION
from .constants import NULL, CATEGORICAL, NUMERICAL, TEXT
#TODO, This file may later be merged with the infer type logic in tabular.
def is_categorical_column(data: pd.Series,
valid_data: pd.Series,
threshold: int = None,
ratio: Optional[float] = None,
oov_ratio_threshold: Optional[float] = None,
is_label: bool = False) -> bool:
"""Check whether the column is a categorical column.
If the number of unique elements in the column is smaller than
min(#Total Sample * ratio, threshold),
it will be treated as a categorical column.
Parameters
----------
data
The column data
valid_data
Additional validation data
threshold
The threshold for detecting categorical column
ratio
The ratio detecting categorical column
oov_ratio_threshold
The out-of-vocabulary ratio between training and validation.
This is used to determine if the column is a categorical column.
Usually, a categorical column can tolerate a small OOV ratio
is_label
Whether the column is a label column.
Returns
-------
is_categorical
Whether the column is a categorical column
"""
if data.dtype.name == 'category':
return True
else:
if threshold is None:
if is_label:
threshold = 100
oov_ratio_threshold = 0
ratio = 0.1
else:
threshold = 20
oov_ratio_threshold = 0
ratio = 0.1
threshold = min(int(len(data) * ratio), threshold)
data_value_counts = data.value_counts(dropna=False)
key_set = set(data_value_counts.keys())
if len(data_value_counts) < threshold:
valid_value_counts = valid_data.value_counts(dropna=False)
total_valid_num = len(valid_data)
oov_num = 0
for k, v in zip(valid_value_counts.keys(), valid_value_counts.values):
if k not in key_set:
oov_num += v
if is_label and oov_num != 0:
return False
if oov_num / total_valid_num > oov_ratio_threshold:
return False
return True
return False
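# Worked example of the rule above: for a non-label column the defaults are
# ratio=0.1, threshold=20, oov_ratio_threshold=0, so a roughly 1000-row column
# counts as categorical only if it has fewer than min(1000 * 0.1, 20) = 20
# unique values and the validation split introduces no out-of-vocabulary values.
#
#     import pandas as pd
#     train = pd.Series(['a', 'b', 'c'] * 334)   # 3 unique values, 1002 rows
#     valid = pd.Series(['a', 'b'] * 50)         # no OOV values
#     assert is_categorical_column(train, valid)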
def is_numerical_column(data: pd.Series,
valid_data: Optional[pd.Series] = None):
"""Try to identify if a column is a numerical column.
We adopted a very simple rule to verify if the column is a numerical column.
Parameters
----------
data
The training data series
valid_data
The validation data series
Returns
-------
is_numerical
Whether the column is a numerical column
"""
try:
numerical_data = pd.to_numeric(data)
if valid_data is not None:
numerical_valid_data = pd.to_numeric(valid_data)
return True
except:
return False
def infer_column_problem_types(
train_df: pd.DataFrame,
valid_df: pd.DataFrame,
label_columns: Union[str, List[str]],
problem_type: Optional[str] = None,
provided_column_types: Optional[Dict] = None) -> Tuple[collections.OrderedDict, str]:
"""Infer the column types of the data frame + the problem type
Parameters
----------
train_df
The training Pandas DataFrame
valid_df
The validation Pandas DataFrame
label_columns
The chosen label column names
problem_type
The type of the problem
provided_column_types
Additional dictionary that you can use to specify the columns types that you know.
{'col_name': TYPE}
Returns
-------
column_types
Dictionary of column types
If the column does not contain any useful information, we will filter the column with
type = NULL
problem_type
The inferred problem type
"""
if isinstance(label_columns, str):
label_columns = [label_columns]
elif isinstance(label_columns, (list, tuple)):
pass
else:
raise NotImplementedError(f'label_columns is not supported. label_columns={label_columns}.')
label_set = set(label_columns)
assert len(label_set) == 1, 'Currently, only a single label column is supported.'
column_types = collections.OrderedDict()
# Process all feature columns
for col_name in train_df.columns:
is_label = col_name in label_set
if provided_column_types is not None and col_name in provided_column_types:
column_types[col_name] = provided_column_types[col_name]
continue
if is_label:
num_train_missing = train_df[col_name].isnull().sum()
num_valid_missing = valid_df[col_name].isnull().sum()
if num_train_missing > 0:
raise ValueError(f'Label column "{col_name}" contains missing values in the '
f'training data frame. You may want to filter your data because '
f'missing label is currently not supported.')
if num_valid_missing > 0:
raise ValueError(f'Label column "{col_name}" contains missing values in the '
f'validation data frame. You may want to filter your data because '
f'missing label is currently not supported.')
if problem_type == MULTICLASS or problem_type == BINARY:
column_types[col_name] = CATEGORICAL
continue
elif problem_type == REGRESSION:
column_types[col_name] = NUMERICAL
continue
# Identify columns that provide no information
idx = train_df[col_name].first_valid_index()
if idx is None or len(train_df[col_name].unique()) == 1:
# No valid index, thus, we will just ignore the column
if not is_label:
column_types[col_name] = NULL
else:
warnings.warn(f'Label column "{col_name}" contains only one label. You may want'
f' to check your dataset again.')
# Type inference proceeds in the following order:
# 1) Check whether the column is categorical
# 2) Check whether the column is numerical
# 3) All other columns are treated as text columns
if is_categorical_column(train_df[col_name], valid_df[col_name],
is_label=is_label):
column_types[col_name] = CATEGORICAL
elif is_numerical_column(train_df[col_name], valid_df[col_name]):
column_types[col_name] = NUMERICAL
else:
column_types[col_name] = TEXT
problem_type = infer_problem_type(column_types, label_columns[0], train_df, problem_type)
return column_types, problem_type
def printable_column_type_string(column_types):
ret = 'Column Types:\n'
for col_name, col_type in column_types.items():
ret += f' - "{col_name}": {col_type}\n'
return ret
def infer_problem_type(column_types, label_column, data_df,
provided_problem_type=None):
"""Inference the type of the problem based on type of the column and
the training data.
Also, it will try to check the correctness of the column types and the provided problem_type.
Parameters
----------
column_types
Type of the columns
label_column
The label column
data_df
The dataframe
provided_problem_type
The provided problem type
Returns
-------
problem_type
Type of the problem
"""
if provided_problem_type is not None:
if provided_problem_type == MULTICLASS or provided_problem_type == BINARY:
err_msg = f'Provided problem type is "{provided_problem_type}" while the number of ' \
f'unique value in the label column is {len(data_df[label_column].unique())}'
if provided_problem_type == BINARY and len(data_df[label_column].unique()) != 2:
raise AssertionError(err_msg)
elif provided_problem_type == MULTICLASS and len(data_df[label_column].unique()) <= 2:
raise AssertionError(err_msg)
return provided_problem_type
else:
if column_types[label_column] == CATEGORICAL:
if len(data_df[label_column].value_counts()) == 2:
return BINARY
else:
return MULTICLASS
elif column_types[label_column] == NUMERICAL:
return REGRESSION
else:
raise ValueError(f'The label column "{label_column}" has type'
                             f' "{column_types[label_column]}" and is not supported yet.')
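
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal call of
# infer_problem_type on a toy pandas DataFrame.  It assumes pandas is
# available and that CATEGORICAL / BINARY are the string constants used
# elsewhere in this module; the column name and values are hypothetical.
def _example_infer_problem_type():
    import pandas as pd
    df = pd.DataFrame({'label': ['spam', 'ham', 'spam', 'ham']})
    column_types = {'label': CATEGORICAL}
    # With two distinct label values and no provided problem type, this is
    # expected to return BINARY.
    return infer_problem_type(column_types, 'label', df)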
|
code/tools/prepare_instance.py | santomon/taskonomy | 789 | 45333 | import subprocess
import os
def download_task_model(task):
m_path = os.path.join('/home/ubuntu/s3', "model_log_final", task,
"logs/model.permanent-ckpt")
dirs, fname = os.path.split(m_path)
dst_dir = dirs.replace('/home/ubuntu/s3', "s3://taskonomy-unpacked-oregon")
tmp_path = "/home/ubuntu/temp/{}".format(task)
subprocess.call('mkdir -p {}'.format(tmp_path), shell=True)
tmp_fname = os.path.join(tmp_path, fname)
aws_cp_command = "aws s3 cp {}.data-00000-of-00001 {}".format(os.path.join(dst_dir, fname), tmp_path)
subprocess.call(aws_cp_command, shell=True)
aws_cp_command = "aws s3 cp {}.meta {}".format(os.path.join(dst_dir, fname), tmp_path)
subprocess.call(aws_cp_command, shell=True)
aws_cp_command = "aws s3 cp {}.index {}".format(os.path.join(dst_dir, fname), tmp_path)
subprocess.call(aws_cp_command, shell=True)
list_of_tasks = 'autoencoder curvature denoise edge2d edge3d \
keypoint2d keypoint3d colorization jigsaw \
reshade rgb2depth rgb2mist rgb2sfnorm \
room_layout segment25d segment2d vanishing_point_well_defined \
segmentsemantic_rb class_1000 class_places impainting_whole'
list_of_tasks = 'impainting_whole'
list_of_tasks = list_of_tasks.split()
for t in list_of_tasks:
download_task_model(t)
|
resotocore/resotocore/db/jobdb.py | someengineering/resoto | 126 | 45393 | <filename>resotocore/resotocore/db/jobdb.py
from resotocore.db.async_arangodb import AsyncArangoDB
from resotocore.db.entitydb import EntityDb, EventEntityDb, ArangoEntityDb
from resotocore.task.task_description import Job
JobDb = EntityDb[Job]
EventJobDb = EventEntityDb[Job]
def job_db(db: AsyncArangoDB, collection: str) -> ArangoEntityDb[Job]:
return ArangoEntityDb(db, collection, Job, lambda k: k.id)
|
todo/migrations/0004_rename_list_tasklist.py | Sowmya-1998/https-github.com-shacker-django-todo | 567 | 45401 | <gh_stars>100-1000
# Generated by Django 2.0.2 on 2018-02-09 23:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("auth", "0009_alter_user_last_name_max_length"),
("todo", "0003_assignee_optional"),
]
operations = [
migrations.CreateModel(
name="TaskList",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.CharField(max_length=60)),
("slug", models.SlugField(default="")),
(
"group",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="auth.Group"),
),
],
options={"verbose_name_plural": "Lists", "ordering": ["name"]},
),
migrations.AlterUniqueTogether(name="list", unique_together=set()),
migrations.RemoveField(model_name="list", name="group"),
migrations.RemoveField(model_name="item", name="list"),
migrations.DeleteModel(name="List"),
migrations.AddField(
model_name="item",
name="task_list",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="todo.TaskList"
),
),
migrations.AlterUniqueTogether(name="tasklist", unique_together={("group", "slug")}),
]
|
app/apiv2/internal/tasking/mobius_task.py | Joey-Wondersign/Staffjoy-suite-Joey | 890 | 45459 | from flask_restful import marshal, abort, Resource
from app.models import Schedule2
from app.apiv2.decorators import permission_sudo
from app.apiv2.marshal import tasking_schedule_fields
class MobiusTaskApi(Resource):
method_decorators = [permission_sudo]
def get(self, schedule_id):
""" Peek at a schedule """
s = Schedule2.query.get_or_404(schedule_id)
return marshal(s, tasking_schedule_fields)
def delete(self, schedule_id):
""" Mark a task as done """
s = Schedule2.query.get_or_404(schedule_id)
if s.state != "mobius-processing":
abort(400)
s.transition_to_published()
return "{}", 204
|
genpac/template.py | kaixinguo360/genpac | 2,331 | 45516 | <gh_stars>1000+
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import,
division, print_function)
from ._compat import string_types
from .util import get_resource_path, read_file
# Template files
# When is_buildin == True, the template is a built-in file and is looked up in the script's source directory
class TemplateFile(object):
def __init__(self, path, is_buildin=False):
self.tpl_file = get_resource_path(path) if is_buildin else path
def __str__(self):
return read_file(self.tpl_file, fail_msg='读取自定义模板文件{path}失败')
PAC = TemplateFile('res/tpl-pac.js', True)
PAC_MIN = TemplateFile('res/tpl-pac.min.js', True)
PAC_PRECISE = TemplateFile('res/tpl-pac-precise.js', True)
PAC_PRECISE_MIN = TemplateFile('res/tpl-pac-precise.min.js', True)
WINGY = TemplateFile('res/tpl-wingy.yaml', True)
DNSMASQ = '''
#! __GENPAC__
__DNSMASQ__
#! Generated: __GENERATED__
#! GFWList: __GFWLIST_DETAIL__
'''
SS_ACL = '''
#! __GENPAC__
[bypass_all]
[proxy_list]
__GFWED_RULES__
#! Generated: __GENERATED__
#! GFWList: __GFWLIST_DETAIL__
'''
POTATSO = '''
#! __GENPAC__
[RULESET.gfwed]
name = "GFWed rules"
rules = [
__GFWED_RULES__
]
[RULESET.direct]
name = "Direct rules"
rules = [
__DIRECT_RULES__
]
#! Generated: __GENERATED__
#! GFWList: __GFWLIST_DETAIL__
'''
# Strip leading and trailing newline characters from the text templates
for name in dir():
if name.isupper() and isinstance(vars()[name], string_types):
vars()[name] = vars()[name].strip('\n')
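
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the plain-text
# templates above contain __PLACEHOLDER__ tokens that are meant to be
# substituted before the output is written.  This only illustrates one
# possible (partial) substitution; the server value is purely illustrative
# and the remaining placeholders are left untouched.
def _example_render_dnsmasq(servers='server=/example.com/127.0.0.1#53'):
    rendered = DNSMASQ.replace('__DNSMASQ__', servers)
    rendered = rendered.replace('__GENPAC__', 'genpac (example)')
    return rendered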
|
tests/ut/datavisual/data_transform/test_data_loader.py | fapbatista/mindinsight | 216 | 45542 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
Test mindinsight.datavisual.data_transform.data_loader.
Usage:
pytest tests/ut/datavisual
"""
import os
import shutil
import tempfile
import pytest
from mindinsight.datavisual.common.exceptions import SummaryLogPathInvalid
from mindinsight.datavisual.data_transform import data_loader
from mindinsight.datavisual.data_transform.data_loader import DataLoader
from ..mock import MockLogger
class TestDataLoader:
"""Test data_loader."""
@classmethod
def setup_class(cls):
data_loader.logger = MockLogger
def setup_method(self):
self._summary_dir = tempfile.mkdtemp()
if os.path.exists(self._summary_dir):
shutil.rmtree(self._summary_dir)
os.mkdir(self._summary_dir)
def teardown_method(self):
if os.path.exists(self._summary_dir):
shutil.rmtree(self._summary_dir)
def _generate_files(self, dir_path, file_list):
for file_name in file_list:
with open(os.path.join(dir_path, file_name), 'w'):
pass
def test_load_with_not_file_list(self):
"""Test loading method with empty file list."""
loader = DataLoader(self._summary_dir)
with pytest.raises(SummaryLogPathInvalid):
loader.load()
assert 'No valid files can be loaded' in str(MockLogger.log_msg['warning'])
def test_load_with_invalid_file_list(self):
"""Test loading method with valid path and invalid file_list."""
file_list = ['summary.abc01', 'summary.abc02']
self._generate_files(self._summary_dir, file_list)
loader = DataLoader(self._summary_dir)
with pytest.raises(SummaryLogPathInvalid):
loader.load()
assert 'No valid files can be loaded' in str(MockLogger.log_msg['warning'])
def test_load_success(self):
"""Test loading method with valid path and file_list."""
dir_path = tempfile.NamedTemporaryFile().name
if not os.path.exists(dir_path):
os.mkdir(dir_path)
file_list = ['summary.001', 'summary.002']
self._generate_files(dir_path, file_list)
dataloader = DataLoader(dir_path)
dataloader.load()
assert dataloader._loader is not None
shutil.rmtree(dir_path)
|
app/tests/teams_tests/test_views.py | njmhendrix/grand-challenge.org | 101 | 45554 | <reponame>njmhendrix/grand-challenge.org
import pytest
from django.conf import settings
from django.test import Client
from tests.factories import TeamFactory, TeamMemberFactory
from tests.utils import (
assert_viewname_redirect,
assert_viewname_status,
get_view_for_user,
validate_admin_or_participant_view,
validate_open_view,
)
def validate_owner_or_admin_view(
*, two_challenge_set, client: Client, **kwargs
):
"""
    Assert that a view is only accessible to the team owner or to the
    administrators of that particular challenge.
"""
# No user
assert_viewname_redirect(
redirect_url=settings.LOGIN_URL,
challenge=two_challenge_set.challenge_set_1.challenge,
client=client,
**kwargs,
)
tests = [
(403, two_challenge_set.challenge_set_1.non_participant),
(200, two_challenge_set.challenge_set_1.participant),
(403, two_challenge_set.challenge_set_1.participant1),
(200, two_challenge_set.challenge_set_1.creator),
(200, two_challenge_set.challenge_set_1.admin),
(403, two_challenge_set.challenge_set_2.non_participant),
(403, two_challenge_set.challenge_set_2.participant),
(403, two_challenge_set.challenge_set_2.participant1),
(403, two_challenge_set.challenge_set_2.creator),
(403, two_challenge_set.challenge_set_2.admin),
(200, two_challenge_set.admin12),
(403, two_challenge_set.participant12),
(200, two_challenge_set.admin1participant2),
]
for test in tests:
assert_viewname_status(
code=test[0],
challenge=two_challenge_set.challenge_set_1.challenge,
client=client,
user=test[1],
**kwargs,
)
def validate_member_owner_or_admin_view(
*, two_challenge_set, client: Client, **kwargs
):
"""
    Assert that a view is only accessible to team members, the team owner, or
    the administrators of that particular challenge.
"""
# No user
assert_viewname_redirect(
redirect_url=settings.LOGIN_URL,
challenge=two_challenge_set.challenge_set_1.challenge,
client=client,
**kwargs,
)
tests = [
(403, two_challenge_set.challenge_set_1.non_participant),
(200, two_challenge_set.challenge_set_1.participant),
(200, two_challenge_set.challenge_set_1.participant1),
(200, two_challenge_set.challenge_set_1.creator),
(200, two_challenge_set.challenge_set_1.admin),
(403, two_challenge_set.challenge_set_2.non_participant),
(403, two_challenge_set.challenge_set_2.participant),
(403, two_challenge_set.challenge_set_2.participant1),
(403, two_challenge_set.challenge_set_2.creator),
(403, two_challenge_set.challenge_set_2.admin),
(200, two_challenge_set.admin12),
(403, two_challenge_set.participant12),
(200, two_challenge_set.admin1participant2),
]
for test in tests:
assert_viewname_status(
code=test[0],
challenge=two_challenge_set.challenge_set_1.challenge,
client=client,
user=test[1],
**kwargs,
)
@pytest.mark.django_db
@pytest.mark.parametrize(
"view", ["teams:list", "teams:create", "teams:member-create"]
)
def test_admin_or_participant_permissions(client, two_challenge_sets, view):
team = TeamFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
owner=two_challenge_sets.challenge_set_1.participant,
)
if view in ("teams:detail", "teams:member-create"):
pk = team.pk
else:
pk = None
validate_admin_or_participant_view(
viewname=view,
reverse_kwargs={"pk": pk},
two_challenge_set=two_challenge_sets,
client=client,
)
@pytest.mark.django_db
def test_open_views(client, challenge_set):
team = TeamFactory(
challenge=challenge_set.challenge, owner=challenge_set.participant
)
validate_open_view(
viewname="teams:detail",
reverse_kwargs={"pk": team.pk},
challenge_set=challenge_set,
client=client,
)
@pytest.mark.django_db
@pytest.mark.parametrize("view", ["teams:update", "teams:delete"])
def test_team_update_delete_permissions(client, two_challenge_sets, view):
team = TeamFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
owner=two_challenge_sets.challenge_set_1.participant,
)
TeamFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
owner=two_challenge_sets.challenge_set_1.participant1,
)
validate_owner_or_admin_view(
viewname=view,
reverse_kwargs={"pk": team.pk},
two_challenge_set=two_challenge_sets,
client=client,
)
@pytest.mark.django_db
def test_team_member_delete_permissions(client, two_challenge_sets):
team = TeamFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
owner=two_challenge_sets.challenge_set_1.participant,
)
team_member = TeamMemberFactory(
team=team, user=two_challenge_sets.challenge_set_1.participant1
)
validate_member_owner_or_admin_view(
viewname="teams:member-delete",
reverse_kwargs={"pk": team_member.pk},
two_challenge_set=two_challenge_sets,
client=client,
)
@pytest.mark.django_db
@pytest.mark.parametrize("team_name", ["test_team_name"])
def test_team_creation(client, two_challenge_sets, team_name):
response = get_view_for_user(
viewname="teams:create",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
method=client.post,
user=two_challenge_sets.challenge_set_1.participant,
data={"name": team_name},
)
assert response.status_code == 302
response = get_view_for_user(
url=response.url,
client=client,
user=two_challenge_sets.challenge_set_1.participant,
)
assert response.status_code == 200
assert team_name in response.rendered_content.lower()
@pytest.mark.django_db
def test_team_member_addition(client, two_challenge_sets):
team = TeamFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
owner=two_challenge_sets.challenge_set_1.participant,
)
assert two_challenge_sets.challenge_set_1.participant in team.get_members()
assert (
two_challenge_sets.challenge_set_1.participant1
not in team.get_members()
)
# Participant1 requests to join team
response = get_view_for_user(
viewname="teams:member-create",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
method=client.post,
user=two_challenge_sets.challenge_set_1.participant1,
reverse_kwargs={"pk": team.pk},
)
assert (
two_challenge_sets.challenge_set_1.participant1 in team.get_members()
)
assert response.status_code == 302
@pytest.mark.django_db
def test_unique_membership(client, two_challenge_sets):
team = TeamFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
owner=two_challenge_sets.challenge_set_1.participant,
)
team1 = TeamFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
owner=two_challenge_sets.challenge_set_1.participant1,
)
# Try to create a new team, should be denied
response = get_view_for_user(
viewname="teams:create",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
method=client.post,
user=two_challenge_sets.challenge_set_1.participant,
data={"name": "thisteamshouldnotbecreated"},
)
assert response.status_code == 200
assert (
"You are already a member of another team for this challenge"
in response.rendered_content
)
# Participant1 requests to join team, should be denied
response = get_view_for_user(
viewname="teams:member-create",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
method=client.post,
user=two_challenge_sets.challenge_set_1.participant1,
reverse_kwargs={"pk": team.pk},
)
assert response.status_code == 200
assert (
"You are already a member of another team for this challenge"
in response.rendered_content
)
# participant12 should be able to create a team in their challenge and join
# another team
response = get_view_for_user(
viewname="teams:create",
challenge=two_challenge_sets.challenge_set_2.challenge,
client=client,
method=client.post,
user=two_challenge_sets.participant12,
data={"name": "thisteamshouldbecreated"},
)
assert response.status_code == 302
response = get_view_for_user(
viewname="teams:member-create",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
method=client.post,
user=two_challenge_sets.participant12,
reverse_kwargs={"pk": team.pk},
)
assert response.status_code == 302
assert two_challenge_sets.participant12 in team.get_members()
response = get_view_for_user(
viewname="teams:member-create",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
method=client.post,
user=two_challenge_sets.participant12,
reverse_kwargs={"pk": team1.pk},
)
assert response.status_code == 200
assert (
"You are already a member of another team for this challenge"
in response.rendered_content
)
|
test/programytest/config/brain/test_dynamic.py | cdoebler1/AIML2 | 345 | 45561 | import unittest
from programy.clients.events.console.config import ConsoleConfiguration
from programy.config.brain.dynamic import BrainDynamicsConfiguration
from programy.config.file.yaml_file import YamlConfigurationFile
class BrainDynamicsConfigurationTests(unittest.TestCase):
def test_with_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
dynamic:
variables:
gettime: programy.dynamic.variables.datetime.GetTime
sets:
number: programy.dynamic.sets.numeric.IsNumeric
roman: programy.dynamic.sets.roman.IsRomanNumeral
maps:
romantodec: programy.dynamic.maps.roman.MapRomanToDecimal
dectoroman: programy.dynamic.maps.roman.MapDecimalToRoman
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
self.assertEquals({'GETTIME': 'programy.dynamic.variables.datetime.GetTime'}, dynamic_config.dynamic_vars)
self.assertEquals({'NUMBER': 'programy.dynamic.sets.numeric.IsNumeric', 'ROMAN': 'programy.dynamic.sets.roman.IsRomanNumeral'}, dynamic_config.dynamic_sets)
self.assertEquals({'ROMANTODEC': 'programy.dynamic.maps.roman.MapRomanToDecimal', 'DECTOROMAN': 'programy.dynamic.maps.roman.MapDecimalToRoman'}, dynamic_config.dynamic_maps)
def test_with_missing_vars_sets_maps(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
dynamic:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
self.assertEquals({}, dynamic_config.dynamic_vars)
self.assertEquals({}, dynamic_config.dynamic_sets)
self.assertEquals({}, dynamic_config.dynamic_maps)
def test_with_missing_vars_sets_maps2(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
dynamic:
something: else
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
self.assertEquals({}, dynamic_config.dynamic_vars)
self.assertEquals({}, dynamic_config.dynamic_sets)
self.assertEquals({}, dynamic_config.dynamic_maps)
def test_without_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
dynamic:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
self.assertEquals({}, dynamic_config.dynamic_vars)
self.assertEquals({}, dynamic_config.dynamic_sets)
self.assertEquals({}, dynamic_config.dynamic_maps)
def test_with_no_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
self.assertEquals({}, dynamic_config.dynamic_vars)
self.assertEquals({}, dynamic_config.dynamic_sets)
self.assertEquals({}, dynamic_config.dynamic_maps)
def test_to_yaml_defaults(self):
yaml = {}
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.to_yaml(yaml, defaults=True)
self.assertEquals({'GETTIME': 'programy.dynamic.variables.datetime.GetTime'}, yaml['variables'])
self.assertEquals({'NUMBER': 'programy.dynamic.sets.numeric.IsNumeric',
'ROMAN': 'programy.dynamic.sets.roman.IsRomanNumeral',
'STOPWORD': 'programy.dynamic.sets.stopword.IsStopWord',
'SYNSETS': 'programy.dynamic.sets.synsets.IsSynset'}, yaml['sets'])
self.assertEquals({'ROMANTODDEC': 'programy.dynamic.maps.roman.MapRomanToDecimal',
'DECTOROMAN': 'programy.dynamic.maps.roman.MapDecimalToRoman',
'LEMMATIZE': 'programy.dynamic.maps.lemmatize.LemmatizeMap',
'STEMMER': 'programy.dynamic.maps.stemmer.StemmerMap'}, yaml['maps'])
def test_to_yaml_no_defaults(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
dynamic:
variables:
gettime: programy.dynamic.variables.datetime.GetTime
sets:
number: programy.dynamic.sets.numeric.IsNumeric
roman: programy.dynamic.sets.roman.IsRomanNumeral
maps:
romantodec: programy.dynamic.maps.roman.MapRomanToDecimal
dectoroman: programy.dynamic.maps.roman.MapDecimalToRoman
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
data = {}
dynamic_config.to_yaml(data, defaults=False)
self.assertEquals({'GETTIME': 'programy.dynamic.variables.datetime.GetTime'}, data['variables'])
self.assertEquals({'NUMBER': 'programy.dynamic.sets.numeric.IsNumeric', 'ROMAN': 'programy.dynamic.sets.roman.IsRomanNumeral'}, data['sets'])
self.assertEquals({'ROMANTODEC': 'programy.dynamic.maps.roman.MapRomanToDecimal', 'DECTOROMAN': 'programy.dynamic.maps.roman.MapDecimalToRoman'}, data['maps'])
def test_to_yaml_no_defaults_no_data(self):
yaml = {}
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.to_yaml(yaml, defaults=False)
self.assertEquals({}, yaml['variables'])
self.assertEquals({}, yaml['sets'])
self.assertEquals({}, yaml['maps'])
def test_defaults(self):
dynamic_config = BrainDynamicsConfiguration()
data = {}
dynamic_config.to_yaml(data, True)
BrainDynamicsConfigurationTests.assert_defaults(self, data)
@staticmethod
def assert_defaults(test, data):
test.assertTrue('sets' in data)
test.assertEqual(data['sets']['NUMBER'], 'programy.dynamic.sets.numeric.IsNumeric')
test.assertEqual(data['sets']['ROMAN'], 'programy.dynamic.sets.roman.IsRomanNumeral')
test.assertEqual(data['sets']['STOPWORD'], 'programy.dynamic.sets.stopword.IsStopWord')
test.assertEqual(data['sets']['SYNSETS'], 'programy.dynamic.sets.synsets.IsSynset')
test.assertTrue('maps' in data)
test.assertEqual(data['maps']['ROMANTODDEC'], 'programy.dynamic.maps.roman.MapRomanToDecimal')
test.assertEqual(data['maps']['DECTOROMAN'], 'programy.dynamic.maps.roman.MapDecimalToRoman')
test.assertEqual(data['maps']['LEMMATIZE'], 'programy.dynamic.maps.lemmatize.LemmatizeMap')
test.assertEqual(data['maps']['STEMMER'], 'programy.dynamic.maps.stemmer.StemmerMap')
test.assertTrue('variables' in data)
test.assertEqual(data['variables']['GETTIME'], 'programy.dynamic.variables.datetime.GetTime')
|
api/organisations/managers.py | mevinbabuc/flagsmith | 1,259 | 45575 | <reponame>mevinbabuc/flagsmith
from django.db.models import Manager
from permissions.models import ORGANISATION_PERMISSION_TYPE
class OrganisationPermissionManager(Manager):
def get_queryset(self):
return super().get_queryset().filter(type=ORGANISATION_PERMISSION_TYPE)
|
nuplan/planning/metrics/evaluation_metrics/common/clearance_from_static_agents.py | motional/nuplan-devkit | 128 | 45595 | from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Optional
import numpy as np
import numpy.typing as npt
from nuplan.common.actor_state.agent import Agent
from nuplan.common.actor_state.ego_state import EgoState
from nuplan.common.actor_state.vehicle_parameters import get_pacifica_parameters
from nuplan.common.geometry.compute import signed_lateral_distance, signed_longitudinal_distance
from nuplan.planning.metrics.evaluation_metrics.base.metric_base import MetricBase
from nuplan.planning.metrics.metric_result import MetricStatistics, MetricStatisticsType, Statistic, TimeSeries
from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario
from nuplan.planning.simulation.history.simulation_history import SimulationHistory
from nuplan.planning.simulation.observation.observation_type import DetectionsTracks
@dataclass
class EgoAgentPair:
"""Class to pair ego and agent."""
ego_state: EgoState # Ego state
agent: Agent # Agent
@dataclass
class EgoToAgentDistances:
"""
Class to keep track of the history of projected distances from ego to an agent.
It also contains the length of the agent.
"""
agent_lengths: List[float] # A list of Length of agents [m]
longitudinal_distances: List[float] # Longitudinal distance from ego to the agent [m]
lateral_distances: List[float] # Lateral distance from ego to the agent [m]
class ClearanceFromStaticAgentsStatistics(MetricBase):
"""Metric on clearance while passing static vehicles."""
def __init__(self, name: str, category: str, lateral_distance_threshold: float) -> None:
"""
Initializes the ClearanceFromStaticAgentsStatistics class
:param name: Metric name
:param category: Metric category
:param lateral_distance_threshold: Agents laterally further away than this threshold are not considered.
"""
super().__init__(name=name, category=category)
self._lateral_distance_threshold = lateral_distance_threshold
self._ego_half_length = get_pacifica_parameters().half_length
def compute_score(
self,
scenario: AbstractScenario,
metric_statistics: Dict[str, Statistic],
time_series: Optional[TimeSeries] = None,
) -> float:
"""Inherited, see superclass."""
# TODO: Define the metric score
return 0.0
def compute(self, history: SimulationHistory, scenario: AbstractScenario) -> List[MetricStatistics]:
"""
Returns the estimated metric
:param history: History from a simulation engine
:param scenario: Scenario running this metric
:return the estimated metric.
"""
# Compute projected distances
agents_distances = self._extract_agent_projected_distances(history)
clearances_during_passing = self._extract_passing_clearances(agents_distances)
if not clearances_during_passing:
return []
statistics = {
MetricStatisticsType.MAX: Statistic(
name='max_clearance_overtaking_static_agent', unit='meters', value=np.amax(clearances_during_passing)
),
MetricStatisticsType.MIN: Statistic(
name='min_clearance_overtaking_static_agent', unit='meters', value=np.amin(clearances_during_passing)
),
MetricStatisticsType.P90: Statistic(
name='p90_clearance_overtaking_static_agent',
unit='meters',
value=np.percentile(np.abs(clearances_during_passing), 90),
),
}
results = self._construct_metric_results(metric_statistics=statistics, time_series=None, scenario=scenario)
return results # type: ignore
def get_overtake_start_idx(
self, longitudinal_dist: List[float], idx_overtake: int, critical_dist_abs: float
) -> int:
"""
Finds the index of the element which represents the start of the overtake
:param longitudinal_dist: longitudinal distances
:param idx_overtake: index of the distance closest to zero
        :param critical_dist_abs: critical distance which represents the start of the overtake
:return index of the start of overtake.
"""
offset = self._get_overtake_edge(longitudinal_dist[idx_overtake::-1], critical_dist_abs)
return idx_overtake - offset if offset is not None else 0
def get_overtake_end_idx(self, longitudinal_dist: List[float], idx_overtake: int, critical_dist_abs: float) -> int:
"""
Finds the index of the element which represents the end of the overtake
:param longitudinal_dist: longitudinal distances
:param idx_overtake: index of the distance closest to zero
        :param critical_dist_abs: critical distance which represents the end of the overtake
:return index of the end of overtake.
"""
offset = self._get_overtake_edge(longitudinal_dist[idx_overtake:], critical_dist_abs)
return idx_overtake + offset if offset is not None else -1
@staticmethod
def _get_overtake_edge(distances: List[float], critical_distance: float) -> Optional[int]:
"""
Finds the index of the first element which exceeds the given amount in a list
:param distances: list of distances
:param critical_distance: threshold distance
:return index of the first element exceeding the given amount, None if it doesn't happen.
"""
for idx_start, d in enumerate(distances):
if abs(d) > critical_distance:
return idx_start
return None
def _extract_agent_projected_distances(self, history: SimulationHistory) -> Dict[str, EgoToAgentDistances]:
"""
Computes the projected distances, for inactive agents only
:param history: The history of the scenario
:return A dict containing the projected distances to each inactive track in the entire scenario.
"""
agents_distances: Dict[str, EgoToAgentDistances] = {}
inactive_agents_scenario = self._get_inactive_agents_scenario(history)
for track_token, ego_agent_pairs in inactive_agents_scenario.items():
lateral_dist = [
signed_lateral_distance(ego_agent_pair.ego_state.rear_axle, ego_agent_pair.agent.box.geometry)
for ego_agent_pair in ego_agent_pairs
]
longitudinal_dist = [
signed_longitudinal_distance(ego_agent_pair.ego_state.rear_axle, ego_agent_pair.agent.box.geometry)
for ego_agent_pair in ego_agent_pairs
]
lengths = [ego_agent_pair.agent.box.length for ego_agent_pair in ego_agent_pairs]
agents_distances[track_token] = EgoToAgentDistances(
agent_lengths=lengths, longitudinal_distances=longitudinal_dist, lateral_distances=lateral_dist
)
return agents_distances
def _extract_passing_clearances(self, agents_distances: Dict[str, EgoToAgentDistances]) -> List[float]:
"""
Extracts the portion of projected distances relative to the passing of every agent and saves them to a list
:param agents_distances: The projected distances to each inactive agent
:return A list containing the lateral clearance of all inactive agents while ego is passing them.
"""
clearances_during_overtake = []
for distances in agents_distances.values():
max_longitudinal_dist = max(distances.longitudinal_distances)
idx_max = distances.longitudinal_distances.index(max_longitudinal_dist)
min_longitudinal_dist = min(distances.longitudinal_distances)
idx_min = distances.longitudinal_distances.index(min_longitudinal_dist)
if max_longitudinal_dist > 0 > min_longitudinal_dist and idx_max < idx_min:
overtake_idx = int(np.argmin(np.abs(distances.longitudinal_distances)))
if abs(distances.lateral_distances[overtake_idx]) < self._lateral_distance_threshold:
threshold = self._ego_half_length + distances.agent_lengths[overtake_idx] / 2.0
start_idx = self.get_overtake_start_idx(
distances.longitudinal_distances, int(overtake_idx), threshold
)
end_idx = self.get_overtake_end_idx(distances.longitudinal_distances, int(overtake_idx), threshold)
clearances_during_overtake.extend(np.abs(distances.lateral_distances[start_idx : end_idx + 1]))
return clearances_during_overtake
@staticmethod
def _get_inactive_agents_scenario(history: SimulationHistory) -> Dict[str, List[EgoAgentPair]]:
"""
Get a set of agents which are inactive for the full length of the scenario
        An inactive agent in this context is an agent that never moves for the entire scenario
:param history: The history from the scenario
:return A dict of inactive tracks and their ego poses with agents.
"""
# Collect a series of agents to their tracks
agent_tracks = defaultdict(list)
for sample in history.data:
ego_state = sample.ego_state
if not isinstance(sample.observation, DetectionsTracks):
continue
for tracked_object in sample.observation.tracked_objects.get_agents():
agent_tracks[tracked_object.track_token].append(EgoAgentPair(ego_state=ego_state, agent=tracked_object))
inactive_track_agents = defaultdict(list)
for track_token, ego_agent_pairs in agent_tracks.items():
velocities: npt.NDArray[np.float64] = np.asarray(
[ego_agent_pair.agent.velocity.magnitude() for ego_agent_pair in ego_agent_pairs]
)
inactive_status = np.isclose(velocities, 0.0)
# Must all inactive
if np.sum(inactive_status) != len(velocities):
continue
inactive_track_agents[track_token] = ego_agent_pairs
return inactive_track_agents
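
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it exercises only the
# pure-Python overtake-index helpers of the metric, so no simulation history
# is needed.  The metric name, category and threshold passed to the
# constructor are illustrative values, not values taken from any nuPlan
# configuration.
def _example_overtake_indices():
    metric = ClearanceFromStaticAgentsStatistics(
        name='clearance_from_static_agents', category='planning', lateral_distance_threshold=8.0)
    longitudinal = [6.0, 4.0, 1.5, 0.2, -1.0, -3.5, -6.0]
    idx_overtake = 3  # index of the distance closest to zero in the list above
    start = metric.get_overtake_start_idx(longitudinal, idx_overtake, critical_dist_abs=3.0)
    end = metric.get_overtake_end_idx(longitudinal, idx_overtake, critical_dist_abs=3.0)
    # For this list: start == 1 and end == 5, the first samples on each side of
    # the overtake whose absolute longitudinal distance exceeds critical_dist_abs.
    return start, end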
|
dotnet/private/copy_files.bzl | TamsilAmani/selenium | 25,151 | 45630 | <filename>dotnet/private/copy_files.bzl
def _copy_cmd(ctx, file_list, target_dir):
dest_list = []
if file_list == None or len(file_list) == 0:
return dest_list
shell_content = ""
batch_file_name = "%s-copy-files.bat" % (ctx.label.name)
bat = ctx.actions.declare_file(batch_file_name)
src_file_list = []
for (src_file, relative_dest_file) in file_list:
src_file_list.append(src_file)
dest_file = ctx.actions.declare_file("{}/{}".format(target_dir, relative_dest_file))
dest_list.append(dest_file)
shell_content += "@copy /Y \"%s\" \"%s\" >NUL\n" % (
src_file.path.replace("/", "\\"),
dest_file.path.replace("/", "\\"),
)
ctx.actions.write(
output = bat,
content = shell_content,
is_executable = True,
)
ctx.actions.run(
inputs = src_file_list,
tools = [bat],
outputs = dest_list,
executable = "cmd.exe",
arguments = ["/C", bat.path.replace("/", "\\")],
mnemonic = "CopyFile",
progress_message = "Copying files",
use_default_shell_env = True,
)
return dest_list
def _copy_bash(ctx, src_list, target_dir):
dest_list = []
for (src_file, relative_dest_file) in src_list:
dest_file = ctx.actions.declare_file("{}/{}".format(target_dir, relative_dest_file))
dest_list.append(dest_file)
ctx.actions.run_shell(
tools = [src_file],
outputs = [dest_file],
command = "cp -f \"$1\" \"$2\"",
arguments = [src_file.path, dest_file.path],
mnemonic = "CopyFile",
progress_message = "Copying files",
use_default_shell_env = True,
)
return dest_list
def copy_files(ctx, file_list, base_dest_directory, is_windows):
dest_list = []
if is_windows:
dest_list = _copy_cmd(ctx, file_list, base_dest_directory)
else:
dest_list = _copy_bash(ctx, file_list, base_dest_directory)
return dest_list
|
Trakttv.bundle/Contents/Libraries/Shared/trakt_sync/cache/enums.py | disrupted/Trakttv.bundle | 1,346 | 45632 | class Enum(object):
@classmethod
def parse(cls, value):
options = cls.options()
result = []
for k, v in options.items():
if type(v) is not int or v == 0:
continue
if value == 0 or (value & v) == v:
result.append(v)
return result
@classmethod
def options(cls):
result = {}
for key in dir(cls):
if key.startswith('_'):
continue
result[key] = getattr(cls, key)
return result
class Media(Enum):
All = 0
Movies = 1
Shows = 2
Seasons = 4
Episodes = 8
Lists = 16
__map__ = None
@classmethod
def get(cls, key):
if cls.__map__ is None:
cls.__map__ = {
Media.Movies: 'movies',
Media.Shows: 'shows',
Media.Seasons: 'seasons',
Media.Episodes: 'episodes',
Media.Lists: 'lists'
}
return cls.__map__.get(key)
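
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it demonstrates how
# the bit-flag values on Media combine with Enum.parse and Media.get.  The
# expected results follow directly from the option values defined above.
def _example_media_flags():
    selected = Media.Movies | Media.Shows        # 1 | 2 == 3
    flags = Media.parse(selected)                # -> [Media.Movies, Media.Shows] (order may vary)
    names = [Media.get(flag) for flag in flags]  # -> ['movies', 'shows'] (order may vary)
    return flags, names
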
class Data(Enum):
All = 0
Collection = 1
Playback = 2
Ratings = 4
Watched = 8
Watchlist = 16
# Lists
Liked = 32
Personal = 64
__attributes__ = None
__map__ = None
@classmethod
def initialize(cls):
if cls.__attributes__:
return
cls.__attributes__ = {
Data.Collection: {
'interface': 'sync/collection',
'timestamp': 'collected_at'
},
Data.Playback: {
'interface': 'sync/playback',
'timestamp': 'paused_at'
},
Data.Ratings: {
'interface': 'sync/ratings',
'timestamp': 'rated_at'
},
Data.Watched: {
'interface': 'sync/watched',
'timestamp': 'watched_at'
},
Data.Watchlist: {
'interface': 'sync/watchlist',
'timestamp': 'watchlisted_at'
},
# Lists
Data.Liked: {
'interface': 'users/likes',
'timestamp': 'updated_at'
},
Data.Personal: {
'interface': 'users/*/lists',
'timestamp': 'updated_at'
}
}
@classmethod
def get(cls, key):
if cls.__map__ is None:
cls.__map__ = {
Data.Collection: 'collection',
Data.Playback: 'playback',
Data.Ratings: 'ratings',
Data.Watched: 'watched',
Data.Watchlist: 'watchlist',
# Lists
Data.Liked: 'liked',
Data.Personal: 'personal'
}
return cls.__map__.get(key)
@classmethod
def get_interface(cls, key):
return cls.get_attribute(key, 'interface')
@classmethod
def get_timestamp_key(cls, key):
return cls.get_attribute(key, 'timestamp')
@classmethod
def get_attribute(cls, key, attribute):
cls.initialize()
attributes = cls.__attributes__.get(key)
if not attributes:
return None
return attributes.get(attribute) |
Tools/resourceCompiler/mayaExporter/workers/skinclusterExporter.py | giordi91/SirEngineThe3rd | 114 | 45650 | import sys
sys.path.append( "E:\\WORK_IN_PROGRESS\\C\\platfoorm\\engine\\misc\\exporters")
from maya import cmds
from maya import OpenMaya
from maya import OpenMayaAnim
import skeletonExporter
reload(skeletonExporter)
import json
MAX_INFLUENCE = 6;
def map_shadow_to_skeleton(root):
data,joints = skeletonExporter.get_skeleton_data(root)
shadow_to_skele = {}
skele_to_shadow={}
#for each joints we need to follow the constraint to find the driver and build
#a map with that data
for j in joints:
const = cmds.listConnections(j + '.tx', d=0,s=1)[0]
driver = cmds.listConnections(const + '.target[0].targetTranslate',s=1,d=0)
shadow_to_skele[j] = driver[0]
skele_to_shadow[driver[0]] = j
return shadow_to_skele, skele_to_shadow
def getWeightsData (mesh,skinNode, skele_to_shadow, joints):
'''
    This procedure lets you create a dictionary holding all the needed information to rebuild
a skinCluster map
'''
sknN = skinNode
cmds.undoInfo(openChunk = 1)
infls = cmds.skinCluster(skinNode, q=True, inf=True)
weightMap = []
# get the dag path of the shape node
sel = OpenMaya.MSelectionList()
cmds.select(skinNode)
OpenMaya.MGlobal.getActiveSelectionList(sel)
skinClusterObject = OpenMaya.MObject()
sel.getDependNode(0,skinClusterObject )
skinClusterFn = OpenMayaAnim.MFnSkinCluster(skinClusterObject)
cmds.select(mesh)
sel = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getActiveSelectionList(sel)
shapeDag = OpenMaya.MDagPath()
sel.getDagPath(0, shapeDag)
# create the geometry iterator
geoIter = OpenMaya.MItGeometry(shapeDag)
# create a pointer object for the influence count of the MFnSkinCluster
infCount = OpenMaya.MScriptUtil()
infCountPtr = infCount.asUintPtr()
OpenMaya.MScriptUtil.setUint(infCountPtr, 0)
value = OpenMaya.MDoubleArray()
weightMap = []
infls= OpenMaya.MDagPathArray()
skinClusterFn.influenceObjects(infls)
while geoIter.isDone() == False:
skinClusterFn.getWeights(shapeDag, geoIter.currentItem(), value, infCountPtr)
vtx_data ={"idx": geoIter.index(),
"j":[],
"w":[]}
for j in range(0, infls.length()):
if value[j] > 0:
if skele_to_shadow:
jnt_idx = joints.index(skele_to_shadow[infls[j]])
else:
#node = cmds.listConnections(skinN + ".matrix[" + str(j) + "]",s=1,d=0)[0]
#jnt_idx = joints.index(node)
node = infls[j].fullPathName().rsplit("|",1)[1]
#print node
jnt_idx = joints.index(node)
#jnt_idx = j
weight= value[j]
vtx_data["j"].append(int(jnt_idx))
vtx_data["w"].append(float(weight))
currL = len(vtx_data["j"])
if currL>MAX_INFLUENCE:
print "vertex",vtx_data["idx"], "joints got more than "+str(MAX_INFLUENCE) + " infs"
return;
if currL!= MAX_INFLUENCE:
#lets format the data to have always 4 elemets
deltaSize = MAX_INFLUENCE - currL
vtx_data['j'].extend([int(0)]*deltaSize)
vtx_data['w'].extend([0.0]*deltaSize)
if len(vtx_data["j"]) != MAX_INFLUENCE:
print "vertex",vtx_data["idx"], "wrong formatting after correction"
if len(vtx_data["w"]) != MAX_INFLUENCE:
print "vertex",vtx_data["idx"], "wrong formatting after correction"
weightMap.append(vtx_data)
geoIter.next()
cmds.undoInfo(closeChunk = 1)
print "------> WeightMap has been saved!"
return weightMap
def export_skin(root, skin_name, path, mesh , tootle_path=None, is_shadow=True):
data,joints = skeletonExporter.get_skeleton_data(root)
#print joints.index("L_EyeAim0")
if is_shadow:
print "----> Remapping to shadow skeleton"
shadow_to_skele, skele_to_shadow = map_shadow_to_skeleton(root)
data = getWeightsData(mesh,skin_name,skele_to_shadow, joints)
else :
data = getWeightsData(mesh,skin_name,None, joints)
full = {"type":"skinCluster",
"data":data,
"skeleton": "dogSkeleton"
}
if tootle_path != None:
#read in the tootle
print "---> remapping skin using tootle data"
t = open(tootle_path, 'r')
tootle_map = json.load(t)
newData = [0]*len(full["data"])
for i,d in enumerate(full["data"]):
new = tootle_map[str(i)]
newData[new] = d
full["data"] = newData
else:
print "skippping tootle"
to_save = json.dumps(full)
f = open( path, 'w')
f.write(to_save)
f.close()
print "saved to", path
if __name__ == "__main__" or __name__ == "__builtin__":
print "exporting skin"
root = "root"
skin = "skinCluster1"
path = r"E:\WORK_IN_PROGRESS\C\platfoorm\engine\misc\exporters\temp_data\mannequin_skin.json"
mesh = "mannequin"
tootle_path = r"E:\WORK_IN_PROGRESS\C\platfoorm\engine\misc\exporters\temp_data\mannequin.tootle"
tootle_path=None
export_skin(root, skin, path, mesh, tootle_path, False)
"""
data,joints = skeleton_exporter.get_skeleton_data(root)
shadow_to_skele, skele_to_shadow = map_shadow_to_skeleton(root)
data = getWeightsData(mesh,skin,skele_to_shadow, joints)
full = {"type":"skinCluster",
"data":data,
"skeleton": "dogSkeleton"
}
to_save = json.dumps(full)
f = open( path, 'w')
f.write(to_save)
f.close()
print "saved to", path
"""
|
gluon/gluoncv2/models/res2net.py | naviocean/imgclsmob | 2,649 | 45661 | """
Res2Net for ImageNet-1K, implemented in Gluon.
Original paper: 'Res2Net: A New Multi-scale Backbone Architecture,' https://arxiv.org/abs/1904.01169.
"""
__all__ = ['Res2Net', 'res2net50_w14_s8', 'res2net50_w26_s8']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import conv1x1, conv3x3, conv1x1_block
from .resnet import ResInitBlock
from .preresnet import PreResActivation
class HierarchicalConcurrent(nn.HybridSequential):
"""
A container for hierarchical concatenation of blocks with parameters.
Parameters:
----------
axis : int, default 1
The axis on which to concatenate the outputs.
multi_input : bool, default False
Whether input is multiple.
"""
def __init__(self,
axis=1,
multi_input=False,
**kwargs):
super(HierarchicalConcurrent, self).__init__(**kwargs)
self.axis = axis
self.multi_input = multi_input
def hybrid_forward(self, F, x):
out = []
y_prev = None
if self.multi_input:
xs = F.split(x, axis=self.axis, num_outputs=len(self._children.values()))
for i, block in enumerate(self._children.values()):
if self.multi_input:
y = block(xs[i])
else:
y = block(x)
if y_prev is not None:
y = y + y_prev
out.append(y)
y_prev = y
out = F.concat(*out, dim=self.axis)
return out
class Res2NetUnit(HybridBlock):
"""
Res2Net unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the branch convolution layers.
width : int
Width of filters.
scale : int
Number of scale.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
width,
scale,
bn_use_global_stats,
**kwargs):
super(Res2NetUnit, self).__init__(**kwargs)
self.scale = scale
downsample = (strides != 1)
self.resize_identity = (in_channels != out_channels) or downsample
mid_channels = width * scale
brn_channels = width
with self.name_scope():
self.reduce_conv = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.branches = HierarchicalConcurrent(axis=1, multi_input=True, prefix="")
if downsample:
self.branches.add(conv1x1(
in_channels=brn_channels,
out_channels=brn_channels,
strides=strides))
else:
self.branches.add(Identity())
for i in range(scale - 1):
self.branches.add(conv3x3(
in_channels=brn_channels,
out_channels=brn_channels,
strides=strides))
self.preactiv = PreResActivation(in_channels=mid_channels)
self.merge_conv = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
y = self.reduce_conv(x)
y = self.branches(y)
y = self.preactiv(y)
y = self.merge_conv(y)
y = y + identity
y = self.activ(y)
return y
class Res2Net(HybridBlock):
"""
Res2Net model from 'Res2Net: A New Multi-scale Backbone Architecture,' https://arxiv.org/abs/1904.01169.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
width : int
Width of filters.
scale : int
Number of scale.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
width,
scale,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(Res2Net, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(Res2NetUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
width=width,
scale=scale,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_res2net(blocks,
width,
scale,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create Res2Net model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
width : int
Width of filters.
scale : int
Number of scale.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
bottleneck = True
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
else:
raise ValueError("Unsupported Res2Net with number of blocks: {}".format(blocks))
assert (sum(layers) * 3 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = Res2Net(
channels=channels,
init_block_channels=init_block_channels,
width=width,
scale=scale,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def res2net50_w14_s8(**kwargs):
"""
Res2Net-50 (14wx8s) model from 'Res2Net: A New Multi-scale Backbone Architecture,' https://arxiv.org/abs/1904.01169.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_res2net(blocks=50, width=14, scale=8, model_name="res2net50_w14_s8", **kwargs)
def res2net50_w26_s8(**kwargs):
"""
Res2Net-50 (26wx8s) model from 'Res2Net: A New Multi-scale Backbone Architecture,' https://arxiv.org/abs/1904.01169.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_res2net(blocks=50, width=26, scale=8, model_name="res2net50_w14_s8", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
res2net50_w14_s8,
res2net50_w26_s8,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != res2net50_w14_s8 or weight_count == 8231732)
assert (model != res2net50_w26_s8 or weight_count == 11432660)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
|
rls/common/decorator.py | StepNeverStop/RLs | 371 | 45695 | <reponame>StepNeverStop/RLs
#!/usr/bin/env python3
# encoding: utf-8
import functools
import torch as th
from rls.utils.converter import to_numpy, to_tensor
def lazy_property(func):
attribute = '_lazy_' + func.__name__
@property
    # Copy the specified attributes of the original function object (func) onto the wrapper function object; by default __module__, __name__ and __doc__, or those selected via arguments
@functools.wraps(func)
def wrapper(self):
if not hasattr(self, attribute):
setattr(self, attribute, func(self))
return getattr(self, attribute)
return wrapper
def iton(func, dtype=th.float32, device='cpu'):
def wrapper(*args, **kwargs):
if args and hasattr(args[0], 'device'):
_device = getattr(args[0], 'device')
args = [args[0]] + \
[to_tensor(x, dtype=dtype, device=_device) for x in args[1:]]
else:
args = [to_tensor(x, dtype=dtype, device=device) for x in args]
kwargs = {k: to_tensor(v, dtype=dtype, device=device)
for k, v in kwargs.items()}
output = func(*args, **kwargs)
output = to_numpy(output)
return output
return wrapper
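
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a tiny class showing
# the effect of lazy_property, which runs the decorated method once and caches
# its result on the instance under '_lazy_<name>'.  The class and attribute
# names are illustrative only.
class _ExampleLazy:
    def __init__(self, n):
        self._n = n

    @lazy_property
    def squares(self):
        # Computed only on first access; later accesses reuse the cached list.
        return [i * i for i in range(self._n)]

# _ExampleLazy(4).squares -> [0, 1, 4, 9], computed once per instance.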
|
detectors/__init__.py | bc-jcarlson/secret-bridge | 152 | 45717 | from detectors.detectsecrets import DetectSecrets
from detectors.gitsecrets import GitSecrets
from detectors.trufflehog import TruffleHog
# TODO: Turn this into a registry to match the notifiers pattern?
AvailableDetectors = {
'detect-secrets': DetectSecrets,
'git-secrets': GitSecrets,
'trufflehog': TruffleHog
}
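
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): looking a detector
# class up by its configuration name.  The constructor arguments of the
# detector classes are not shown here, so only the registry lookup is
# demonstrated.
def _example_lookup(name='trufflehog'):
    detector_cls = AvailableDetectors.get(name)
    if detector_cls is None:
        raise KeyError('Unknown detector: {}'.format(name))
    return detector_cls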
|
fhir/resources/tests/test_specimendefinition.py | cstoltze/fhir.resources | 144 | 45732 | <reponame>cstoltze/fhir.resources
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/SpecimenDefinition
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import specimendefinition
def impl_specimendefinition_1(inst):
assert inst.id == "2364"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patientPreparation[0].text == "12 hour fasting"
assert inst.patientPreparation[1].coding[0].code == "263678003"
assert inst.patientPreparation[1].coding[0].display == "At rest"
assert inst.patientPreparation[1].coding[0].system == "http://snomed.info/sct"
assert inst.text.status == "generated"
assert inst.timeAspect == "preferrably morning time"
assert inst.typeCollected.coding[0].code == "122555007"
assert inst.typeCollected.coding[0].display == "Venous blood specimen"
assert inst.typeCollected.coding[0].system == "http://snomed.info/sct"
assert inst.typeTested[0].container.cap.coding[0].code == "yellow"
assert inst.typeTested[0].container.cap.coding[0].display == "yellow cap"
assert (
inst.typeTested[0].container.cap.coding[0].system == "urn:iso:std:iso:6710:2017"
)
assert inst.typeTested[0].container.material.coding[0].code == "61088005"
assert inst.typeTested[0].container.material.coding[0].display == "plastic"
assert (
inst.typeTested[0].container.material.coding[0].system
== "http://snomed.info/sct"
)
assert inst.typeTested[0].container.minimumVolumeQuantity.code == "mL"
assert (
inst.typeTested[0].container.minimumVolumeQuantity.system
== "http://unitsofmeasure.org"
)
assert inst.typeTested[0].container.minimumVolumeQuantity.unit == "ml"
assert float(inst.typeTested[0].container.minimumVolumeQuantity.value) == float(2)
assert inst.typeTested[0].container.type.coding[0].code == "702281005"
assert inst.typeTested[0].container.type.coding[0].display == (
"Evacuated blood collection tube, thrombin/clot activator/gel" " separator"
)
assert (
inst.typeTested[0].container.type.coding[0].system == "http://snomed.info/sct"
)
assert inst.typeTested[0].handling[0].maxDuration.code == "min"
assert (
inst.typeTested[0].handling[0].maxDuration.system == "http://unitsofmeasure.org"
)
assert inst.typeTested[0].handling[0].maxDuration.unit == "minute"
assert float(inst.typeTested[0].handling[0].maxDuration.value) == float(60)
assert (
inst.typeTested[0].handling[0].temperatureQualifier.text
== "Ambient temperature"
)
assert inst.typeTested[0].handling[0].temperatureRange.high.code == "Cel"
assert (
inst.typeTested[0].handling[0].temperatureRange.high.system
== "http://unitsofmeasure.org"
)
assert inst.typeTested[0].handling[0].temperatureRange.high.unit == "°C"
assert float(inst.typeTested[0].handling[0].temperatureRange.high.value) == float(
25
)
assert inst.typeTested[0].handling[0].temperatureRange.low.code == "Cel"
assert (
inst.typeTested[0].handling[0].temperatureRange.low.system
== "http://unitsofmeasure.org"
)
assert inst.typeTested[0].handling[0].temperatureRange.low.unit == "°C"
assert float(inst.typeTested[0].handling[0].temperatureRange.low.value) == float(15)
assert inst.typeTested[0].handling[1].maxDuration.code == "h"
assert (
inst.typeTested[0].handling[1].maxDuration.system == "http://unitsofmeasure.org"
)
assert inst.typeTested[0].handling[1].maxDuration.unit == "hour"
assert float(inst.typeTested[0].handling[1].maxDuration.value) == float(8)
assert (
inst.typeTested[0].handling[1].temperatureQualifier.text
== "Refrigerated temperature"
)
assert inst.typeTested[0].handling[1].temperatureRange.high.code == "Cel"
assert (
inst.typeTested[0].handling[1].temperatureRange.high.system
== "http://unitsofmeasure.org"
)
assert inst.typeTested[0].handling[1].temperatureRange.high.unit == "°C"
assert float(inst.typeTested[0].handling[1].temperatureRange.high.value) == float(8)
assert inst.typeTested[0].handling[1].temperatureRange.low.code == "Cel"
assert (
inst.typeTested[0].handling[1].temperatureRange.low.system
== "http://unitsofmeasure.org"
)
assert inst.typeTested[0].handling[1].temperatureRange.low.unit == "°C"
assert float(inst.typeTested[0].handling[1].temperatureRange.low.value) == float(2)
assert inst.typeTested[0].preference == "preferred"
assert inst.typeTested[0].type.coding[0].code == "119364003"
assert inst.typeTested[0].type.coding[0].display == "Serum specimen"
assert inst.typeTested[0].type.coding[0].system == "http://snomed.info/sct"
assert inst.typeTested[1].container.cap.coding[0].code == "green"
assert inst.typeTested[1].container.cap.coding[0].display == "green cap"
assert (
inst.typeTested[1].container.cap.coding[0].system == "urn:iso:std:iso:6710:2017"
)
assert inst.typeTested[1].container.material.coding[0].code == "32039001"
assert inst.typeTested[1].container.material.coding[0].display == "glass"
assert (
inst.typeTested[1].container.material.coding[0].system
== "http://snomed.info/sct"
)
assert inst.typeTested[1].container.minimumVolumeQuantity.code == "mL"
assert (
inst.typeTested[1].container.minimumVolumeQuantity.system
== "http://unitsofmeasure.org"
)
assert inst.typeTested[1].container.minimumVolumeQuantity.unit == "ml"
assert float(inst.typeTested[1].container.minimumVolumeQuantity.value) == float(2)
assert inst.typeTested[1].container.type.coding[0].code == "767390000"
assert inst.typeTested[1].container.type.coding[0].display == (
"Evacuated blood collection tube with heparin lithium and gel" " separator"
)
assert (
inst.typeTested[1].container.type.coding[0].system == "http://snomed.info/sct"
)
assert inst.typeTested[1].handling[0].maxDuration.code == "min"
assert (
inst.typeTested[1].handling[0].maxDuration.system == "http://unitsofmeasure.org"
)
assert inst.typeTested[1].handling[0].maxDuration.unit == "minute"
assert float(inst.typeTested[1].handling[0].maxDuration.value) == float(60)
assert (
inst.typeTested[1].handling[0].temperatureQualifier.text
== "Ambient temperature"
)
assert inst.typeTested[1].handling[0].temperatureRange.high.code == "Cel"
assert (
inst.typeTested[1].handling[0].temperatureRange.high.system
== "http://unitsofmeasure.org"
)
assert inst.typeTested[1].handling[0].temperatureRange.high.unit == "°C"
assert float(inst.typeTested[1].handling[0].temperatureRange.high.value) == float(
25
)
assert inst.typeTested[1].handling[0].temperatureRange.low.code == "Cel"
assert (
inst.typeTested[1].handling[0].temperatureRange.low.system
== "http://unitsofmeasure.org"
)
assert inst.typeTested[1].handling[0].temperatureRange.low.unit == "°C"
assert float(inst.typeTested[1].handling[0].temperatureRange.low.value) == float(15)
assert inst.typeTested[1].handling[1].maxDuration.code == "h"
assert (
inst.typeTested[1].handling[1].maxDuration.system == "http://unitsofmeasure.org"
)
assert inst.typeTested[1].handling[1].maxDuration.unit == "hour"
assert float(inst.typeTested[1].handling[1].maxDuration.value) == float(8)
assert (
inst.typeTested[1].handling[1].temperatureQualifier.text
== "Refrigerated temperature"
)
assert inst.typeTested[1].handling[1].temperatureRange.high.code == "Cel"
assert (
inst.typeTested[1].handling[1].temperatureRange.high.system
== "http://unitsofmeasure.org"
)
assert inst.typeTested[1].handling[1].temperatureRange.high.unit == "°C"
assert float(inst.typeTested[1].handling[1].temperatureRange.high.value) == float(8)
assert inst.typeTested[1].handling[1].temperatureRange.low.code == "Cel"
assert (
inst.typeTested[1].handling[1].temperatureRange.low.system
== "http://unitsofmeasure.org"
)
assert inst.typeTested[1].handling[1].temperatureRange.low.unit == "°C"
assert float(inst.typeTested[1].handling[1].temperatureRange.low.value) == float(2)
assert inst.typeTested[1].preference == "alternate"
assert inst.typeTested[1].rejectionCriterion[0].coding[0].code == "insufficient"
assert (
inst.typeTested[1].rejectionCriterion[0].coding[0].display
== "insufficient specimen volume"
)
assert (
inst.typeTested[1].rejectionCriterion[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/rejection-criteria"
)
assert inst.typeTested[1].rejectionCriterion[1].coding[0].code == "hemolized"
assert (
inst.typeTested[1].rejectionCriterion[1].coding[0].display
== "hemolized specimen"
)
assert (
inst.typeTested[1].rejectionCriterion[1].coding[0].system
== "http://terminology.hl7.org/CodeSystem/rejection-criteria"
)
assert inst.typeTested[1].type.coding[0].code == "119361006"
assert inst.typeTested[1].type.coding[0].display == "Plasma specimen"
assert inst.typeTested[1].type.coding[0].system == "http://snomed.info/sct"
def test_specimendefinition_1(base_settings):
"""No. 1 tests collection for SpecimenDefinition.
Test File: specimendefinition-example-serum-plasma.json
"""
filename = (
base_settings["unittest_data_dir"]
/ "specimendefinition-example-serum-plasma.json"
)
inst = specimendefinition.SpecimenDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "SpecimenDefinition" == inst.resource_type
impl_specimendefinition_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "SpecimenDefinition" == data["resourceType"]
inst2 = specimendefinition.SpecimenDefinition(**data)
impl_specimendefinition_1(inst2)
|
mindinsight/datavisual/data_transform/graph/optimized_graph.py | mindspore-ai/mindinsight | 216 | 45757 | <reponame>mindspore-ai/mindinsight<gh_stars>100-1000
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""This file is used to define the MindSpore graph."""
from collections import defaultdict
from mindinsight.datavisual.common.log import logger
from mindinsight.datavisual.data_transform.graph.msgraph import MSGraph
from mindinsight.domain.graph.base import NodeTypeEnum
class OptimizedGraph(MSGraph):
"""The object describes the MindSpore graph, and it is defined in the anf_ir proto file."""
MIN_GROUP_NODE_COUNT = 10
def __init__(self):
super().__init__()
self._load_node_temp_cache = {}
def _inherit_input_output_from_subnode(self, parent_node, subnode_list, filtered_type=None):
"""
Adds the input and output of all direct child nodes to the current node.
Args:
parent_node (Node): The nodes that inherit the input and output of the child nodes.
subnode_list (list[Node]): A list of child nodes that are inherited from the input and output.
filtered_type (set(str)): Filter some input and output that do not require inheritance
based on the node type. Default is filter const node.
Note:
- Only the inputs and outputs of the external scope are inherited.
- Before add_const_node method, if the input is a const,
the scope of the const node is not startswith the name of parent node.
So in this scenario, we need to filter the const nodes.
"""
filtered_type = {NodeTypeEnum.CONST.value} if filtered_type is None else filtered_type
for method in ['inputs', 'outputs', 'proxy_inputs', 'proxy_outputs']:
for node in subnode_list:
for item_name, item_attr in getattr(node, method).items():
target_node = self._get_normal_node(node_name=item_name)
if target_node is None:
logger.warning("inherit %s from subnode, target node (%s) is None", method, item_name)
continue
if item_name.startswith(f'{parent_node.name}/'):
continue
if target_node.type in filtered_type:
continue
getattr(parent_node, f'add_{method}')(item_name, item_attr)
def _cache_node(self, node):
"""Store the node in the cache."""
# Notice:
# The additional caching is used to handle the Const, Parameter and LOAD nodes separately later.
super()._cache_node(node)
if node.type == NodeTypeEnum.LOAD.value:
self._load_node_temp_cache.update({node.name: node})
def _delete_nodes_of_cache(self, node_names):
"""Delete node from cache."""
logger.debug("These nodes will be removed from the cache, node names: %s.", node_names)
for name in node_names:
if self._parameter_node_temp_cache.get(name):
self._parameter_node_temp_cache.pop(name)
if self._const_node_temp_cache.get(name):
self._const_node_temp_cache.pop(name)
if self._load_node_temp_cache.get(name):
self._load_node_temp_cache.pop(name)
node = self._get_normal_node(node_name=name)
self._normal_node_map.pop(name)
self._node_id_map_name.pop(node.node_id)
def _parse_data(self, proto_data):
"""
The proto data is parsed and all nodes are stored in the specified structure.
Args:
proto_data (anf_ir_pb2.GraphProto): Refer to anf_ir_pb2.GraphProto object.
"""
logger.info("Start to parse graph proto data.")
self._parse_op_nodes(proto_data.node)
self._parse_parameters(proto_data.parameters)
self._parse_consts(proto_data.const_vals)
self._update_input_after_create_node()
self._update_output_after_create_node()
self._delete_non_computational_ops()
self._clean_no_input_output_node()
self._extract_node_by_single_node_in_scope()
logger.info("Parse proto data end, normal node count(only contain op node, "
"parameter, const): %s.", self.normal_node_count)
def _parse_op_nodes(self, node_protos):
"""
Parse `anf_ir_pb2.NodeProto` object, and create a normal node.
Args:
node_protos (list[anf_ir_pb2.NodeProto]): Refer to anf_ir_pb2.NodeProto.
"""
logger.debug("Start to parse op nodes from proto.")
for topological_index, node_proto in enumerate(node_protos):
if not node_proto.name:
logger.warning("Finding a node with an empty name will not save it.")
continue
if node_proto.full_name.startswith("Gradients") or "optimizer" in node_proto.full_name \
or "opt" in node_proto.instance_name:
continue
self._parse_op_node(topological_index, node_proto)
def _update_input_after_create_node(self):
"""Update the input of node after create node."""
for node in self._normal_node_map.values():
for src_node_id, input_attr in dict(node.inputs).items():
node.delete_inputs(src_node_id)
if not self._is_node_exist(node_id=src_node_id):
continue
src_node = self._get_normal_node(node_id=src_node_id)
input_attr['shape'] = src_node.output_shape
input_attr['data_type'] = src_node.output_data_type
node.add_inputs(src_name=src_node.name, input_attr=input_attr)
nodes = self._list_nodes_without_parameter_const()
for node in nodes:
for src_node_name, _ in dict(node.inputs).items():
if not self._is_node_exist(node_name=src_node_name):
logger.warning("Source node (%s) is None.", src_node_name)
continue
src_node = self._get_normal_node(node_name=src_node_name)
if src_node.type in (NodeTypeEnum.LOAD.value,
NodeTypeEnum.TUPLE_GET_ITEM.value,
NodeTypeEnum.MAKETUPLE.value,
NodeTypeEnum.UPDATE_STATE.value):
node.delete_inputs(src_node_name)
for source_node_name, source_attr in dict(src_node.inputs).items():
source_node = self._get_normal_node(node_name=source_node_name)
if source_node is None:
logger.warning("Source node (%s) is None.", source_node_name)
continue
source_attr['shape'] = source_node.output_shape
source_attr['data_type'] = source_node.output_data_type
node.add_inputs(src_name=source_node.name, input_attr=source_attr)
def _update_output_after_create_node(self):
"""Update the output of node after create node."""
super()._update_output_after_create_node()
nodes = self._list_nodes_without_parameter_const()
for node in nodes:
for src_node_name, _ in dict(node.outputs).items():
if not self._is_node_exist(node_name=src_node_name):
logger.warning("Source node (%s}) is None.", src_node_name)
continue
src_node = self._get_normal_node(node_name=src_node_name)
if src_node.type in (NodeTypeEnum.LOAD.value,
NodeTypeEnum.TUPLE_GET_ITEM.value,
NodeTypeEnum.MAKETUPLE.value,
NodeTypeEnum.UPDATE_STATE.value):
node.delete_outputs(src_node_name)
for source_node_name, source_attr in dict(src_node.outputs).items():
source_node = self._get_normal_node(node_name=source_node_name)
if source_node is None:
logger.warning("Source node (%s) is None.", source_node_name)
continue
source_attr['shape'] = source_node.output_shape
source_attr['data_type'] = source_node.output_data_type
node.add_outputs(src_name=source_node.name, output_attr=source_attr)
def _delete_non_computational_ops(self):
"""Deleted non-computational operators."""
delete_names = []
for node in self._normal_node_map.values():
if node.type in (NodeTypeEnum.LOAD.value,
NodeTypeEnum.TUPLE_GET_ITEM.value,
NodeTypeEnum.MAKETUPLE.value,
NodeTypeEnum.UPDATE_STATE.value):
delete_names.append(node.name)
self._delete_nodes_of_cache(delete_names)
def _list_nodes_without_parameter_const(self):
"""List nodes without parameter and const node."""
nodes = self._normal_node_map.values()
not_expect_type = (NodeTypeEnum.CONST.value, NodeTypeEnum.PARAMETER.value)
nodes = filter(lambda node: node.type not in not_expect_type, nodes)
nodes = sorted(nodes, key=lambda node: node.topological_index)
return nodes
def _extract_node_by_single_node_in_scope(self):
"""Extract node from the scope which has only one node."""
nodes = self._list_nodes_without_parameter_const()
scope_map_types = defaultdict(set)
scope_map_node_cnt = defaultdict(int)
for node in nodes:
if not node.scope or '/' not in node.scope:
continue
scope_map_types[node.scope].add(node.type)
scope_map_node_cnt[node.scope] += 1
filter_scopes = set()
for scope, types in scope_map_types.items():
if len(types) == 1 and scope_map_node_cnt[scope] > 1 and types.pop() in scope:
filter_scopes.add(scope)
for filter_scope in list(filter_scopes):
for scope in scope_map_types:
if scope.startswith(f'{filter_scope}/'):
filter_scopes.remove(filter_scope)
break
if not filter_scopes:
return
for node in nodes:
if node.scope in filter_scopes and '/' in node.scope:
name = node.name.rsplit('/', 1)[1]
new_scope = node.scope.rsplit('/', 1)[0]
new_name = f'{new_scope}/{name}'
self._update_node_name_of_cache(node, new_name)
return
def _clean_no_input_output_node(self):
"""Clean nodes which has no input and output."""
nodes = self._list_nodes_without_parameter_const()
deleted_names = []
for node in nodes:
if not node.inputs and not node.outputs:
deleted_names.append(node.name)
self._delete_nodes_of_cache(deleted_names)
|
Athos/RandomForests/parse_graphviz_to_ezpc_input.py | kanav99/EzPC | 221 | 45761 | """
Authors: <NAME>, <NAME>.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
This Python file takes in a graphviz text file,
creates a tree in memory, and outputs the tree's
characteristics (feature and threshold at each node).
It is ASSUMED that each node of the input tree is
initially either a leaf or has exactly 2 children.
This file also takes care of adding dummy nodes to
create a new functionally equivalent complete
binary tree to be used by the EzPC code.
"""
import math
import os
class TreeNode(object):
def __init__(self):
self.left = None
self.right = None
self.value = 0
self.feature = -1
self.depth = -1
def fill_recur(ctx, features, threshold, depth):
ctx.max_depth = max(ctx.max_depth, depth)
if features[ctx.ctr] == -1:
# Leaf Node
node = TreeNode()
node.value = threshold[ctx.ctr]
node.depth = depth
ctx.ctr += 1
return node
else:
node = TreeNode()
node.value = threshold[ctx.ctr]
node.feature = features[ctx.ctr]
node.depth = depth
ctx.ctr += 1
node_left = fill_recur(ctx, features, threshold, depth + 1)
node_right = fill_recur(ctx, features, threshold, depth + 1)
node.left = node_left
node.right = node_right
return node
def is_internal(node):
if node.feature == -1:
return False
else:
return True
def get_to_pad_subtree(ctx, node, depth_diff):
if depth_diff == 1:
# New leafs
node_left = TreeNode()
node_right = TreeNode()
node_left.value = node.value
node_right.value = node.value
node_left.depth = ctx.max_depth + 1 - depth_diff
node_right.depth = ctx.max_depth + 1 - depth_diff
node.left = node_left
node.right = node_right
node.feature = 1
node.value = 0.0
return node
else:
node_left = TreeNode()
node_right = TreeNode()
node_left.value = node.value
node_right.value = node.value
node_left.feature = node.feature
node_right.feature = node.feature
node_left.depth = ctx.max_depth + 1 - depth_diff
node_right.depth = ctx.max_depth + 1 - depth_diff
node_left = get_to_pad_subtree(ctx, node_left, depth_diff - 1)
node_right = get_to_pad_subtree(ctx, node_right, depth_diff - 1)
node.left = node_left
node.right = node_right
node.feature = 1
node.value = 0.0
return node
def pad_to_complete_tree(ctx, node):
if not is_internal(node):
# Leaf node
if node.depth != ctx.max_depth:
# Needs padding
node = get_to_pad_subtree(ctx, node, ctx.max_depth - node.depth)
else:
pad_to_complete_tree(ctx, node.left)
pad_to_complete_tree(ctx, node.right)
def dump_complete_tree(ctx, root):
queue = [root]
ctr_local = 0
while ctr_local < ctx.nodes_in_complete_tree:
current_node = queue[ctr_local]
ctr_local += 1
if is_internal(current_node):
ctx.ezpc_features.append(current_node.feature)
ctx.ezpc_threshold.append(current_node.value)
ctx.ezpc_depth.append(current_node.depth)
queue.append(current_node.left)
queue.append(current_node.right)
else:
ctx.ezpc_features.append(-1)
ctx.ezpc_threshold.append(current_node.value)
ctx.ezpc_depth.append(current_node.depth)
def parse_graphviz_to_ezpc_input(tree_file_path, task, scaling_factor):
with open(tree_file_path, "r") as f:
lines = f.readlines()
lines = lines[1:]
depth = 0
nodes_this_tree = 0
features = []
threshold = []
for i in range(len(lines)):
curline = lines[i]
# print("processing :", curline)
start_location = curline.find('"')
start_location += 1
if start_location == 0:
break
nodes_this_tree += 1
if curline[start_location] == "X":
# This is an internal node
end_location_feature = curline.find("]")
start_location_th = curline.find("<=")
end_location_th = curline.find("\\n")
feature_val = int(curline[start_location + 2 : end_location_feature])
threshold_val = float(curline[start_location_th + 3 : end_location_th])
features.append(feature_val)
threshold.append(threshold_val)
# print("Internal Node")
# print(feature_val)
# print(threshold_val)
else:
# This is a leaf
start_location_val = -1
if task == "reg":
start_location_val = curline.find("value =")
else:
start_location_val = curline.find("class =")
assert start_location_val != -1, (
"Task specified: " + task + " may be incorrect!"
)
end_location_val = curline.find('" filled')
output_val = float(curline[start_location_val + 7 : end_location_val])
features.append(-1)
threshold.append(output_val)
# print("Leaf Node")
# print(output_val)
class Context(object):
def __init__(self):
self.ctr = 0
self.ezpc_features = []
self.ezpc_threshold = []
self.ezpc_depth = []
self.max_depth = -1
self.nodes_in_complete_tree = -1
ctx = Context()
root = fill_recur(ctx, features, threshold, 1)
ctx.nodes_in_complete_tree = pow(2, ctx.max_depth) - 1
# if nodes_in_complete_tree != nodes_this_tree:
# print("[PADDING] Input tree not complete. Padding to make complete.")
# else:
# print("Input tree already complete. No need to pad.")
pad_to_complete_tree(ctx, root)
dump_complete_tree(ctx, root)
model_weights = "weight_sf_" + str(scaling_factor) + ".inp"
ezpc_tree_path = os.path.join(os.path.dirname(tree_file_path), model_weights)
# print("Writing to " + ezpc_tree_path)
# print("[FLOAT TO FIXED] Scaling by 2^" + str(scaling_factor) + " times")
with open(ezpc_tree_path, "a") as output_file:
for i in range(len(ctx.ezpc_features)):
output_file.write(str(ctx.ezpc_features[i]) + "\n")
for i in range(len(ctx.ezpc_threshold)):
output_file.write(
str(int(math.floor((2 ** scaling_factor) * ctx.ezpc_threshold[i])))
+ "\n"
)
return ctx.max_depth
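# Hedged usage sketch: how the converter above might be invoked for a single tree.
# The file name "tree_0.txt" (a graphviz export of one decision tree), the "reg"
# task flag and the scaling factor of 10 are illustrative assumptions.
if __name__ == "__main__":
    example_tree = "tree_0.txt"
    if os.path.exists(example_tree):
        depth = parse_graphviz_to_ezpc_input(example_tree, task="reg", scaling_factor=10)
        print("Padded complete tree depth:", depth)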
|
lib/sundials_6.0.0/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py | detcitty/math | 256 | 45799 | <gh_stars>100-1000
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# SUNDIALS Copyright Start
# Copyright (c) 2002-2021, Lawrence Livermore National Security
# and Southern Methodist University.
# All rights reserved.
#
# See the top-level LICENSE and NOTICE files for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# SUNDIALS Copyright End
# -----------------------------------------------------------------------------
# imports
import glob
import sys
import pylab as plt
import pandas as pd
import numpy as np
# load mesh data file
mesh = np.loadtxt('mesh.txt', dtype=np.double)
# X,Y,Z = np.meshgrid(mesh[0,:], mesh[1,:], mesh[2,:])
# calculate h
hx = mesh[0,1] - mesh[0,0]
hy = mesh[1,1] - mesh[1,0]
hz = mesh[2,1] - mesh[2,0]
nx = len(mesh[0,:])
ny = len(mesh[1,:])
nz = len(mesh[2,:])
print("nx, ny, nz = %d, %d, %d" % (nx, ny, nz))
print("hx, hy, hz = %g, %g, %g" % (hx, hy, hz))
# load output time file
times = np.loadtxt('t.000000.txt', dtype=np.double)
# load solution data files
ufiles = glob.glob('u.' + ('[0-9]'*6) + '.txt'); ufiles.sort()
vfiles = glob.glob('v.' + ('[0-9]'*6) + '.txt'); vfiles.sort()
wfiles = glob.glob('w.' + ('[0-9]'*6) + '.txt'); wfiles.sort()
udata = []
vdata = []
wdata = []
sys.stdout.write("reading 1/%d...\r" % len(ufiles))
sys.stdout.flush()
for idx in range(0,len(ufiles)):
sys.stdout.write("reading %d/%d...\r" % (idx+1,len(ufiles)))
sys.stdout.flush()
udata.append(pd.read_csv(ufiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double))
vdata.append(pd.read_csv(vfiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double))
wdata.append(pd.read_csv(wfiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double))
sys.stdout.write("\n")
sys.stdout.flush()
print("stacking...")
udata = pd.concat(udata, axis=1).to_numpy()
vdata = pd.concat(vdata, axis=1).to_numpy()
wdata = pd.concat(wdata, axis=1).to_numpy()
# reshape data into time,x,y,z arrays
print("reshaping...")
nt = len(times)
udata = np.reshape(udata, (nt, nx, ny, nz))
vdata = np.reshape(vdata, (nt, nx, ny, nz))
wdata = np.reshape(wdata, (nt, nx, ny, nz))
# save data to pickle
print("saving...")
np.savez_compressed('output-with-h-%.2e.npz' % hx, t=times, u=udata, v=vdata, w=wdata, mesh=mesh)
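# Hedged verification sketch: reload the compressed archive written above and
# report the stored array shapes. The file name mirrors the save call exactly.
with np.load('output-with-h-%.2e.npz' % hx) as archive:
    print("reloaded:", {key: archive[key].shape for key in ('t', 'u', 'v', 'w', 'mesh')})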
|
BubbleChart/BubbleChart.py | O-Aiden/Danim | 218 | 45808 | <reponame>O-Aiden/Danim
from manimlib.imports import *
from Danim.BubbleChart.BCutils import *
from Danim.BubbleChart.bubble_constants import *
class BubbleChart(VGroup):
    # A class to quickly create bubble chart animations.
    # Some settings are fixed and cannot be changed freely.
CONFIG = {
"show_axes_lable": SHOW_AXIS_LABLES, #default True
"show_time_lable": True, # names to show are stored in SHOWN_ENTITY_NAMES
"show_color_lables": True, # default group names
"set_bubble_colors": "randomly",# two options: "by_group" or "randomly"
"x_axis_lable": X_AXIS_LABLE,
"y_axis_lable": Y_AXIS_LABLE,
"show_creation": False
}
def __init__(self, X, Y, R, entity_names, T, **kwargs):
#CONFIG to attributes
digest_config(self,kwargs)
self.entity_names = entity_names
self.times = T
#create axes
(self.axes_config,
self.axes
) = import_data_and_set_axes(X,Y)
#transform X,Y,R into screen coordinates
self.coordinates,self.radiusdata = transform_from_data_to_screencoordinates(X,Y,R,self.axes)
#set the colors of bubbles:
#COLORMAT is a list of shape(num_of_bubbles,1)
#each element is a color array
self.COLORMAT = self.generate_colormatrix()
if not self.show_creation:
#set the bubble to the start time
self.bubbles = set_up_the_bubbles(
self.coordinates[:,0],
self.radiusdata[:,0],
self.axes,
color_mat = self.COLORMAT
)
#create lables
self.lables_creation()
VGroup.__init__(self, **kwargs)
self.add(
self.axes,
self.bubbles,
self.lables
)
else:
self.lables_creation()
VGroup.__init__(self, **kwargs)
self.add(
self.axes,
self.lables
)
#the bubbles and will be created later
#using animation method self.Get_Creation_Animation(directly_show_creation = False)
def get_current_timeindex(self):
return self.times.index(self.time_lable.get_tex_string())
def generate_colormatrix(self, colors = None):
# the color of each bubbles can be set by some group lables
# for example: if each bubble represents a contry, then
# you can set all the contry in Asia as red bubbles,
# North American Contries as blue bubbles
# you need a cvs file called the "Group_lable.csv" to store each tags
# or you can just put a dic to represents that relationship
if self.set_bubble_colors == "by_group":
#generate color matrices with default color red
COLORMAT = [RED]*self.coordinates.shape[0]
#read information from "Group_lable.csv"
group_lable_data = np.array(
pd.DataFrame(
pd.read_csv(GROUP_LABLE_CSV_FILE, encoding = "gbk", index_col = 0),
index = self.entity_names
)
)
#check whether the numbers of rows are the same
assert(len(COLORMAT) == group_lable_data.shape[0])
self.group_index = []
#match color to COLORMAT with relationship in COLOR_LABLE_DICT
for i,lable in enumerate(group_lable_data):
if lable[0] in COLOR_LABLE_DICT:
COLORMAT[i] = COLOR_LABLE_DICT[lable[0]]
self.group_index.append(COLOR_LABLE_INDEX_DICT[lable[0]])
#generate color randomly
elif self.set_bubble_colors == "randomly":
COLORMAT = []
for i in range(0,self.coordinates.shape[0]+1):
COLORMAT.append(random_color())
else:
            COLORMAT = [RED] * self.coordinates.shape[0]
return COLORMAT
def lables_creation(self):
#lable creation:
self.lables = VGroup()
if self.show_axes_lable:
#Create the x_axis_lable
self.lables.add(
(TextMobject(
self.x_axis_lable, color = TEXT_COLOR
).scale(
TEXT_SCALE_FACTOR
)
).shift(
self.axes.x_axis.number_to_point(self.axes.x_axis.x_max) + X_LABLE_ADJUST_VECTOR
)
)
#create the y_axis_lable:
self.lables.add(
(TextMobject(
self.y_axis_lable, color = TEXT_COLOR
).scale(
TEXT_SCALE_FACTOR
)
).shift(
self.axes.y_axis.number_to_point(self.axes.y_axis.x_max) + Y_LABLE_ADJUST_VECTOR
)
)
#create the time lable
if self.show_time_lable:
self.time_lable = (TextMobject(
str(self.times[0]),
color = TIME_LABLE_COLOR).scale(
TIME_LABLE_SCALE_FACTOR
)).shift(TIME_LABLE_POSITION)
#self.lables.add(self.time_lable)
#create color lables(with rectangles)
if self.show_color_lables and (not self.show_creation):
entity_color_map = dict(
dict(zip(self.entity_names,self.COLORMAT)),
**COLOR_LABLE_DICT
)
self.color_lables = VGroup()
for i,entity in enumerate(SHOWN_ENTITY_NAMES):
if entity in entity_color_map:
rect = Rectangle(
height = RECT_HIGHT,
width = RECT_WIDTH,
color = entity_color_map[entity],
fill_opacity = 1)
if SHOW_CN_NAMES:
name_to_show = online_translation(entity)
rect_name = TextMobject(name_to_show).scale(
RECT_TEXT_SCALE_FACTOR)
else:
rect_name = TextMobject(entity).scale(
RECT_TEXT_SCALE_FACTOR)
if i == 0:
rect.shift(RECT_POSITION)
rect_name.next_to(rect,RIGHT)
else:
rect.align_to(self.color_lables,direction = LEFT+DOWN)
rect.shift(DOWN* RECT_HIGHT*RECT_INTERVAL_FACTOR)
rect_name.next_to(rect,RIGHT)
self.color_lables.add(rect,rect_name)
self.lables.add(self.color_lables)
def Get_Creation_Animation(
self,
directly_show_creation = True,
maximum_circles_to_show = 50,
creation_time_index = 0,
initial_position = 3*UP + 3*RIGHT
):
creation_time = self.times[creation_time_index]
#Show Creation all together
if directly_show_creation:
self.lables_creation()
#self.add(self.lables)
return ShowCreation(self,run_time = CREATION_RUN_TIME)
#Show creaton with all name listed
else:
self.color_lables = VGroup()
old_bubbles = []
transfered_bubbles = []
name_lables = []
self.circle_index = []
self.bubbles = VGroup()
self.grow_animation = []
self.transfer_animation = []
self.color_lables_animation = []
def generate_circle_matrix(indices):
#indices is the relative index
#position in self.entity_names
new_entity = []
y0 = self.axes.x_axis.number_to_point(self.axes.x_axis.x_max)[1]
for i,name in enumerate(self.entity_names):
if i in indices:
new_entity.append(name)
if not len(old_bubbles) == 0:
start_index = len(old_bubbles)
else:
start_index = 0
for j,name in enumerate(new_entity):
#old_bubble creation
if j == 0:
previous_index = start_index
cornor_index = start_index
old_bubbles.append(
set_up_the_bubbles(
initial_position,
self.radiusdata[indices[j],creation_time_index],
self.axes,
self.COLORMAT[indices[j]],
mode = 'single'
)
)
else:
old_bubbles.append(
set_up_the_bubbles(
np.array([0,0,0]),
self.radiusdata[indices[j],creation_time_index],
self.axes,
self.COLORMAT[indices[j]],
mode = 'single'
)
)
#name_lable creation
if SHOW_CN_NAMES:
name_shown = online_translation(name)
else:
name_shown = name
name_lables.append(
TextMobject(
name_shown
).scale(
NAME_TEXT_SCALE_FACTOR
)
)
name_lables[-1].next_to(old_bubbles[-1],RIGHT)
#check if circle matrix reaches the bottom
height = old_bubbles[-1].get_critical_point(UP)[1] - old_bubbles[-1].get_critical_point(DOWN)[1]
cell = old_bubbles[previous_index].get_critical_point(DOWN)[1]
if not j == 0:
current_VGroup = VGroup(old_bubbles[-1],name_lables[-1])
# if the curreny circle touches the bottom:
if cell - height < y0 + 0.5:
current_VGroup.next_to(old_bubbles[cornor_index],LEFT)
current_VGroup.shift(0.25*LEFT)
cornor_index = len(old_bubbles) - 1
# if the curreny circle does not touch the bottom:
else:
current_VGroup.next_to(previous_VGroup,DOWN)
#transfered_bubbles creation:
transfered_bubbles.append(
set_up_the_bubbles(
self.coordinates[indices[j],creation_time_index],
self.radiusdata[indices[j],creation_time_index],
self.axes,
self.COLORMAT[indices[j]],
mode = 'single'
)
)
#record the circle index
self.circle_index.append(indices[j])
#append the animation
self.grow_animation.append(
AnimationGroup(
FadeIn(
old_bubbles[-1]
),
Write(
name_lables[-1]
),
run_time = SINGLE_GROW_RUN_TIME
)
)
self.transfer_animation.append(
AnimationGroup(
ReplacementTransform(
old_bubbles[-1],
transfered_bubbles[-1]
),
FadeOut(
name_lables[-1]),
run_time = SINGLE_TRANSFER_TIME
)
)
previous_index = len(old_bubbles) - 1
previous_VGroup = VGroup(old_bubbles[-1],name_lables[-1])
if self.set_bubble_colors == "randomly":
indices = []
for i,name in enumerate(self.entity_names):
indices.append(i)
quotient = len(self.entity_names)//maximum_circles_to_show
remainder = len(self.entity_names)%maximum_circles_to_show
for i in range(quotient):
generate_circle_matrix(indices[maximum_circles_to_show*(i):maximum_circles_to_show*(i+1)])
#generate_circle_matrix(indices[maximum_circles_to_show*(i+1):len(self.entity_names)])
self.bubbles = VGroup(*transfered_bubbles)
#if set bubbles by group
#usurally with self.show_color_lables = True:
else:
if self.show_color_lables:
entity_color_map = dict(
dict(zip(self.entity_names,self.COLORMAT)),
**COLOR_LABLE_DICT
)
self.indices = []
for i,entity in enumerate(SHOWN_ENTITY_NAMES):
if entity in entity_color_map:
rect = Rectangle(
height = RECT_HIGHT,
width = RECT_WIDTH,
color = entity_color_map[entity],
fill_opacity = 1)
if SHOW_CN_NAMES:
name_to_show = online_translation(entity)
rect_name = TextMobject(name_to_show).scale(
RECT_TEXT_SCALE_FACTOR)
else:
rect_name = TextMobject(entity).scale(
RECT_TEXT_SCALE_FACTOR)
if i == 0:
rect.shift(RECT_POSITION)
rect_name.next_to(rect,RIGHT)
else:
rect.align_to(self.color_lables,direction = LEFT+DOWN)
rect.shift(DOWN* RECT_HIGHT*RECT_INTERVAL_FACTOR)
rect_name.next_to(rect,RIGHT)
self.color_lables_animation.append(
AnimationGroup(
FadeIn(rect),
Write(rect_name)
)
)
self.color_lables.add(rect,rect_name)
indice = []
for j,name in enumerate(self.entity_names):
if self.COLORMAT[j] == COLOR_LABLE_DICT[entity]:
indice.append(j)
generate_circle_matrix(indice)
self.indices.append(indice)
def sort_by_index(listA,index_list):
assert(len(listA) == len(index_list))
new_list = []
for z,element in enumerate(listA):
new_list.append(listA[index_list[z]])
return new_list
index_list = []
for indice in self.indices:
index_list = index_list + indice
new_bubbles = sort_by_index(transfered_bubbles,index_list)
#self.bubbles = VGroup()
for bubble in new_bubbles:
self.bubbles.add(bubble)
#self.lables.add(self.color_lables)
self.add(self.bubbles)
#sort the animation by data index
#originally theanimation list is sort by creation order
def Get_Hightlight_Animation(
self,
names_to_show,#list of str, must be elements in self.entity_names
wait_time = None,#a number or list of numbers, lens must match the number of entities
intersection_wait_time = 1,
directions = None,
#directions is a list of direction vectors,
#lens must match the number of entities,
#if none, lables will choose
lable_sizes = None,
#lable_sizes is a list of numbers indicating the size of each lable
#lens must match the number of entities to show
wiggle_time = None,
wiggle_factor = None,
fadeout_time = None,
current_time_index = 0,
fadeout_at_once = False#
):
if isinstance(names_to_show,list):
numbers_of_entities = len(names_to_show)
else:
numbers_of_entities = 1
if directions is None:
directions = [UP]*numbers_of_entities
if wiggle_factor is None:
wiggle_factor = [1.5]*numbers_of_entities
if wiggle_time is None:
wiggle_time = [1.5]*numbers_of_entities
if lable_sizes is None:
lable_sizes = [0.7]*numbers_of_entities
if fadeout_time is None:
fadeout_time = [1]*numbers_of_entities
if wait_time is None:
wait_time = [1]*numbers_of_entities
old_lables = []
new_lables = []
indices = []
animation = []
#TODO: add empty animation more efficiently
#Currently I add empty animation the dumb way!
#add a black dot outside the screen!
#I don't know how to add empty
dumb_dot = Dot(color = BLACK).shift(100*UR)
intersection_wait_animation = ApplyMethod(
dumb_dot.shift,
0.1*RIGHT,
run_time = intersection_wait_time
)
for i,name in enumerate(names_to_show):
if name in self.entity_names:
indices.append(self.entity_names.index(name))
if SHOW_CN_NAMES:
name_to_show = online_translation(name)
else:
name_to_show = name
mid_wait_animation = ApplyMethod(
dumb_dot.shift,
0.1*RIGHT,
run_time = wait_time[i]
)
old_lables.append(
TextMobject(
name_to_show,
color = self.COLORMAT[indices[i]]
).scale(0.1)
)
new_lables.append(
TextMobject(
name_to_show,
color = self.COLORMAT[indices[i]]
).scale(lable_sizes[i])
)
old_lables[i].move_to(
self.coordinates[indices[i],current_time_index]
)
new_lables[i].next_to(
self.bubbles.submobjects[indices[i]],
directions[i]
)
animation.append(
ShowCreation(old_lables[i],run_time = 0.02)
)
'''
animation.append(
AnimationGroup(
ReplacementTransform(
old_lables[i],
new_lables[i],
run_time = wiggle_time[i]
),
WiggleOutThenIn(
self.bubbles.submobjects[indices[i]],
scale_value = wiggle_factor[i],
run_time = wiggle_time[i]
)
)
)
'''
animation.append(
[ReplacementTransform(
old_lables[i],
new_lables[i],
run_time = wiggle_time[i]
),
WiggleOutThenIn(
self.bubbles.submobjects[indices[i]],
scale_value = wiggle_factor[i],
run_time = wiggle_time[i]
)]
)
animation.append(mid_wait_animation)
if not fadeout_at_once:
animation.append(
FadeOut(
new_lables[i],
run_time = fadeout_time[i]
)
)
animation.append(intersection_wait_animation)
if fadeout_at_once:
lables = VGroup()
for lable in new_lables:
lables.add(lable)
print(len(lables.submobjects))
animation.append(FadeOut(lables,run_time = fadeout_time[0]))
print(len(animation))
return animation
def Time_Update_Animation(
self,
t,
bubble_transform_time = BUBBLE_TRANSFROMATION_RUN_TIME,
time_lable_run_time = TIME_LABLE_TRANSFROMATION_RUN_TIME,
show_track = False
):
        # Advance to a specific time. t can be an index (integer) within
        # range(0, len(self.times)) or an element of self.times.
args = []
#if t is not a index, we need to convert it into index
if not isinstance(t,int):
if not t in self.times:
raise Exception("input argument 't' is not in self.times")
else:#if t is not a index, but is a element in self.times
t = self.times.index(t)
#update the bubbles
new_circles = VGroup()
for i,bubbledata in enumerate(zip(self.coordinates[:,t],self.radiusdata[:,t])):
new_circle = Circle(
radius = bubbledata[1],
color = self.COLORMAT[i],
fill_opacity = FILL_OPACITY).shift(bubbledata[0]
)
new_circles.add(new_circle)
new_circles, self.bubbles = self.bubbles,new_circles
if not show_track:
args.append(
ReplacementTransform(
new_circles,
self.bubbles,
run_time = bubble_transform_time
)
)
else:
args.append(
Transform(
new_circles,
self.bubbles,
run_time = bubble_transform_time
)
)
'''
#new_circle,self.bubbles.submobjects[i] = self.bubbles.submobjects[i],new_circle
if not show_track:
args.append(
ReplacementTransform(
new_circle,
self.bubbles.submobjects[i],
run_time = bubble_transform_time
)
)
else:
args.append(
Transform(
new_circle,
self.bubbles.submobjects[i],
run_time = bubble_transform_time
)
)
'''
# update the time lable:
if hasattr(self,"time_lable"):
new_time_lable = (TextMobject(
str(self.times[t]),
color = TIME_LABLE_COLOR).scale(
TIME_LABLE_SCALE_FACTOR
)
).shift(TIME_LABLE_POSITION)
new_time_lable,self.time_lable = self.time_lable,new_time_lable
args.append(
ReplacementTransform(
new_time_lable,
self.time_lable,
run_time = time_lable_run_time)
)
return args
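# Hedged usage sketch: a minimal Scene wiring for the chart above, assuming the
# BCutils helpers accept plain numpy arrays shaped (num_entities, num_times).
# The random data, entity names and year labels below are illustrative only.
class BubbleChartDemo(Scene):
    def construct(self):
        num_entities, num_times = 20, 10
        X = np.random.rand(num_entities, num_times) * 100
        Y = np.random.rand(num_entities, num_times) * 100
        R = np.random.rand(num_entities, num_times) * 0.3 + 0.05
        entity_names = ["entity_%d" % i for i in range(num_entities)]
        T = list(range(2000, 2000 + num_times))
        chart = BubbleChart(X, Y, R, entity_names, T, show_creation=False)
        self.play(ShowCreation(chart))
        for t in range(1, num_times):
            self.play(*chart.Time_Update_Animation(t))
            self.wait(0.5)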
|
inversefed/utils.py | hyunjoors/invertinggradients | 119 | 45813 | <reponame>hyunjoors/invertinggradients
"""Various utilities."""
import os
import csv
import torch
import random
import numpy as np
import socket
import datetime
def system_startup(args=None, defs=None):
"""Print useful system information."""
# Choose GPU device and print status information:
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
setup = dict(device=device, dtype=torch.float) # non_blocking=NON_BLOCKING
print('Currently evaluating -------------------------------:')
print(datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p"))
print(f'CPUs: {torch.get_num_threads()}, GPUs: {torch.cuda.device_count()} on {socket.gethostname()}.')
if args is not None:
print(args)
if defs is not None:
print(repr(defs))
if torch.cuda.is_available():
print(f'GPU : {torch.cuda.get_device_name(device=device)}')
return setup
def save_to_table(out_dir, name, dryrun, **kwargs):
"""Save keys to .csv files. Function adapted from Micah."""
# Check for file
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
fname = os.path.join(out_dir, f'table_{name}.csv')
fieldnames = list(kwargs.keys())
# Read or write header
try:
with open(fname, 'r') as f:
reader = csv.reader(f, delimiter='\t')
header = [line for line in reader][0]
except Exception as e:
print('Creating a new .csv table...')
with open(fname, 'w') as f:
writer = csv.DictWriter(f, delimiter='\t', fieldnames=fieldnames)
writer.writeheader()
if not dryrun:
# Add row for this experiment
with open(fname, 'a') as f:
writer = csv.DictWriter(f, delimiter='\t', fieldnames=fieldnames)
writer.writerow(kwargs)
print('\nResults saved to ' + fname + '.')
else:
print(f'Would save results to {fname}.')
print(f'Would save these keys: {fieldnames}.')
def set_random_seed(seed=233):
"""233 = 144 + 89 is my favorite number."""
torch.manual_seed(seed + 1)
torch.cuda.manual_seed(seed + 2)
torch.cuda.manual_seed_all(seed + 3)
np.random.seed(seed + 4)
torch.cuda.manual_seed_all(seed + 5)
random.seed(seed + 6)
def set_deterministic():
"""Switch pytorch into a deterministic computation mode."""
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
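# Hedged usage sketch: the typical call order for the helpers above inside an
# experiment script. The output directory, table name and logged keys below are
# illustrative assumptions, not part of the library's interface.
if __name__ == "__main__":
    set_random_seed(233)
    set_deterministic()
    setup = system_startup()
    save_to_table('results', name='demo', dryrun=True,
                  psnr=0.0, rec_loss=0.0, device=str(setup['device']))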
|
tests/python/cuda/test_features.py | jakeKonrad/torch-quiver | 196 | 45846 | import torch
import torch_quiver as torch_qv
import random
import numpy as np
import time
from typing import List
from quiver.shard_tensor import ShardTensor, ShardTensorConfig, Topo
from quiver.utils import reindex_feature
import torch.multiprocessing as mp
from torch.multiprocessing import Process
import os
import sys
import quiver
import torch.distributed as dist
import torch
import torch_quiver as torch_qv
import random
import numpy as np
import time
from typing import List
from quiver.shard_tensor import ShardTensor, ShardTensorConfig, Topo
from quiver.utils import reindex_feature
__all__ = ["Feature"]
class Feature:
def __init__(self,
rank,
device_list,
device_cache_size=0,
cache_policy='device_replicate',
csr_topo=None):
self.device_cache_size = device_cache_size
self.cache_policy = cache_policy
self.device_list = device_list
self.device_tensor_list = {}
self.numa_tensor_list = {}
self.rank = rank
self.topo = Topo(self.device_list)
self.csr_topo = csr_topo
        self.ipc_handle_ = None
        self.feature_order = None  # set in from_cpu_tensor() when a csr_topo is given
def cal_memory_budget_bytes(self, memory_budget):
if isinstance(memory_budget, int):
return memory_budget
elif isinstance(memory_budget, float):
memory_budget = int(memory_budget)
elif isinstance(memory_budget, str):
if memory_budget.upper().endswith(
"M") or memory_budget.upper().endswith("MB"):
end = -1 if memory_budget.upper().endswith("M") else -2
memory_budget = int(float(memory_budget[:end]) * 1024 * 1024)
elif memory_budget.upper().endswith(
"G") or memory_budget.upper().endswith("GB"):
end = -1 if memory_budget.upper().endswith("G") else -2
memory_budget = int(
float(memory_budget[:end]) * 1024 * 1024 * 1024)
else:
raise Exception("memory budget input is not valid")
return memory_budget
def cal_size(self, cpu_tensor, cache_memory_budget):
element_size = cpu_tensor.shape[1] * 4
cache_size = cache_memory_budget // element_size
return cache_size
def partition(self, cpu_tensor, cache_memory_budget):
cache_size = self.cal_size(cpu_tensor, cache_memory_budget)
return [cpu_tensor[:cache_size], cpu_tensor[cache_size:]]
def from_cpu_tensor(self, cpu_tensor):
if self.cache_policy == "device_replicate":
cache_memory_budget = self.cal_memory_budget_bytes(
self.device_cache_size)
shuffle_ratio = 0.0
else:
cache_memory_budget = self.cal_memory_budget_bytes(
self.device_cache_size) * len(self.topo.Numa2Device[0])
shuffle_ratio = self.cal_size(
cpu_tensor, cache_memory_budget) / cpu_tensor.size(0)
print(
f"LOG>>> {min(100, int(100 * cache_memory_budget / cpu_tensor.numel() / 4))}% data cached"
)
if self.csr_topo is not None:
print("Create")
cpu_tensor, self.csr_topo.feature_order = reindex_feature(
self.csr_topo, cpu_tensor, shuffle_ratio)
self.feature_order = self.csr_topo.feature_order.to(self.rank)
print("Done Create")
cache_part, self.cpu_part = self.partition(cpu_tensor,
cache_memory_budget)
self.cpu_part = self.cpu_part.clone()
if cache_part.shape[0] > 0 and self.cache_policy == "device_replicate":
for device in self.device_list:
shard_tensor = ShardTensor(self.rank, ShardTensorConfig({}))
shard_tensor.append(cache_part, device)
self.device_tensor_list[device] = shard_tensor
elif cache_part.shape[0] > 0:
numa0_device_list = self.topo.Numa2Device[0]
numa1_device_list = self.topo.Numa2Device[1]
block_size = self.cal_size(
cpu_tensor,
cache_memory_budget // len(self.topo.Numa2Device[0]))
if len(numa0_device_list) > 0:
print(
f"LOG>>> GPU {numa0_device_list} belong to the same NUMA Domain"
)
shard_tensor = ShardTensor(self.rank, ShardTensorConfig({}))
cur_pos = 0
for idx, device in enumerate(numa0_device_list):
if idx == len(numa0_device_list) - 1:
shard_tensor.append(cache_part[cur_pos:], device)
else:
shard_tensor.append(
cache_part[cur_pos:cur_pos + block_size], device)
cur_pos += block_size
self.numa_tensor_list[0] = shard_tensor
if len(numa1_device_list) > 0:
print(
f"LOG>>> GPU {numa1_device_list} belong to the same NUMA Domain"
)
shard_tensor = ShardTensor(self.rank, ShardTensorConfig({}))
cur_pos = 0
for idx, device in enumerate(numa1_device_list):
if idx == len(numa1_device_list) - 1:
shard_tensor.append(cache_part[cur_pos:], device)
else:
shard_tensor.append(
cache_part[cur_pos:cur_pos + block_size], device)
cur_pos += block_size
self.numa_tensor_list[1] = shard_tensor
        # Build the CPU tensor
if self.cpu_part.numel() > 0:
if self.cache_policy == "device_replicate":
shard_tensor = self.device_tensor_list.get(
self.rank, None) or ShardTensor(self.rank,
ShardTensorConfig({}))
shard_tensor.append(self.cpu_part, -1)
self.device_tensor_list[self.rank] = shard_tensor
else:
numa_id = self.topo.get_numa_node(self.rank)
shard_tensor = self.numa_tensor_list.get(
numa_id, None) or ShardTensor(self.rank,
ShardTensorConfig({}))
shard_tensor.append(self.cpu_part, -1)
self.numa_tensor_list[numa_id] = shard_tensor
def __getitem__(self, node_idx):
self.lazy_init_from_ipc_handle()
node_idx = node_idx.to(self.rank)
if self.feature_order is not None:
node_idx = self.feature_order[node_idx]
if self.cache_policy == "device_replicate":
shard_tensor = self.device_tensor_list[self.rank]
return shard_tensor[node_idx]
else:
numa_id = self.topo.get_numa_node(self.rank)
shard_tensor = self.numa_tensor_list[numa_id]
return shard_tensor[node_idx]
def size(self, dim):
self.lazy_init_from_ipc_handle()
if self.cache_policy == "device_replicate":
shard_tensor = self.device_tensor_list[self.rank]
return shard_tensor.size(dim)
else:
numa_id = self.topo.get_numa_node(self.rank)
shard_tensor = self.numa_tensor_list[numa_id]
return shard_tensor.size(dim)
@property
def shape(self):
self.lazy_init_from_ipc_handle()
if self.cache_policy == "device_replicate":
shard_tensor = self.device_tensor_list[self.rank]
return shard_tensor.shape
else:
numa_id = self.topo.get_numa_node(self.rank)
shard_tensor = self.numa_tensor_list[numa_id]
return shard_tensor.shape
@property
def ipc_handle(self):
return self.ipc_handle_
@ipc_handle.setter
def ipc_handle(self, ipc_handle):
self.ipc_handle_ = ipc_handle
def share_ipc(self):
gpu_ipc_handle_dict = {}
if self.cache_policy == "device_replicate":
for device in self.device_tensor_list:
gpu_ipc_handle_dict[device] = self.device_tensor_list[
device].share_ipc()[0]
else:
for numa_node in self.numa_tensor_list:
gpu_ipc_handle_dict[numa_node] = self.numa_tensor_list[
numa_node].share_ipc()[0]
return gpu_ipc_handle_dict, self.cpu_part, self.device_list, self.device_cache_size, self.cache_policy, self.csr_topo
def from_gpu_ipc_handle_dict(self, gpu_ipc_handle_dict, cpu_tensor):
if self.cache_policy == "device_replicate":
ipc_handle = gpu_ipc_handle_dict.get(
self.rank, []), cpu_tensor, ShardTensorConfig({})
shard_tensor = ShardTensor.new_from_share_ipc(
ipc_handle, self.rank)
self.device_tensor_list[self.rank] = shard_tensor
else:
numa_node = self.topo.get_numa_node(self.rank)
ipc_handle = gpu_ipc_handle_dict.get(
numa_node, []), cpu_tensor, ShardTensorConfig({})
shard_tensor = ShardTensor.new_from_share_ipc(
ipc_handle, self.rank)
self.numa_tensor_list[numa_node] = shard_tensor
self.cpu_part = cpu_tensor
@classmethod
def new_from_ipc_handle(cls, rank, ipc_handle):
gpu_ipc_handle_dict, cpu_part, device_list, device_cache_size, cache_policy, csr_topo = ipc_handle
feature = cls(rank, device_list, device_cache_size, cache_policy)
feature.from_gpu_ipc_handle_dict(gpu_ipc_handle_dict, cpu_part)
if csr_topo is not None:
feature.feature_order = csr_topo.feature_order.to(rank)
            feature.csr_topo = csr_topo
return feature
@classmethod
def lazy_from_ipc_handle(cls, ipc_handle):
gpu_ipc_handle_dict, cpu_part, device_list, device_cache_size, cache_policy, _ = ipc_handle
feature = cls(device_list[0], device_list, device_cache_size,
cache_policy)
feature.ipc_handle = ipc_handle
return feature
def lazy_init_from_ipc_handle(self):
if self.ipc_handle is None:
return
self.rank = torch.cuda.current_device()
gpu_ipc_handle_dict, cpu_part, device_list, device_cache_size, cache_policy, csr_topo = self.ipc_handle
self.from_gpu_ipc_handle_dict(gpu_ipc_handle_dict, cpu_part)
self.csr_topo = csr_topo
if csr_topo is not None:
self.feature_order = csr_topo.feature_order.to(self.rank)
self.ipc_handle = None
from multiprocessing.reduction import ForkingPickler
def rebuild_feature(ipc_handle):
print("check rebuild")
feature = Feature.lazy_from_ipc_handle(ipc_handle)
return feature
def reduce_feature(feature):
ipc_handle = feature.share_ipc()
return (rebuild_feature, (ipc_handle, ))
def rebuild_pyg_sampler(cls, ipc_handle):
sampler = cls.lazy_from_ipc_handle(ipc_handle)
return sampler
def reduce_pyg_sampler(sampler):
ipc_handle = sampler.share_ipc()
return (rebuild_pyg_sampler, (
type(sampler),
ipc_handle,
))
def init_reductions():
ForkingPickler.register(Feature, reduce_feature)
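# Hedged usage sketch (not invoked by the tests below): condenses the single-GPU
# pattern exercised by the tests into one helper, using the Feature class defined
# above. The tensor sizes and the "256M" cache budget are illustrative assumptions.
def example_feature_usage(rank=0):
    torch.cuda.set_device(rank)
    cpu_feats = torch.randn(100000, 128)
    feat = Feature(rank=rank, device_list=[rank],
                   device_cache_size="256M", cache_policy="device_replicate")
    feat.from_cpu_tensor(cpu_feats)
    idx = torch.randint(0, cpu_feats.size(0), (1024,), device=rank)
    return feat[idx]  # feature rows gathered onto GPU `rank`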
def test_feature_basic():
rank = 0
NUM_ELEMENT = 1000000
SAMPLE_SIZE = 80000
FEATURE_DIM = 600
#########################
# Init With Numpy
########################
torch.cuda.set_device(rank)
host_tensor = np.random.randint(0,
high=10,
size=(2 * NUM_ELEMENT, FEATURE_DIM))
tensor = torch.from_numpy(host_tensor).type(torch.float32)
host_indice = np.random.randint(0, 2 * NUM_ELEMENT - 1, (SAMPLE_SIZE, ))
indices = torch.from_numpy(host_indice).type(torch.long)
print("host data size", host_tensor.size * 4 // 1024 // 1024, "MB")
device_indices = indices.to(rank)
############################
# define a quiver.Feature
###########################
feature = quiver.Feature(rank=rank,
device_list=[0, 1, 2, 3],
device_cache_size="0.9G",
cache_policy="numa_replicate")
feature.from_cpu_tensor(tensor)
####################
# Indexing
####################
res = feature[device_indices]
start = time.time()
res = feature[device_indices]
consumed_time = time.time() - start
res = res.cpu().numpy()
feature_gt = tensor[indices].numpy()
print("Correctness Check : ", np.array_equal(res, feature_gt))
print(
f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {res.size * 4 / consumed_time / 1024 / 1024 / 1024} GB/s, consumed {consumed_time}s"
)
def child_proc(rank, world_size, host_tensor, feature):
torch.cuda.set_device(rank)
print(
f"Process {os.getpid()}: check current device {torch.cuda.current_device()}"
)
NUM_ELEMENT = host_tensor.shape[0]
SAMPLE_SIZE = 80000
device_tensor = host_tensor.to(rank)
bandwidth = []
for _ in range(30):
device_indices = torch.randint(0,
NUM_ELEMENT - 1, (SAMPLE_SIZE, ),
device=rank)
torch.cuda.synchronize()
start = time.time()
res = feature[device_indices]
consumed_time = time.time() - start
bandwidth.append(res.numel() * 4 / consumed_time / 1024 / 1024 / 1024)
assert torch.equal(res, device_tensor[device_indices])
print("Correctness check passed")
print(
f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {np.mean(np.array(bandwidth[1:]))} GB/s, consumed {consumed_time}s, res size {res.numel() * 4 / 1024 / 1024 / 1024}GB"
)
def test_ipc():
rank = 0
NUM_ELEMENT = 1000000
FEATURE_DIM = 600
#########################
# Init With Numpy
########################
torch.cuda.set_device(rank)
host_tensor = np.random.randint(0,
high=10,
size=(2 * NUM_ELEMENT, FEATURE_DIM))
tensor = torch.from_numpy(host_tensor).type(torch.float32)
print("host data size", host_tensor.size * 4 // 1024 // 1024, "MB")
############################
# define a quiver.Feature
###########################
feature = quiver.Feature(rank=rank,
device_list=[0, 1],
device_cache_size=0,
cache_policy="numa_replicate")
feature.from_cpu_tensor(tensor)
world_size = 2
mp.spawn(child_proc,
args=(world_size, tensor, feature),
nprocs=world_size,
join=True)
def child_proc_real_data(rank, feature, host_tensor):
NUM_ELEMENT = 2000000
SAMPLE_SIZE = 800000
bandwidth = []
torch.cuda.set_device(rank)
device_tensor = host_tensor.to(rank)
for _ in range(300):
device_indices = torch.randint(0,
NUM_ELEMENT - 1, (SAMPLE_SIZE, ),
device=rank)
torch.cuda.synchronize()
start = time.time()
res = feature[device_indices]
consumed_time = time.time() - start
bandwidth.append(res.numel() * 4 / consumed_time / 1024 / 1024 / 1024)
assert torch.equal(device_tensor[device_indices], res)
print("Correctness check passed")
print(
f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {np.mean(np.array(bandwidth[1:]))} GB/s, consumed {consumed_time}s, res size {res.numel() * 4 / 1024 / 1024 / 1024}GB"
)
def test_ipc_with_real_data():
from ogb.nodeproppred import PygNodePropPredDataset
root = "/data/data/products"
dataset = PygNodePropPredDataset('ogbn-products', root)
data = dataset[0]
world_size = torch.cuda.device_count()
##############################
# Create Sampler And Feature
##############################
csr_topo = quiver.CSRTopo(data.edge_index)
feature = torch.zeros(data.x.shape)
feature[:] = data.x
quiver_feature = Feature(rank=0,
device_list=list(range(world_size)),
device_cache_size="200M",
cache_policy="device_replicate",
csr_topo=csr_topo)
quiver_feature.from_cpu_tensor(feature)
print('Let\'s use', world_size, 'GPUs!')
mp.spawn(child_proc_real_data,
args=(quiver_feature, feature),
nprocs=world_size,
join=True)
def normal_test():
rank = 0
NUM_ELEMENT = 1000000
FEATURE_DIM = 600
SAMPLE_SIZE = 80000
#########################
# Init With Numpy
########################
torch.cuda.set_device(rank)
host_tensor = np.random.randint(0,
high=10,
size=(2 * NUM_ELEMENT, FEATURE_DIM))
tensor = torch.from_numpy(host_tensor).type(torch.float32)
host_indice = np.random.randint(0, 2 * NUM_ELEMENT - 1, (SAMPLE_SIZE, ))
indices = torch.from_numpy(host_indice).type(torch.long)
tensor.to(rank)
torch.cuda.synchronize()
start = time.time()
feature = tensor[indices]
feature = feature.to(rank)
torch.cuda.synchronize()
consumed_time = time.time() - start
print(
f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {feature.numel() * 4 / consumed_time / 1024 / 1024 / 1024} GB/s, consumed {consumed_time}s"
)
def test_paper100M():
dataset = torch.load(
"/data/papers/ogbn_papers100M/quiver_preprocess/paper100M.pth")
csr_topo = dataset["csr_topo"]
feature = dataset["sorted_feature"]
NUM_ELEMENT = feature.shape[0]
SAMPLE_SIZE = 80000
world_size = 4
rank = 0
dataset["label"] = torch.from_numpy(dataset["label"])
dataset["num_features"] = feature.shape[1]
dataset["num_classes"] = 172
quiver_sampler = quiver.pyg.GraphSageSampler(csr_topo, [15, 10, 5],
0,
mode="UVA")
quiver_feature = quiver.Feature(rank=0,
device_list=list(range(world_size)),
device_cache_size="12G",
cache_policy="numa_replicate")
quiver_feature.from_cpu_tensor(feature)
device_indices = torch.randint(0,
NUM_ELEMENT - 1, (SAMPLE_SIZE, ),
device=rank)
res = quiver_feature[device_indices]
start = time.time()
res = quiver_feature[device_indices]
consumed_time = time.time() - start
print(
f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {res.numel() * 4 / consumed_time / 1024 / 1024 / 1024} GB/s, consumed {consumed_time}s"
)
if __name__ == "__main__":
mp.set_start_method("spawn")
torch_qv.init_p2p([0, 1, 2, 3])
test_paper100M()
#init_reductions()
#test_feature_basic()
#test_ipc()
#normal_test()
#test_ipc_with_real_data()
|