max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_count (int64) | id (string) | content (string)
---|---|---|---|---|
chainercv/transforms/image/random_sized_crop.py | souravsingh/chainercv | 1,600 | 11139789 | from __future__ import division
import math
import numpy as np
import random
def random_sized_crop(img,
scale_ratio_range=(0.08, 1),
aspect_ratio_range=(3 / 4, 4 / 3),
return_param=False, copy=False):
"""Crop an image to random size and aspect ratio.
The size :math:`(H_{crop}, W_{crop})` and the left top coordinate
:math:`(y_{start}, x_{start})` of the crop are calculated as follows:
+ :math:`H_{crop} = \\lfloor{\\sqrt{s \\times H \\times W \
\\times a}}\\rfloor`
+ :math:`W_{crop} = \\lfloor{\\sqrt{s \\times H \\times W \
\\div a}}\\rfloor`
+ :math:`y_{start} \\sim Uniform\\{0, H - H_{crop}\\}`
+ :math:`x_{start} \\sim Uniform\\{0, W - W_{crop}\\}`
+ :math:`s \\sim Uniform(s_1, s_2)`
+ :math:`b \\sim Uniform(a_1, a_2)` and \
:math:`a = b` or :math:`a = \\frac{1}{b}` in 50/50 probability.
Here, :math:`s_1, s_2` are the two floats in
:obj:`scale_ratio_range` and :math:`a_1, a_2` are the two floats
in :obj:`aspect_ratio_range`.
Also, :math:`H` and :math:`W` are the height and the width of the image.
Note that :math:`s \\approx \\frac{H_{crop} \\times W_{crop}}{H \\times W}`
and :math:`a \\approx \\frac{H_{crop}}{W_{crop}}`.
The approximations come from flooring floats to integers.
.. note::
        When it fails to sample a valid scale and aspect ratio ten times
        in a row, it picks values in a non-uniform way.
If this happens, the selected scale ratio can be smaller
than :obj:`scale_ratio_range[0]`.
Args:
img (~numpy.ndarray): An image array. This is in CHW format.
scale_ratio_range (tuple of two floats): Determines
the distribution from which a scale ratio is sampled.
The default values are selected so that the area of the crop is
            8% to 100% of the original image. This is the default
setting used to train ResNets in Torch style.
aspect_ratio_range (tuple of two floats): Determines
the distribution from which an aspect ratio is sampled.
The default values are
:math:`\\frac{3}{4}` and :math:`\\frac{4}{3}`, which
are also the default setting to train ResNets in Torch style.
        return_param (bool): Returns parameters if :obj:`True`.
        copy (bool): If :obj:`False`, this function returns a view of
            the input array. Otherwise, the cropped region is copied.
Returns:
~numpy.ndarray or (~numpy.ndarray, dict):
If :obj:`return_param = False`,
returns only the cropped image.
If :obj:`return_param = True`,
returns a tuple of cropped image and :obj:`param`.
:obj:`param` is a dictionary of intermediate parameters whose
contents are listed below with key, value-type and the description
of the value.
* **y_slice** (*slice*): A slice used to crop the input image.\
The relation below holds together with :obj:`x_slice`.
* **x_slice** (*slice*): Similar to :obj:`y_slice`.
.. code::
out_img = img[:, y_slice, x_slice]
* **scale_ratio** (float): :math:`s` in the description (see above).
* **aspect_ratio** (float): :math:`a` in the description.
"""
_, H, W = img.shape
scale_ratio, aspect_ratio =\
_sample_parameters(
(H, W), scale_ratio_range, aspect_ratio_range)
H_crop = int(math.floor(np.sqrt(scale_ratio * H * W * aspect_ratio)))
W_crop = int(math.floor(np.sqrt(scale_ratio * H * W / aspect_ratio)))
y_start = random.randint(0, H - H_crop)
x_start = random.randint(0, W - W_crop)
y_slice = slice(y_start, y_start + H_crop)
x_slice = slice(x_start, x_start + W_crop)
img = img[:, y_slice, x_slice]
if copy:
img = img.copy()
if return_param:
params = {'y_slice': y_slice, 'x_slice': x_slice,
'scale_ratio': scale_ratio, 'aspect_ratio': aspect_ratio}
return img, params
else:
return img
def _sample_parameters(size, scale_ratio_range, aspect_ratio_range):
H, W = size
for _ in range(10):
aspect_ratio = random.uniform(
aspect_ratio_range[0], aspect_ratio_range[1])
if random.uniform(0, 1) < 0.5:
aspect_ratio = 1 / aspect_ratio
# This is determined so that relationships "H - H_crop >= 0" and
# "W - W_crop >= 0" are always satisfied.
scale_ratio_max = min((scale_ratio_range[1],
H / (W * aspect_ratio),
(aspect_ratio * W) / H))
scale_ratio = random.uniform(
scale_ratio_range[0], scale_ratio_range[1])
if scale_ratio_range[0] <= scale_ratio <= scale_ratio_max:
return scale_ratio, aspect_ratio
# This scale_ratio is outside the given range when
# scale_ratio_max < scale_ratio_range[0].
scale_ratio = random.uniform(
min((scale_ratio_range[0], scale_ratio_max)), scale_ratio_max)
return scale_ratio, aspect_ratio
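# --- Hedged usage sketch (not part of the original ChainerCV module) ---
# Crops a dummy CHW array and inspects the sampled parameters; the array shape
# and dtype below are illustration-only assumptions.
if __name__ == '__main__':
    dummy_img = np.random.randint(0, 256, size=(3, 480, 640)).astype(np.float32)
    out, param = random_sized_crop(dummy_img, return_param=True)
    # Up to flooring, param['scale_ratio'] approximates
    # (H_crop * W_crop) / (H * W) and param['aspect_ratio'] approximates
    # H_crop / W_crop, matching the docstring above.
    print(out.shape, param['scale_ratio'], param['aspect_ratio'])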
|
worldengine/drawing_functions.py | ctittel/worldengine | 946 | 11139808 | """
This file should contain only functions that operate on pixels, not on images,
so no references to PIL are necessary and the module can also be used through
Jython
"""
import numpy
import sys
import time
from worldengine.common import get_verbose, count_neighbours
from worldengine.common import anti_alias as anti_alias_channel
from worldengine.biome import BiomeGroup, _un_camelize
# -------------------
# Reusable functions
# -------------------
def gradient(value, low, high, low_color, high_color):
lr, lg, lb = low_color
if high == low:
return lr, lg, lb, 255
_range = float(high - low)
_x = float(value - low) / _range
_ix = 1.0 - _x
hr, hg, hb = high_color
r = int(lr * _ix + hr * _x)
g = int(lg * _ix + hg * _x)
b = int(lb * _ix + hb * _x)
return r, g, b, 255
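# Illustrative example (not part of the original module): gradient() linearly
# interpolates between two RGB colours and returns an RGBA tuple, e.g.
# gradient(5, 0, 10, (0, 0, 0), (255, 255, 255)) -> (127, 127, 127, 255).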
def rgba_to_rgb(rgba):
r, g, b, a = rgba
return r, g, b
def draw_rivers_on_image(world, target, factor=1):
"""Draw only the rivers, it expect the background to be in place
"""
for y in range(world.height):
for x in range(world.width):
if world.is_land((x, y)) and (world.layers['river_map'].data[y, x] > 0.0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 0, 128, 255))
if world.is_land((x, y)) and (world.layers['lake_map'].data[y, x] != 0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 100, 128, 255))
# -------------------
# Drawing ancient map
# -------------------
def _find_mountains_mask(world, factor):
_mask = numpy.zeros((world.height, world.width), float)
_mask[world.elevation>world.get_mountain_level()] = 1.0
# disregard elevated oceans
_mask[world.ocean] = 0.0
# this is fast but not 100% precise
# subsequent steps are fiendishly sensitive to these precision errors
# therefore the rounding
_mask[_mask>0] = numpy.around(count_neighbours(_mask, 3)[_mask>0], 6)
_mask[_mask<32.000000001] = 0.0
_mask /= 4.0
_mask = _mask.repeat(factor, 0).repeat(factor, 1)
return _mask
def _build_biome_group_masks(world, factor):
biome_groups = BiomeGroup.__subclasses__()
biome_masks = {}
for group in biome_groups:
group_mask = numpy.zeros((world.height, world.width), float)
for biome in group.__subclasses__():
group_mask[world.biome==_un_camelize(biome.__name__)] += 1.0
group_mask[group_mask>0] = count_neighbours(group_mask)[group_mask>0]
group_mask[group_mask<5.000000001] = 0.0
group_mask = group_mask.repeat(factor, 0).repeat(factor, 1)
biome_masks[_un_camelize(group.__name__)] = group_mask
return biome_masks
def _draw_shaded_pixel(pixels, x, y, r, g, b):
nb = (x ** int(y / 5) + x * 23 + y * 37 + (x * y) * 13) % 75
nr = r - nb
ng = g - nb
nb = b - nb
pixels[y, x] = (nr, ng, nb, 255)
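# Hedged sketch (not part of the original module): _draw_shaded_pixel writes one
# deterministically dithered RGBA pixel into a numpy buffer, e.g.
#   canvas = numpy.zeros((16, 16, 4), int)  # buffer shape is an assumption
#   _draw_shaded_pixel(canvas, 3, 7, 166, 148, 75)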
def _draw_forest_pattern1(pixels, x, y, c, c2):
pixels[y - 4, x + 0] = c
pixels[y - 3, x + 0] = c
pixels[y - 2, x - 1] = c
pixels[y - 2, x + 1] = c
pixels[y - 1, x - 1] = c
pixels[y - 1, x + 1] = c
pixels[y + 0, x - 2] = c
pixels[y + 0, x + 1] = c
pixels[y + 0, x + 2] = c
pixels[y + 1, x - 2] = c
pixels[y + 1, x + 2] = c
pixels[y + 2, x - 3] = c
pixels[y + 2, x - 1] = c
pixels[y + 2, x + 3] = c
pixels[y + 3, x - 3] = c
pixels[y + 3, x - 2] = c
pixels[y + 3, x - 1] = c
pixels[y + 3, x - 0] = c
pixels[y + 3, x + 1] = c
pixels[y + 3, x + 2] = c
pixels[y + 3, x + 3] = c
pixels[y + 4, x - 0] = c
pixels[y - 2, x + 0] = c2
pixels[y - 1, x + 0] = c2
pixels[y - 0, x - 1] = c2
pixels[y - 0, x - 0] = c2
pixels[y + 1, x - 1] = c2
pixels[y + 1, x - 0] = c2
pixels[y + 1, x + 1] = c2
pixels[y + 2, x - 2] = c2
pixels[y + 2, x - 0] = c2
pixels[y + 2, x + 1] = c2
pixels[y + 2, x + 2] = c2
def _draw_forest_pattern2(pixels, x, y, c, c2):
pixels[y - 4, x - 1] = c
pixels[y - 4, x - 0] = c
pixels[y - 4, x + 1] = c
pixels[y - 3, x - 2] = c
pixels[y - 3, x - 1] = c
pixels[y - 3, x + 2] = c
pixels[y - 2, x - 2] = c
pixels[y - 2, x + 1] = c
pixels[y - 2, x + 2] = c
pixels[y - 1, x - 2] = c
pixels[y - 1, x + 2] = c
pixels[y - 0, x - 2] = c
pixels[y - 0, x - 1] = c
pixels[y - 0, x + 2] = c
pixels[y + 1, x - 2] = c
pixels[y + 1, x + 1] = c
pixels[y + 1, x + 2] = c
pixels[y + 2, x - 1] = c
pixels[y + 2, x - 0] = c
pixels[y + 2, x + 1] = c
pixels[y + 3, x - 0] = c
pixels[y + 4, x - 0] = c
pixels[y - 3, x + 0] = c2
pixels[y - 3, x + 1] = c2
pixels[y - 2, x - 1] = c2
pixels[y - 2, x - 0] = c2
pixels[y - 1, x - 1] = c2
pixels[y - 1, x - 0] = c2
pixels[y - 1, x + 1] = c2
pixels[y - 0, x - 0] = c2
pixels[y - 0, x + 1] = c2
pixels[y + 1, x - 1] = c2
pixels[y + 1, x - 0] = c2
def _draw_desert_pattern(pixels, x, y, c):
pixels[y - 2, x - 1] = c
pixels[y - 2, x - 0] = c
pixels[y - 2, x + 1] = c
pixels[y - 2, x + 1] = c
pixels[y - 2, x + 2] = c
pixels[y - 1, x - 2] = c
pixels[y - 1, x - 1] = c
pixels[y - 1, x - 0] = c
pixels[y - 1, x + 4] = c
pixels[y - 0, x - 4] = c
pixels[y - 0, x - 3] = c
pixels[y - 0, x - 2] = c
pixels[y - 0, x - 1] = c
pixels[y - 0, x + 1] = c
pixels[y - 0, x + 2] = c
pixels[y - 0, x + 6] = c
pixels[y + 1, x - 5] = c
pixels[y + 1, x - 0] = c
pixels[y + 1, x + 7] = c
pixels[y + 1, x + 8] = c
pixels[y + 2, x - 8] = c
pixels[y + 2, x - 7] = c
def _draw_glacier(pixels, x, y):
rg = 255 - (x ** int(y / 5) + x * 23 + y * 37 + (x * y) * 13) % 75
pixels[y, x] = (rg, rg, 255, 255)
def _draw_cold_parklands(pixels, x, y, w, h):
b = (x ** int(y / 5) + x * 23 + y * 37 + (x * y) * 13) % 75
r = 105 - b
g = 96 - b
b = 38 - int(b / 2)
pixels[y, x] = (r, g, b, 255)
def _draw_boreal_forest(pixels, x, y, w, h):
c = (0, 32, 0, 255)
c2 = (0, 64, 0, 255)
_draw_forest_pattern1(pixels, x, y, c, c2)
def _draw_warm_temperate_forest(pixels, x, y, w, h):
c = (0, 96, 0, 255)
c2 = (0, 192, 0, 255)
_draw_forest_pattern2(pixels, x, y, c, c2)
def _draw_temperate_forest1(pixels, x, y, w, h):
c = (0, 64, 0, 255)
c2 = (0, 96, 0, 255)
_draw_forest_pattern1(pixels, x, y, c, c2)
def _draw_temperate_forest2(pixels, x, y, w, h):
c = (0, 64, 0, 255)
c2 = (0, 112, 0, 255)
_draw_forest_pattern2(pixels, x, y, c, c2)
def _draw_tropical_dry_forest(pixels, x, y, w, h):
c = (51, 36, 3, 255)
c2 = (139, 204, 58, 255)
_draw_forest_pattern2(pixels, x, y, c, c2)
def _draw_jungle(pixels, x, y, w, h):
c = (0, 128, 0, 255)
c2 = (0, 255, 0, 255)
_draw_forest_pattern2(pixels, x, y, c, c2)
def _draw_cool_desert(pixels, x, y, w, h):
c = (72, 72, 53, 255)
# c2 = (219, 220, 200, 255) # TODO: not used?
_draw_desert_pattern(pixels, x, y, c)
def _draw_hot_desert(pixels, x, y, w, h):
c = (72, 72, 53, 255)
# c2 = (219, 220, 200, 255) # TODO: not used?
_draw_desert_pattern(pixels, x, y, c)
def _draw_tundra(pixels, x, y, w, h):
    _draw_shaded_pixel(pixels, x, y, 166, 148, 75)
def _draw_steppe(pixels, x, y, w, h):
_draw_shaded_pixel(pixels, x, y, 96, 192, 96)
def _draw_chaparral(pixels, x, y, w, h):
_draw_shaded_pixel(pixels, x, y, 180, 171, 113)
def _draw_savanna(pixels, x, y, w, h):
_draw_shaded_pixel(pixels, x, y, 255, 246, 188)
# TODO: complete and enable this one
def _dynamic_draw_a_mountain(pixels, rng, x, y, w=3, h=3):
# mcl = (0, 0, 0, 255) # TODO: No longer used?
# mcll = (128, 128, 128, 255)
mcr = (75, 75, 75, 255)
# left edge
last_leftborder = None
for mody in range(-h, h + 1):
bottomness = (float(mody + h) / 2.0) / w
min_leftborder = int(bottomness * w * 0.66)
        if last_leftborder is not None:
min_leftborder = max(min_leftborder, last_leftborder - 1)
max_leftborder = int(bottomness * w * 1.33)
        if last_leftborder is not None:
max_leftborder = min(max_leftborder, last_leftborder + 1)
leftborder = int(bottomness * w) + rng.randint(-2, 2)/2
if leftborder < min_leftborder:
leftborder = min_leftborder
if leftborder > max_leftborder:
leftborder = max_leftborder
last_leftborder = leftborder
darkarea = int(bottomness * w / 2)
lightarea = int(bottomness * w / 2)
for itx in range(darkarea, leftborder + 1):
pixels[y + mody, x - itx] = gradient(itx, darkarea, leftborder,
(0, 0, 0), (64, 64, 64))
for itx in range(-darkarea, lightarea + 1):
pixels[y + mody, x - itx] = gradient(itx, -darkarea, lightarea,
(64, 64, 64), (128, 128, 128))
for itx in range(lightarea, leftborder):
pixels[y + mody, x - itx] = (181, 166, 127, 255) # land_color
# right edge
last_modx = None
for mody in range(-h, h + 1):
bottomness = (float(mody + h) / 2.0) / w
min_modx = int(bottomness * w * 0.66)
        if last_modx is not None:
min_modx = max(min_modx, last_modx - 1)
max_modx = int(bottomness * w * 1.33)
        if last_modx is not None:
max_modx = min(max_modx, last_modx + 1)
modx = int(bottomness * w) + numpy.random.randint(-2, 2)/2
if modx < min_modx:
modx = min_modx
if modx > max_modx:
modx = max_modx
last_modx = modx
        pixels[y + mody, x + modx] = mcr  # use the right-edge offset computed above, not the stale left-edge index
def _draw_a_mountain(pixels, x, y, w=3, h=3):
# mcl = (0, 0, 0, 255) # TODO: No longer used?
# mcll = (128, 128, 128, 255)
mcr = (75, 75, 75, 255)
# left edge
for mody in range(-h, h + 1):
bottomness = (float(mody + h) / 2.0) / w
leftborder = int(bottomness * w)
darkarea = int(bottomness * w / 2)
lightarea = int(bottomness * w / 2)
for itx in range(darkarea, leftborder + 1):
pixels[y + mody, x - itx] = gradient(itx, darkarea, leftborder,
(0, 0, 0), (64, 64, 64))
for itx in range(-darkarea, lightarea + 1):
pixels[y + mody, x + itx] = gradient(itx, -darkarea, lightarea,
(64, 64, 64), (128, 128, 128))
for itx in range(lightarea, leftborder):
pixels[y + mody, x + itx] = (181, 166, 127, 255) # land_color
# right edge
for mody in range(-h, h + 1):
bottomness = (float(mody + h) / 2.0) / w
modx = int(bottomness * w)
pixels[y + mody, x + modx] = mcr
def draw_ancientmap(world, target, resize_factor=1,
sea_color=(212, 198, 169, 255),
draw_biome = True, draw_rivers = True, draw_mountains = True,
draw_outer_land_border = False, verbose=get_verbose()):
rng = numpy.random.RandomState(world.seed) # create our own random generator
if verbose:
start_time = time.time()
land_color = (
181, 166, 127, 255) # TODO: Put this in the argument list too??
scaled_ocean = world.ocean.repeat(resize_factor, 0).repeat(resize_factor, 1)
borders = numpy.zeros((resize_factor * world.height, resize_factor * world.width), bool)
borders[count_neighbours(scaled_ocean) > 0] = True
borders[scaled_ocean] = False
# cache neighbours count at different radii
border_neighbours = {}
border_neighbours[6] = numpy.rint(count_neighbours(borders, 6))
border_neighbours[9] = numpy.rint(count_neighbours(borders, 9))
if draw_outer_land_border:
inner_borders = borders
outer_borders = None
for i in range(2):
_outer_borders = numpy.zeros((resize_factor * world.height, resize_factor * world.width), bool)
_outer_borders[count_neighbours(inner_borders) > 0] = True
_outer_borders[inner_borders] = False
_outer_borders[numpy.logical_not(scaled_ocean)] = False
outer_borders = _outer_borders
inner_borders = outer_borders
if draw_mountains:
mountains_mask = _find_mountains_mask(world, resize_factor)
if draw_biome:
biome_masks = _build_biome_group_masks(world, resize_factor)
def _draw_biome(name, _func, w, h, r, _alt_func = None):
if verbose:
start_time = time.time()
for y in range(resize_factor * world.height):
for x in range(resize_factor * world.width):
if biome_masks[name][y, x] > 0:
if r == 0 or border_neighbours[r][y,x] <= 2:
if _alt_func is not None and rng.random_sample() > .5:
_alt_func(target, x, y, w, h)
else:
_func(target, x, y, w, h)
biome_masks[name][y-r:y+r+1,x-r:x+r+1] = 0.0
if verbose:
elapsed_time = time.time() - start_time
print(
"...drawing_functions.draw_ancientmap: " + name +
" Elapsed time " + str(elapsed_time) + " seconds.")
if verbose:
elapsed_time = time.time() - start_time
print(
"...drawing_functions.draw_oldmap_on_pixel: init Elapsed time " +
str(elapsed_time) + " seconds.")
sys.stdout.flush()
if verbose:
start_time = time.time()
border_color = (0, 0, 0, 255)
outer_border_color = gradient(0.5, 0, 1.0, rgba_to_rgb(border_color), rgba_to_rgb(sea_color))
# start in low resolution
num_channels = 4
channels = numpy.zeros((num_channels, world.height, world.width), int)
for c in range(num_channels):
channels[c] = land_color[c]
channels[c][world.ocean] = sea_color[c]
# now go full resolution
channels = channels.repeat(resize_factor, 1).repeat(resize_factor, 2)
if draw_outer_land_border:
for c in range(num_channels):
channels[c][outer_borders] = outer_border_color[c]
for c in range(num_channels):
channels[c][borders] = border_color[c]
if verbose:
elapsed_time = time.time() - start_time
print(
"...drawing_functions.draw_oldmap_on_pixel: color ocean " +
"Elapsed time " + str(elapsed_time) + " seconds.")
if verbose:
start_time = time.time()
# don't anti-alias the alpha channel
for c in range(num_channels-1):
channels[c] = anti_alias_channel(channels[c], 1)
# switch from channel major storage to pixel major storage
for c in range(num_channels):
target[:,:,c] = channels[c,:,:]
if verbose:
elapsed_time = time.time() - start_time
print(
"...drawing_functions.draw_oldmap_on_pixel: anti alias " +
"Elapsed time " + str(elapsed_time) + " seconds.")
if draw_biome:
# Draw glacier
if verbose:
start_time = time.time()
for y in range(resize_factor * world.height):
for x in range(resize_factor * world.width):
if not borders[y, x] and world.is_iceland(
(int(x / resize_factor), int(y / resize_factor))):
_draw_glacier(target, x, y)
if verbose:
elapsed_time = time.time() - start_time
print(
"...drawing_functions.draw_oldmap_on_pixel: draw glacier " +
"Elapsed time " + str(elapsed_time) + " seconds.")
_draw_biome('tundra', _draw_tundra, 0, 0, 0)
_draw_biome('cold parklands', _draw_cold_parklands, 0, 0, 0)
_draw_biome('steppe', _draw_steppe, 0, 0, 0)
_draw_biome('chaparral', _draw_chaparral, 0, 0, 0)
_draw_biome('savanna', _draw_savanna, 0, 0, 0)
_draw_biome('cool desert', _draw_cool_desert, 8, 2, 9)
_draw_biome('hot desert', _draw_hot_desert, 8, 2, 9)
_draw_biome('boreal forest', _draw_boreal_forest, 4, 5, 6)
_draw_biome('cool temperate forest', _draw_temperate_forest1, 4, 5, 6,
_draw_temperate_forest2)
_draw_biome('warm temperate forest', _draw_warm_temperate_forest, 4, 5, 6)
_draw_biome('tropical dry forest group', _draw_tropical_dry_forest, 4, 5, 6)
_draw_biome('jungle', _draw_jungle, 4, 5, 6)
# TODO: there was a stub for a rock desert biome group
# it should be super easy to introduce that group with the new
# biome group concept but since it did nothing I removed the stub
if draw_rivers:
draw_rivers_on_image(world, target, resize_factor)
# Draw mountains
if draw_mountains:
if verbose:
start_time = time.time()
for y in range(resize_factor * world.height):
for x in range(resize_factor * world.width):
if mountains_mask[y, x] > 0:
w = mountains_mask[y, x]
h = 3 + int(world.level_of_mountain(
(int(x / resize_factor), int(y / resize_factor))))
r = max(int(w / 3 * 2), h)
if r not in border_neighbours:
border_neighbours[r] = numpy.rint(count_neighbours(borders, r))
if border_neighbours[r][y,x] <= 2:
_draw_a_mountain(target, x, y, w=w, h=h)
mountains_mask[y-r:y+r+1,x-r:x+r+1] = 0.0
if verbose:
elapsed_time = time.time() - start_time
print(
"...drawing_functions.draw_oldmap_on_pixel: draw mountains " +
"Elapsed time " + str(elapsed_time) + " seconds.")
|
opsdroid/testing/mockmodules/skills/skill/skilltest/mock.py | JiahnChoi/opsdroid.kr | 712 | 11139828 | """Mock skill."""
|
coverage/disposition.py | timofurrer/coveragepy | 2,254 | 11139834 | <reponame>timofurrer/coveragepy
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Simple value objects for tracking what to do with files."""
class FileDisposition:
"""A simple value type for recording what to do with a file."""
pass
# FileDisposition "methods": FileDisposition is a pure value object, so it can
# be implemented in either C or Python. Acting on them is done with these
# functions.
def disposition_init(cls, original_filename):
"""Construct and initialize a new FileDisposition object."""
disp = cls()
disp.original_filename = original_filename
disp.canonical_filename = original_filename
disp.source_filename = None
disp.trace = False
disp.reason = ""
disp.file_tracer = None
disp.has_dynamic_filename = False
return disp
def disposition_debug_msg(disp):
"""Make a nice debug message of what the FileDisposition is doing."""
if disp.trace:
msg = f"Tracing {disp.original_filename!r}"
if disp.original_filename != disp.source_filename:
msg += f" as {disp.source_filename!r}"
if disp.file_tracer:
msg += ": will be traced by %r" % disp.file_tracer
else:
msg = f"Not tracing {disp.original_filename!r}: {disp.reason}"
return msg
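# --- Hedged usage sketch (not part of the original coverage.py module) ---
# Builds a FileDisposition via disposition_init and prints its debug message;
# the file name is an illustrative placeholder.
if __name__ == "__main__":
    disp = disposition_init(FileDisposition, "example/module.py")
    disp.trace = True
    disp.source_filename = disp.canonical_filename
    print(disposition_debug_msg(disp))  # -> Tracing 'example/module.py'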
|
cctbx/sgtbx/direct_space_asu/proto/__init__.py | dperl-sol/cctbx_project | 155 | 11139838 | from __future__ import absolute_import, division, print_function
import sys
import boost_adaptbx.boost.python as bp
ext = bp.import_ext("cctbx_sgtbx_asu_ext")
from cctbx_sgtbx_asu_ext import *
def asu_show_(asu, f=None):
if f is None:
f = sys.stdout
print(asu.as_string(), file=f)
direct_space_asu.show_comprehensive_summary = asu_show_
|
tests/init_test.py | RaSan147/python | 283 | 11139871 | import ipinfo
from ipinfo.handler import Handler
from ipinfo.handler_async import AsyncHandler
def test_get_handler():
handler = ipinfo.getHandler()
assert isinstance(handler, Handler)
def test_get_handler_async():
handler = ipinfo.getHandlerAsync()
assert isinstance(handler, AsyncHandler)
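# Hedged note (not part of the original tests): both factory functions also
# accept an API access token, e.g. ipinfo.getHandler("<YOUR_TOKEN>"); the token
# string here is a placeholder, not a real credential.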
|
src/sparsezoo/requests/download.py | signalism/sparsezoo | 116 | 11139894 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code related to wrapping around API calls under api.neuralmagic.com/[object]/download
"""
import logging
from typing import Dict, Union
import requests
from sparsezoo.requests.authentication import get_auth_header
from sparsezoo.requests.base import (
MODELS_API_URL,
RECIPES_API_URL,
SPARSEZOO_TEST_MODE,
ModelArgs,
)
__all__ = [
"download_get_request",
"download_model_get_request",
"download_recipe_get_request",
"DOWNLOAD_PATH",
]
_LOGGER = logging.getLogger(__name__)
DOWNLOAD_PATH = "download"
def download_get_request(
base_url: str,
args: Union[ModelArgs, str],
sub_path: Union[str, None] = None,
force_token_refresh: bool = False,
) -> Dict:
"""
Get a downloadable object from the sparsezoo for any objects matching the args
The path called has structure:
[base_url]/download/[args.stub]/{sub_path}
:param base_url: the base url
:param args: the model args describing what should be downloaded for
:param sub_path: the sub path from the model path if any e.g.
file_name for models api or recipe_type for the recipes api
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: the json response as a dict
"""
header = get_auth_header(force_token_refresh=force_token_refresh)
path = args if isinstance(args, str) else args.stub
url = f"{base_url}/{DOWNLOAD_PATH}/{path}"
if sub_path:
url = f"{url}/{sub_path}"
download_args = []
if hasattr(args, "release_version") and args.release_version:
download_args.append(f"release_version={args.release_version}")
if SPARSEZOO_TEST_MODE:
download_args.append("increment_download=False")
if download_args:
url = f"{url}?{'&'.join(download_args)}"
_LOGGER.debug(f"GET download from {url}")
response = requests.get(url=url, headers=header)
response.raise_for_status()
response_json = response.json()
return response_json
def download_model_get_request(
args: Union[ModelArgs, str],
file_name: Union[str, None] = None,
force_token_refresh: bool = False,
) -> Dict:
"""
Get a downloadable model from the sparsezoo for any objects matching the args
:param args: the model args describing what should be downloaded for
:param file_name: the name of the file, if any, to get download info for
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: the json response as a dict
"""
return download_get_request(
base_url=MODELS_API_URL,
args=args,
sub_path=file_name,
force_token_refresh=force_token_refresh,
)
def download_recipe_get_request(
args: Union[ModelArgs, str],
recipe_type: Union[str, None] = None,
force_token_refresh: bool = False,
):
"""
Get a downloadable recipe from the sparsezoo for any objects matching the args
:param args: the model args describing what should be downloaded for
:param recipe_type: the recipe_type to get download info for if not original
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: the json response as a dict
"""
return download_get_request(
base_url=RECIPES_API_URL,
args=args,
sub_path=recipe_type,
force_token_refresh=force_token_refresh,
)
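# --- Hedged usage sketch (not part of the original SparseZoo module) ---
# The stub and file name below are hypothetical and only illustrate the call
# shape; a real call needs a valid SparseZoo stub, network access and auth.
#
#   response_json = download_model_get_request(
#       args="zoo/<hypothetical-model-stub>", file_name="model.onnx"
#   )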
|
examples/volumetric/tet_threshold.py | evanphilip/vedo | 836 | 11139907 | <reponame>evanphilip/vedo
"""Threshold the original TetMesh
with a scalar array"""
from vedo import *
settings.useDepthPeeling = True
tetm = TetMesh(dataurl+'limb_ugrid.vtk')
tetm.color('prism').alpha([0,1])
# Threshold the tetrahedral mesh for values in the range:
tetm.threshold(above=0.9, below=1)
tetm.addScalarBar3D(title='chem_0 expression levels', c='k', italic=1)
show([(tetm,__doc__),
tetm.tomesh(shrink=0.9),
], N=2, axes=1,
).close()
|
assets/src/ba_data/python/bastd/ui/report.py | Benefit-Zebra/ballistica | 317 | 11139918 | # Released under the MIT License. See LICENSE for details.
#
"""UI related to reporting bad behavior/etc."""
from __future__ import annotations
import _ba
import ba
class ReportPlayerWindow(ba.Window):
"""Player for reporting naughty players."""
def __init__(self, account_id: str, origin_widget: ba.Widget):
self._width = 550
self._height = 220
self._account_id = account_id
self._transition_out = 'out_scale'
scale_origin = origin_widget.get_screen_space_center()
overlay_stack = _ba.get_special_widget('overlay_stack')
uiscale = ba.app.ui.uiscale
super().__init__(root_widget=ba.containerwidget(
size=(self._width, self._height),
parent=overlay_stack,
transition='in_scale',
scale_origin_stack_offset=scale_origin,
scale=(1.8 if uiscale is ba.UIScale.SMALL else
1.35 if uiscale is ba.UIScale.MEDIUM else 1.0)))
self._cancel_button = ba.buttonwidget(parent=self._root_widget,
scale=0.7,
position=(40, self._height - 50),
size=(50, 50),
label='',
on_activate_call=self.close,
autoselect=True,
color=(0.4, 0.4, 0.5),
icon=ba.gettexture('crossOut'),
iconscale=1.2)
ba.containerwidget(edit=self._root_widget,
cancel_button=self._cancel_button)
ba.textwidget(parent=self._root_widget,
position=(self._width * 0.5, self._height * 0.64),
size=(0, 0),
color=(1, 1, 1, 0.8),
scale=1.2,
h_align='center',
v_align='center',
text=ba.Lstr(resource='reportThisPlayerReasonText'),
maxwidth=self._width * 0.85)
ba.buttonwidget(parent=self._root_widget,
size=(235, 60),
position=(20, 30),
label=ba.Lstr(resource='reportThisPlayerLanguageText'),
on_activate_call=self._on_language_press,
autoselect=True)
ba.buttonwidget(parent=self._root_widget,
size=(235, 60),
position=(self._width - 255, 30),
label=ba.Lstr(resource='reportThisPlayerCheatingText'),
on_activate_call=self._on_cheating_press,
autoselect=True)
def _on_language_press(self) -> None:
from urllib import parse
_ba.add_transaction({
'type': 'REPORT_ACCOUNT',
'reason': 'language',
'account': self._account_id
})
body = ba.Lstr(resource='reportPlayerExplanationText').evaluate()
ba.open_url('mailto:<EMAIL>'
f'?subject={_ba.appnameupper()} Player Report: ' +
self._account_id + '&body=' + parse.quote(body))
self.close()
def _on_cheating_press(self) -> None:
from urllib import parse
_ba.add_transaction({
'type': 'REPORT_ACCOUNT',
'reason': 'cheating',
'account': self._account_id
})
body = ba.Lstr(resource='reportPlayerExplanationText').evaluate()
ba.open_url('mailto:<EMAIL>'
f'?subject={_ba.appnameupper()} Player Report: ' +
self._account_id + '&body=' + parse.quote(body))
self.close()
def close(self) -> None:
"""Close the window."""
ba.containerwidget(edit=self._root_widget, transition='out_scale')
|
code/ReplaceDenormals.py | starimeL/PytorchConverter | 411 | 11139926 | <reponame>starimeL/PytorchConverter
import numpy as np
import torch
def ReplaceDenormals(net):
for name, param in net.named_parameters():
np_arr = param.data.numpy()
for x in np.nditer(np_arr, op_flags=['readwrite']):
if abs(x) < 1e-30:
x[...] = 1e-30
param.data = torch.from_numpy(np_arr)
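# --- Hedged usage sketch (not part of the original file) ---
# Injects a denormal-scale weight into a tiny linear layer and cleans it up;
# the layer size and the injected value are illustration-only assumptions.
if __name__ == '__main__':
    net = torch.nn.Linear(2, 2)
    with torch.no_grad():
        net.weight[0, 0] = 1e-40  # below the 1e-30 threshold used above
    ReplaceDenormals(net)
    print(net.weight)  # the offending entry is now clamped to 1e-30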
|
keras_extensions/rbm.py | xgenpanda/keras_extension | 225 | 11139929 | <gh_stars>100-1000
from __future__ import division
import numpy as np
from keras import initializations, regularizers, constraints
from keras import backend as K
from keras.layers.core import Layer, Dense
from .backend import random_binomial
import theano
import theano.tensor as T
class RBM(Layer):
"""
Bernoulli-Bernoulli Restricted Boltzmann Machine (RBM).
"""
# keras.core.Layer part (modified from keras.core.Dense)
# ------------------------------------------------------
def __init__(self, input_dim, hidden_dim, init='glorot_uniform', weights=None, name=None,
W_regularizer=None, bx_regularizer=None, bh_regularizer=None, #activity_regularizer=None,
W_constraint=None, bx_constraint=None, bh_constraint=None):
super(RBM, self).__init__()
self.init = initializations.get(init)
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.input = K.placeholder(ndim = 2)
self.W = self.init((self.input_dim, self.hidden_dim))
self.bx = K.zeros((self.input_dim))
self.bh = K.zeros((self.hidden_dim))
self.params = [self.W, self.bx, self.bh]
self.regularizers = []
self.W_regularizer = regularizers.get(W_regularizer)
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
self.bx_regularizer = regularizers.get(bx_regularizer)
if self.bx_regularizer:
self.bx_regularizer.set_param(self.bx)
self.regularizers.append(self.bx_regularizer)
self.bh_regularizer = regularizers.get(bh_regularizer)
if self.bh_regularizer:
self.bh_regularizer.set_param(self.bh)
self.regularizers.append(self.bh_regularizer)
#self.activity_regularizer = regularizers.get(activity_regularizer)
#if self.activity_regularizer:
# self.activity_regularizer.set_layer(self)
# self.regularizers.append(self.activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.bx_constraint = constraints.get(bx_constraint)
self.bh_constraint = constraints.get(bh_constraint)
self.constraints = [self.W_constraint, self.bx_constraint, self.bh_constraint]
if weights is not None:
self.set_weights(weights)
if name is not None:
self.set_name(name)
def set_name(self, name):
self.W.name = '%s_W' % name
self.bx.name = '%s_bx' % name
self.bh.name = '%s_bh' % name
@property
def nb_input(self):
return 1
@property
def nb_output(self):
return 0 # RBM has no output, use get_h_given_x_layer(), get_x_given_h_layer() instead
def get_input(self, train=False):
return self.input
def get_output(self, train=False):
return None # RBM has no output, use get_h_given_x_layer(), get_x_given_h_layer() instead
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"hidden_dim": self.hidden_dim,
"init": self.init.__name__,
"W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
"bx_regularizer": self.bx_regularizer.get_config() if self.bx_regularizer else None,
"bh_regularizer": self.bh_regularizer.get_config() if self.bh_regularizer else None,
#"activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
"W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
"bx_constraint": self.bx_constraint.get_config() if self.bx_constraint else None,
"bh_constraint": self.bh_constraint.get_config() if self.bh_constraint else None}
# persistence, copied from keras.models.Sequential
def save_weights(self, filepath, overwrite=False):
# Save weights to HDF5
import h5py
import os.path
# if file exists and should not be overwritten
if not overwrite and os.path.isfile(filepath):
import sys
get_input = input
if sys.version_info[:2] <= (2, 7):
get_input = raw_input
overwrite = get_input('[WARNING] %s already exists - overwrite? [y/n]' % (filepath))
while overwrite not in ['y', 'n']:
overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).')
if overwrite == 'n':
return
print('[TIP] Next time specify overwrite=True in save_weights!')
f = h5py.File(filepath, 'w')
weights = self.get_weights()
f.attrs['nb_params'] = len(weights)
for n, param in enumerate(weights):
param_name = 'param_{}'.format(n)
param_dset = f.create_dataset(param_name, param.shape, dtype=param.dtype)
param_dset[:] = param
f.flush()
f.close()
def load_weights(self, filepath):
# Loads weights from HDF5 file
import h5py
f = h5py.File(filepath)
weights = [f['param_{}'.format(p)] for p in range(f.attrs['nb_params'])]
self.set_weights(weights)
f.close()
# -------------
# RBM internals
# -------------
def free_energy(self, x):
"""
Compute free energy for Bernoulli RBM, given visible units.
The marginal probability p(x) = sum_h 1/Z exp(-E(x, h)) can be re-arranged to the form
p(x) = 1/Z exp(-F(x)), where the free energy F(x) = -sum_j=1^H log(1 + exp(x^T W[:,j] + bh_j)) - bx^T x,
in case of the Bernoulli RBM energy function.
"""
wx_b = K.dot(x, self.W) + self.bh
hidden_term = K.sum(K.log(1 + K.exp(wx_b)), axis=1)
vbias_term = K.dot(x, self.bx)
return -hidden_term - vbias_term
def sample_h_given_x(self, x):
"""
Draw sample from p(h|x).
For Bernoulli RBM the conditional probability distribution can be derived to be
p(h_j=1|x) = sigmoid(x^T W[:,j] + bh_j).
"""
h_pre = K.dot(x, self.W) + self.bh # pre-sigmoid (used in cross-entropy error calculation for better numerical stability)
h_sigm = K.sigmoid(h_pre) # mean of Bernoulli distribution ('p', prob. of variable taking value 1), sometimes called mean-field value
h_samp = random_binomial(shape=h_sigm.shape, n=1, p=h_sigm)
# random sample
# \hat{h} = 1, if p(h=1|x) > uniform(0, 1)
# 0, otherwise
# pre and sigm are returned to compute cross-entropy
return h_samp, h_pre, h_sigm
def sample_x_given_h(self, h):
"""
Draw sample from p(x|h).
For Bernoulli RBM the conditional probability distribution can be derived to be
p(x_i=1|h) = sigmoid(W[i,:] h + bx_i).
"""
x_pre = K.dot(h, self.W.T) + self.bx # pre-sigmoid (used in cross-entropy error calculation for better numerical stability)
x_sigm = K.sigmoid(x_pre) # mean of Bernoulli distribution ('p', prob. of variable taking value 1), sometimes called mean-field value
x_samp = random_binomial(shape=x_sigm.shape, n=1, p=x_sigm)
# random sample
# \hat{x} = 1, if p(x=1|h) > uniform(0, 1)
# 0, otherwise
# pre and sigm are returned to compute cross-entropy
return x_samp, x_pre, x_sigm
def gibbs_xhx(self, x0):
"""
Perform one step of Gibbs sampling, starting from visible sample.
h1 ~ p(h|x0)
x1 ~ p(x|h1)
"""
h1, h1_pre, h1_sigm = self.sample_h_given_x(x0)
x1, x1_pre, x1_sigm = self.sample_x_given_h(h1)
# pre and sigm are returned to compute cross-entropy
return x1, x1_pre, x1_sigm
def mcmc_chain(self, x, nb_gibbs_steps):
"""
Perform Markov Chain Monte Carlo, run k steps of Gibbs sampling,
starting from visible data, return point estimate at end of chain.
x0 (data) -> h1 -> x1 -> ... -> xk (reconstruction, negative sample)
"""
xi = x
for i in xrange(nb_gibbs_steps):
xi, xi_pre, xi_sigm = self.gibbs_xhx(xi)
x_rec, x_rec_pre, x_rec_sigm = xi, xi_pre, xi_sigm
x_rec = theano.gradient.disconnected_grad(x_rec) # avoid back-propagating gradient through the Gibbs sampling
# this is similar to T.grad(.., consider_constant=[chain_end])
# however, as grad() is called in keras.optimizers.Optimizer,
# we do it here instead to avoid having to change Keras' code
return x_rec, x_rec_pre, x_rec_sigm
def contrastive_divergence_loss(self, nb_gibbs_steps=1):
"""
Compute contrastive divergence loss with k steps of Gibbs sampling (CD-k).
Result is a Theano expression with the form loss = f(x).
"""
def loss(x):
x_rec, _, _ = self.mcmc_chain(x, nb_gibbs_steps)
cd = K.mean(self.free_energy(x)) - K.mean(self.free_energy(x_rec))
return cd
return loss
def reconstruction_loss(self, nb_gibbs_steps=1):
"""
Compute binary cross-entropy between the binary input data and the reconstruction generated by the model.
Result is a Theano expression with the form loss = f(x).
Useful as a rough indication of training progress (see Hinton2010).
Summed over feature dimensions, mean over samples.
"""
def loss(x):
_, pre, _ = self.mcmc_chain(x, nb_gibbs_steps)
# NOTE:
# when computing log(sigmoid(x)) and log(1 - sigmoid(x)) of cross-entropy,
# if x is very big negative, sigmoid(x) will be 0 and log(0) will be nan or -inf
# if x is very big positive, sigmoid(x) will be 1 and log(1-0) will be nan or -inf
# Theano automatically rewrites this kind of expression using log(sigmoid(x)) = -softplus(-x), which
# is more stable numerically
# however, as the sigmoid() function used in the reconstruction is inside a scan() operation, Theano
# doesn't 'see' it and is not able to perform the change; as a work-around we use pre-sigmoid value
# generated inside the scan() and apply the sigmoid here
#
# NOTE:
# not sure how important this is; in most cases seems to work fine using just T.nnet.binary_crossentropy()
# for instance; keras.objectives.binary_crossentropy() simply clips the value entering the log(); and
# this is only used for monitoring, not calculating gradient
cross_entropy_loss = -T.mean(T.sum(x*T.log(T.nnet.sigmoid(pre)) + (1 - x)*T.log(1 - T.nnet.sigmoid(pre)), axis=1))
return cross_entropy_loss
return loss
def free_energy_gap(self, x_train, x_test):
"""
Computes the free energy gap between train and test set, F(x_test) - F(x_train).
In order to avoid overfitting, we cannot directly monitor if the probability of held out data is
starting to decrease, due to the partition function.
We can however compute the ratio p(x_train)/p(x_test), because here the partition functions cancel out.
This ratio should be close to 1, if it is > 1, the model may be overfitting.
The ratio can be compute as,
r = p(x_train)/p(x_test) = exp(-F(x_train) + F(x_test)).
Alternatively, we compute the free energy gap,
gap = F(x_test) - F(x_train),
where F(x) indicates the mean free energy of test data and a representative subset of
training data respectively.
The gap should around 0 normally, but when it starts to grow, the model may be overfitting.
However, even when the gap is growing, the probability of the training data may be growing even faster,
so the probability of the test data may still be improving.
See: Hinton, "A Practical Guide to Training Restricted Boltzmann Machines", UTML TR 2010-003, 2010, section 6.
"""
return T.mean(self.free_energy(x_train)) - T.mean(self.free_energy(x_test))
def get_h_given_x_layer(self, as_initial_layer=False):
"""
Generates a new Dense Layer that computes mean of Bernoulli distribution p(h|x), ie. p(h=1|x).
"""
if as_initial_layer:
layer = Dense(input_dim=self.input_dim, output_dim=self.hidden_dim, activation='sigmoid', weights=[self.W.get_value(), self.bh.get_value()])
else:
layer = Dense(output_dim=self.hidden_dim, activation='sigmoid', weights=[self.W.get_value(), self.bh.get_value()])
return layer
def get_x_given_h_layer(self, as_initial_layer=False):
"""
Generates a new Dense Layer that computes mean of Bernoulli distribution p(x|h), ie. p(x=1|h).
"""
if as_initial_layer:
layer = Dense(input_dim=self.hidden_dim, output_dim=self.input_dim, activation='sigmoid', weights=[self.W.get_value().T, self.bx.get_value()])
else:
layer = Dense(output_dim=self.input_dim, activation='sigmoid', weights=[self.W.get_value().T, self.bx.get_value()])
return layer
class GBRBM(RBM):
"""
Gaussian-Bernoulli Restricted Boltzmann Machine (GB-RBM).
This GB-RBM does not learn variances of Gaussian units, but instead fixes them to 1 and
uses noise-free reconstructions. Input data should be pre-processed to have zero mean
and unit variance along the feature dimensions.
See: Hinton, "A Practical Guide to Training Restricted Boltzmann Machines", UTML TR 2010-003, 2010, section 13.2.
"""
def __init__(self, input_dim, hidden_dim, init='glorot_uniform', weights=None, name=None,
W_regularizer=None, bx_regularizer=None, bh_regularizer=None, #activity_regularizer=None,
W_constraint=None, bx_constraint=None, bh_constraint=None):
super(GBRBM, self).__init__(input_dim, hidden_dim, init, weights, name,
W_regularizer, bx_regularizer, bh_regularizer, #activity_regularizer,
W_constraint, bx_constraint, bh_constraint)
# inherited RBM functions same as BB-RBM
# -------------
# RBM internals
# -------------
def free_energy(self, x):
wx_b = K.dot(x, self.W) + self.bh
vbias_term = 0.5*K.sum((x - self.bx)**2, axis=1)
hidden_term = K.sum(K.log(1 + K.exp(wx_b)), axis=1)
return -hidden_term + vbias_term
# sample_h_given_x() same as BB-RBM
def sample_x_given_h(self, h):
"""
Draw sample from p(x|h).
For Gaussian-Bernoulli RBM the conditional probability distribution can be derived to be
p(x_i|h) = norm(x_i; sigma_i W[i,:] h + bx_i, sigma_i^2).
"""
x_mean = K.dot(h, self.W.T) + self.bx
x_samp = x_mean
# variances of the Gaussian units are not learned,
# instead we fix them to 1 in the energy function
# here, instead of sampling from the Gaussian distributions,
# we simply take their means; we'll end up with a noise-free reconstruction
# here last two returns are dummy variables related to Bernoulli RBM base class (returning e.g. x_samp, None, None doesn't work)
return x_samp, x_samp, x_samp
# gibbs_xhx() same as BB-RBM
# mcmc_chain() same as BB-RBM
def reconstruction_loss(self, nb_gibbs_steps=1):
"""
Compute mean squared error between input data and the reconstruction generated by the model.
Result is a Theano expression with the form loss = f(x).
Useful as a rough indication of training progress (see Hinton2010).
Mean over samples and feature dimensions.
"""
def loss(x):
x_rec, _, _ = self.mcmc_chain(x, nb_gibbs_steps)
            return K.mean(K.square(x - x_rec))
return loss
# free_energy_gap() same as BB-RBM
# get_h_given_x_layer() same as BB-RBM
def get_x_given_h_layer(self, as_initial_layer=False):
"""
Generates a new Dense Layer that computes mean of Gaussian distribution p(x|h).
"""
if not as_initial_layer:
layer = Dense(output_dim=self.input_dim, activation='linear', weights=[self.W.get_value().T, self.bx.get_value()])
else:
layer = Dense(input_dim=self.hidden_dim, output_dim=self.input_dim, activation='linear', weights=[self.W.get_value().T, self.bx.get_value()])
return layer
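# --- Hedged standalone sketch (not part of the original module) ---
# The Bernoulli free energy used above, rewritten with plain numpy so the
# formula F(x) = -sum_j log(1 + exp(x W[:,j] + bh_j)) - bx^T x can be checked
# without Theano/Keras; array shapes are illustration-only assumptions
# (x: (batch, input_dim), W: (input_dim, hidden_dim)).
def _numpy_free_energy(x, W, bx, bh):
    wx_b = x.dot(W) + bh                              # (batch, hidden_dim)
    hidden_term = np.log1p(np.exp(wx_b)).sum(axis=1)  # (batch,)
    vbias_term = x.dot(bx)                            # (batch,)
    return -hidden_term - vbias_term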
|
nazurin/sites/Pixiv/models.py | amber6hua/nazurin | 170 | 11139958 | <filename>nazurin/sites/Pixiv/models.py
from dataclasses import dataclass
from random import random
from nazurin.models import Illust, Image
from .config import HEADERS, IMG_PROXY
@dataclass
class PixivImage(Image):
async def display_url(self):
# use reverse proxy to avoid strange problems
url = await self.chosen_url()
return url.replace('i.pximg.net', IMG_PROXY) + '?' + str(random())
@dataclass
class PixivIllust(Illust):
async def download(self, **kwargs):
await super().download(headers=HEADERS, **kwargs)
|
tests/integration/test_reexec.py | Satertek/pex | 2,160 | 11139967 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import os
import subprocess
import sys
from textwrap import dedent
import pytest
from pex.common import temporary_dir
from pex.interpreter import PythonInterpreter
from pex.testing import (
PY27,
PY38,
ensure_python_interpreter,
make_env,
run_pex_command,
run_simple_pex,
)
from pex.typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Iterable, Optional, List
def _assert_exec_chain(
exec_chain=None, # type: Optional[List[str]]
pex_python=None, # type: Optional[str]
pex_python_path=None, # type: Optional[Iterable[str]]
interpreter_constraints=None, # type: Optional[Iterable[str]]
pythonpath=None, # type: Optional[Iterable[str]]
):
# type: (...) -> None
with temporary_dir() as td:
test_pex = os.path.join(td, "test.pex")
args = ["-o", test_pex]
if interpreter_constraints:
args.extend("--interpreter-constraint={}".format(ic) for ic in interpreter_constraints)
env = os.environ.copy()
PATH = env["PATH"].split(os.pathsep)
def add_to_path(entry):
# type: (str) -> None
if os.path.isfile(entry):
entry = os.path.dirname(entry)
PATH.append(entry)
if pex_python:
add_to_path(pex_python)
if pex_python_path:
for path in pex_python_path:
add_to_path(path)
env["PATH"] = os.pathsep.join(PATH)
result = run_pex_command(args, env=env)
result.assert_success()
env = make_env(
_PEX_EXEC_CHAIN=1,
PEX_INTERPRETER=1,
PEX_PYTHON=pex_python,
PEX_PYTHON_PATH=os.pathsep.join(pex_python_path) if pex_python_path else None,
PYTHONPATH=os.pathsep.join(pythonpath) if pythonpath else None,
)
initial_interpreter = PythonInterpreter.get()
output = subprocess.check_output(
[
initial_interpreter.binary,
test_pex,
"-c",
"import json, os; print(json.dumps(os.environ.copy()))",
],
env=env,
)
final_env = json.loads(output.decode("utf-8"))
assert "PEX_PYTHON" not in final_env
assert "PEX_PYTHON_PATH" not in final_env
assert "_PEX_SHOULD_EXIT_BOOTSTRAP_REEXEC" not in final_env
expected_exec_interpreters = [initial_interpreter]
if exec_chain:
expected_exec_interpreters.extend(PythonInterpreter.from_binary(b) for b in exec_chain)
final_interpreter = expected_exec_interpreters[-1]
if final_interpreter.is_venv:
# If the last interpreter in the chain is in a virtual environment, it should be fully
# resolved and re-exec'd against in order to escape the virtual environment since we're
# not setting PEX_INHERIT_PATH in these tests.
resolved = final_interpreter.resolve_base_interpreter()
if exec_chain:
# There is already an expected reason to re-exec; so no extra exec step is needed.
expected_exec_interpreters[-1] = resolved
else:
# The expected exec chain is just the initial_interpreter, but it turned out to be a
# venv which forces a re-exec.
expected_exec_interpreters.append(resolved)
expected_exec_chain = [i.binary for i in expected_exec_interpreters]
actual_exec_chain = final_env["_PEX_EXEC_CHAIN"].split(os.pathsep)
assert expected_exec_chain == actual_exec_chain
def test_pex_no_reexec_no_constraints():
# type: () -> None
_assert_exec_chain()
def test_pex_reexec_no_constraints_pythonpath_present():
# type: () -> None
_assert_exec_chain(exec_chain=[sys.executable], pythonpath=["."])
def test_pex_no_reexec_constraints_match_current():
# type: () -> None
_assert_exec_chain(interpreter_constraints=[PythonInterpreter.get().identity.requirement])
def test_pex_reexec_constraints_match_current_pythonpath_present():
# type: () -> None
_assert_exec_chain(
exec_chain=[sys.executable],
pythonpath=["."],
interpreter_constraints=[PythonInterpreter.get().identity.requirement],
)
def test_pex_reexec_constraints_dont_match_current_pex_python_path():
# type: () -> None
py38_interpreter = ensure_python_interpreter(PY38)
py27_interpreter = ensure_python_interpreter(PY27)
_assert_exec_chain(
exec_chain=[py38_interpreter],
pex_python_path=[py27_interpreter, py38_interpreter],
interpreter_constraints=["=={}".format(PY38)],
)
def test_pex_reexec_constraints_dont_match_current_pex_python_path_min_py_version_selected():
# type: () -> None
py38_interpreter = ensure_python_interpreter(PY38)
py27_interpreter = ensure_python_interpreter(PY27)
_assert_exec_chain(
exec_chain=[py27_interpreter], pex_python_path=[py38_interpreter, py27_interpreter]
)
def test_pex_reexec_constraints_dont_match_current_pex_python():
# type: () -> None
version = PY27 if sys.version_info[:2] == (3, 8) else PY38
interpreter = ensure_python_interpreter(version)
_assert_exec_chain(
exec_chain=[interpreter],
pex_python=interpreter,
interpreter_constraints=["=={}".format(version)],
)
@pytest.mark.xfail(reason="See https://github.com/pantsbuild/pants/issues/4682")
def test_pex_re_exec_failure():
# type: () -> None
with temporary_dir() as output_dir:
# create 2 pex files for PEX_PATH
pex1_path = os.path.join(output_dir, "pex1.pex")
res1 = run_pex_command(["--disable-cache", "requests", "-o", pex1_path])
res1.assert_success()
pex2_path = os.path.join(output_dir, "pex2.pex")
res2 = run_pex_command(["--disable-cache", "flask", "-o", pex2_path])
res2.assert_success()
pex_path = ":".join(os.path.join(output_dir, name) for name in ("pex1.pex", "pex2.pex"))
# create test file test.py that attmepts to import modules from pex1/pex2
test_file_path = os.path.join(output_dir, "test.py")
with open(test_file_path, "w") as fh:
fh.write(
dedent(
"""
import requests
import flask
import sys
import os
import subprocess
                    if 'RAN_ONCE' in os.environ:
print('Hello world')
else:
env = os.environ.copy()
env['RAN_ONCE'] = '1'
subprocess.call([sys.executable] + sys.argv, env=env)
sys.exit()
"""
)
)
# set up env for pex build with PEX_PATH in the environment
env = make_env(PEX_PATH=pex_path)
# build composite pex of pex1/pex1
pex_out_path = os.path.join(output_dir, "out.pex")
run_pex_command(["--disable-cache", "wheel", "-o", pex_out_path])
# run test.py with composite env
stdout, rc = run_simple_pex(pex_out_path, [test_file_path], env=env)
assert rc == 0
assert stdout == b"Hello world\n"
|
kats/models/globalmodel/ensemble.py | iamxiaodong/Kats | 3,580 | 11140005 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import logging
import time
from multiprocessing import cpu_count
from multiprocessing.dummy import Pool
from typing import List, Optional, Union, Any, Tuple, Dict
import joblib
import numpy as np
import pandas as pd
import torch
from kats.consts import TimeSeriesData
from kats.models.globalmodel.model import GMModel, gmparam_from_string
from kats.models.globalmodel.utils import GMParam, gmpreprocess, split
class GMEnsemble:
"""A class for building the global model ensemble.
GMEnsemble is a framework for building the ensemble of global models. It provides functions including train, predict and save_model.
Attributes:
        gmparam: A :class:`kats.models.globalmodel.utils.GMParam` object for building the global model ensemble.
ensemble_type: Optional; A string representing the ensemble type. Can be 'median' or 'mean'. Default is 'median'.
        splits: Optional; A positive integer representing the number of sub-datasets to be built. Default is 3.
overlap: Optional; A boolean representing whether or not sub-datasets overlap with each other or not. For example, we have samples [ts1, ts2, ts3] and splits = 3.
If overlap is True, then three subsets are [[ts1], [ts2], [ts3]], i.e., each sample only appears in one sub-dataset.
If overlap is False, then three subsets are [[ts1, ts2], [ts2, ts3], [ts3, ts1]], i.e., each sample appears in (splits-1) sub-datasets.
Default is True.
replicate: Optional; A positive integer representing the number of global models to be trained on each sub-datasets. Default is 1.
multi: Optional; A boolean representing whether or not to use multi-processing for training and prediction. Default is False.
max_core: Optional; A positive integer representing the number of available cpu cores. Default is None, which sets the number of cores to (total_cores - 1) // 2.
Sample Usage:
>>> gme = GMEnsemble(params)
>>> # train an ensemble object and get training info (e.g., training/validation losses)
>>> training_info = gme.train(train_TSs, valid_TSs)
>>> # make prediction
>>> gme.predict(train_TSs)
>>> # save model
>>> gme.save_model("global_model_ensemble.pickle")
        >>> # Evaluate model performance on a given dataset.
>>> evals = gme.evalute(test_train, test_test)
"""
def __init__(
self,
gmparam: GMParam,
ensemble_type: str = "median",
splits: int = 3,
overlap: bool = True,
replicate: int = 1,
multi: bool = False,
max_core: Optional[int] = None,
) -> None:
if not isinstance(gmparam, GMParam):
msg = f"gmparam should be GMParam object but receives {type(gmparam)}."
logging.error(msg)
raise ValueError(msg)
self.params = gmparam
if ensemble_type == "median":
self._ensemble_func = np.median
elif ensemble_type == "mean":
self._ensemble_func = np.mean
else:
msg = f"ensemble_type should be either 'mean' or 'median' but receives {ensemble_type}."
logging.error(msg)
raise ValueError(msg)
self.ensemble_type = ensemble_type
if not isinstance(splits, int) or splits < 1:
msg = f"splits should be a positive integer but receives {splits}."
logging.error(msg)
raise ValueError(msg)
self.splits = splits
self.overlap = overlap
if not isinstance(replicate, int) or replicate < 1:
msg = f"rep should be a positive integer but receives {replicate}."
logging.error(msg)
raise ValueError(msg)
self.replicate = replicate
self.model_num = int(self.replicate * self.splits)
self.multi = multi
total_cores = cpu_count()
if max_core is None:
self.max_core = max((total_cores - 1) // 2, 1)
elif isinstance(max_core, int) and max_core > 0 and max_core < total_cores:
self.max_core = max_core
else:
msg = f"max_core should be a positive integer in [1, {total_cores}] but receives {max_core}."
logging.error(msg)
raise ValueError(msg)
self.gm_info = []
self.gm_models = [GMModel(self.params) for _ in range(self.model_num)]
self.test_ids = []
def _fit_single_gm(
self,
gm: GMModel,
train_TSs: Dict[Any, TimeSeriesData],
valid_TSs: Optional[Dict[Any, TimeSeriesData]],
random_seed: Optional[int] = None,
test_train_TSs: Optional[Dict[Any, TimeSeriesData]] = None,
test_valid_TSs: Optional[Dict[Any, TimeSeriesData]] = None,
) -> Dict[str, Any]:
"""Fit a global model and return training information.
Args:
            gm: A :class:`kats.models.globalmodel.model.GMModel` object to be trained.
train_TSs: A dictionary representing the training time series.
valid_TSs: A dictionary representing the corresponding validation time series.
random_seed: Optional; An integer representing the random seed. Default is None, i.e., no random seed is set.
test_train_TSs: Optional; A dictionary representing the training part of the test time series. Default is None.
            test_valid_TSs: Optional; A dictionary representing the testing part of the test time series. Default is None.
Returns:
            A dictionary representing the training information of the global model.
"""
if random_seed is not None:
np.random.seed(random_seed)
torch.manual_seed(random_seed)
# to ensure performance
torch.set_num_threads(1)
training_info = gm.train(
train_TSs,
valid_TSs,
test_train_TSs,
test_valid_TSs,
fcst_monitor=False,
debug=False,
)
return training_info
def _predict_single_gm(self, gm, test_TSs, steps, test_batch_size=1000):
t = time.time()
fcst = gm.predict(
test_TSs, steps=steps, raw=True, test_batch_size=test_batch_size
)
logging.info(f"fcst {len(fcst)} TSs with {time.time()-t}.")
return fcst
def train(
self,
data: Union[Dict[Any, TimeSeriesData], List[TimeSeriesData]],
test_size: float = 0.1,
valid_set: bool = False,
) -> None:
"""Train base global models.
Args:
data: A list or a dictionary of time series.
test_size: Optional; A float in [0,1) representing the percentage that the test set takes up. Default is 0.1
valid_set: Optional; A boolean specifying whether or not to have a validation set during training. Default is False.
"""
n = len(data)
keys = np.array(list(data.keys())) if isinstance(data, dict) else np.arange(n)
if test_size < 0 or test_size > 1:
msg = f"test_size should be in [0,1) but receives {test_size}."
logging.error(msg)
raise ValueError(msg)
if test_size > 0:
m = max(1, int(n * test_size))
np.random.shuffle(keys)
all_test_TSs = {keys[i]: data[keys[i]] for i in range(m)}
test_train_TSs, test_valid_TSs = gmpreprocess(
self.params, all_test_TSs, mode="test"
)
all_train_TSs = {keys[i]: data[keys[i]] for i in range(m, n)}
train_TSs, valid_TSs = gmpreprocess(
self.params, all_train_TSs, mode="train", valid_set=valid_set
)
self.test_ids = list(test_train_TSs.keys())
else:
train_TSs, valid_TSs = gmpreprocess(
self.params, data, mode="train", valid_set=valid_set
)
test_train_TSs, test_valid_TSs = None, None
self.test_ids = []
split_data = split(self.splits, self.overlap, train_TSs, valid_TSs)
# multi processing
if self.multi:
t0 = time.time()
rds = np.random.randint(1, int(10000 * self.model_num), self.model_num)
model_params = [
(
self.gm_models[i],
split_data[i % self.splits][0],
split_data[i % self.splits][1],
rds[i],
test_train_TSs,
test_valid_TSs,
)
for i in range(self.model_num)
]
pool = Pool(self.max_core)
results = pool.starmap(self._fit_single_gm, model_params)
pool.close()
pool.join()
# return results
self.gm_info = results
logging.info(
f"fit {self.model_num} global models using time {time.time()-t0}"
)
else:
self.gm_info = []
t0 = time.time()
i = 0
for _ in range(self.replicate):
for train, valid in split_data:
info = self._fit_single_gm(
self.gm_models[i],
train,
valid,
test_train_TSs=test_train_TSs,
test_valid_TSs=test_valid_TSs,
)
self.gm_info.append(info)
i += 1
logging.info(
f"fit {self.model_num} global models using time {time.time()-t0}"
)
return
def _combine_fcst(
self,
idx: Any,
fcsts: List[np.ndarray],
steps: int,
raw: bool,
first_timestamp: Optional[pd.Timestamp] = None,
col_names: Optional[List[str]] = None,
) -> Tuple[Any, Any]:
"""Combine the forecasts from each global model."""
fcst = [
self._ensemble_func([fcsts[i][j] for i in range(len(fcsts))], axis=0)
for j in range(len(fcsts[0]))
]
if raw:
return idx, fcst
else:
n_quantile = len(self.params.quantile)
df = pd.DataFrame(
np.column_stack([t.reshape(n_quantile, -1) for t in fcst]).T
).iloc[:steps]
df.columns = col_names
df["time"] = pd.date_range(
first_timestamp + self.params.freq, periods=steps, freq=self.params.freq
)
return idx, df
def predict(
self,
test_TSs: Union[
TimeSeriesData, List[TimeSeriesData], Dict[Any, TimeSeriesData]
],
steps: int,
test_batch_size: int = 500,
raw: bool = False,
) -> Dict[Any, Union[pd.DataFrame, List[np.ndarray]]]:
"""Generate forecasts for the target time series.
Args:
            test_TSs: A TimeSeriesData object, a list or a dictionary of time series to generate forecasts for.
steps: An integer representing the forecast steps.
test_batch_size: Optional; An integer representing the batch size for testing. Default is 500.
raw: Optional; A boolean representing whether or not to return raw forecasts (i.e., `numpy.ndarray` objects). If False, the forecasts are `pandas.DataFrame` objects. Default is False.
Returns:
A dictionary of forecasts, whose keys are the ids for time series, and values are the corresponding forecasts.
"""
if isinstance(test_TSs, TimeSeriesData):
test_TSs = [test_TSs]
elif isinstance(test_TSs, dict) or isinstance(test_TSs, list):
pass
        else:
            msg = f"predict function only accepts a TimeSeriesData object, a dictionary or a list of TimeSeriesData objects, but receives {type(test_TSs)}."
            logging.error(msg)
            raise ValueError(msg)
if steps <= 0:
msg = f"step should be a positive integer but receives {steps}."
logging.error(msg)
raise ValueError(msg)
if not isinstance(test_batch_size, int) or test_batch_size <= 0:
msg = f"test_batch_size should be a positive integer but receives {test_batch_size}."
logging.error(msg)
raise ValueError(msg)
t0 = time.time()
if self.multi:
pool = Pool(self.max_core)
all_fcsts = pool.starmap(
self._predict_single_gm,
[(t, test_TSs, steps, test_batch_size) for t in self.gm_models],
)
pool.close()
pool.join()
else:
all_fcsts = [
m.predict(test_TSs, steps, raw=True, test_batch_size=test_batch_size)
for m in self.gm_models
]
logging.info(
f"time for all global model to generate forecasts: {time.time() - t0}."
)
keys = (
test_TSs.keys() if isinstance(test_TSs, dict) else np.arange(len(test_TSs))
)
col_names = (
[f"fcst_quantile_{q}" for q in self.params.quantile] if (not raw) else None
)
if self.multi:
cf_params = [
(
k,
[all_fcsts[i][k] for i in range(self.model_num)],
steps,
raw,
test_TSs[k].time.iloc[-1],
col_names,
)
for k in keys
]
pool = Pool(self.max_core)
results = pool.starmap(self._combine_fcst, cf_params)
pool.close()
pool.join()
return {t[0]: t[1] for t in results}
else:
ans = {}
for k in keys:
try:
ans[k] = self._combine_fcst(
k,
[all_fcsts[i][k] for i in range(self.model_num)],
steps,
raw,
test_TSs[k].time.iloc[-1],
col_names,
)[1]
except Exception as e:
msg = f"Fail to generate forecasts with Exception {e}."
logging.error(msg)
raise ValueError(msg)
return ans
def save_model(self, file_name: str) -> None:
"""Save ensemble model to file.
Args:
file_name: A string representing the file address and file name.
"""
if len(self.gm_models) == 0:
msg = "Please train global models before saving GMEnsemble."
logging.error(msg)
raise ValueError(msg)
try:
# clean-up unnecessary info
[gm._reset_nn_states() for gm in self.gm_models]
state_dict = (
[gm.rnn.state_dict() for gm in self.gm_models]
if self.params.model_type == "rnn"
else None
)
encoder_dict = (
[gm.encoder.state_dict() for gm in self.gm_models]
if self.params.model_type == "s2s"
else None
)
decoder_dict = (
[gm.decoder.state_dict() for gm in self.gm_models]
if self.params.model_type == "s2s"
else None
)
gmparam_string = self.params.to_string()
info = {
"state_dict": state_dict,
"encoder_dict": encoder_dict,
"decoder_dict": decoder_dict,
"gmparam_string": gmparam_string,
"gm_info": self.gm_info,
"test_ids": self.test_ids,
"gmensemble_params": {},
}
for attr in [
"splits",
"overlap",
"replicate",
"multi",
"max_core",
"ensemble_type",
]:
info["gmensemble_params"][attr] = getattr(self, attr)
with open(file_name, "wb") as f:
joblib.dump(info, f)
logging.info(f"Successfully save GMEnsemble to {file_name}.")
except Exception as e:
msg = f"Fail to save GMEnsemble to {file_name} with Exception {e}."
logging.error(msg)
raise ValueError(msg)
def evaluate(
self,
test_train_TSs: Union[
TimeSeriesData, List[TimeSeriesData], Dict[Any, TimeSeriesData]
],
test_valid_TSs: Union[
TimeSeriesData, List[TimeSeriesData], Dict[Any, TimeSeriesData]
],
) -> pd.DataFrame:
"""Evaluate the GMEnsemble object performance.
A wrapper function to evaluate model performance on a given time series data set.
Args:
test_train_TSs: A list or a dictionary of :class:`kats.consts.TimeSeriesData` objects for warming-ups.
test_valid_TSs: A list or a dictionary of :class:`kats.consts.TimeSeriesData` objects for evaluation.
Returns:
A `pandas.DataFrame` object representing the evaluation results.
"""
if type(test_train_TSs) != type(test_valid_TSs):
msg = (
"The data type of test_train_TSs and test_valid_TSs should be the same."
)
logging.error(msg)
raise ValueError(msg)
if isinstance(test_train_TSs, TimeSeriesData):
test_train_TSs = [test_train_TSs]
# pyre-fixme[9]
test_valid_TSs = [test_valid_TSs]
if len(test_train_TSs) != len(test_valid_TSs):
msg = "test_train_TSs and test_valid_TSs should be of the same length."
logging.error(msg)
raise ValueError(msg)
keys = (
test_train_TSs.keys()
if isinstance(test_train_TSs, dict)
else range(len(test_train_TSs))
)
if len(keys) == 0:
msg = "The input collection of time series should not be empty."
logging.error(msg)
raise ValueError(msg)
steps = np.max([len(test_valid_TSs[t]) for t in keys])
fcst = self.predict(test_train_TSs, steps=steps, raw=True)
logging.info(
f"Successfully generate forecasts for all test time series with length {steps}."
)
eval_func = self.gm_models[0].build_validation_function()
fcst_window = self.params.fcst_window
ans = []
keys = (
test_train_TSs.keys()
if isinstance(test_train_TSs, dict)
else range(len(test_train_TSs))
)
for k in keys:
tmp = test_valid_TSs[k].value.values
tmp_step = len(tmp) // fcst_window + int(len(tmp) % fcst_window != 0)
tmp_fcst_length = tmp_step * fcst_window
            actuals = np.full(tmp_fcst_length, np.nan, float)
actuals[: len(tmp)] = tmp
for j in range(tmp_step):
tmp_actuals = actuals[j * fcst_window : (j + 1) * fcst_window]
tmp = eval_func(fcst[k][j], tmp_actuals)
tmp["step"] = j
tmp["idx"] = k
ans.append(tmp)
return pd.DataFrame(ans)
def load_gmensemble_from_file(file_name: str) -> GMEnsemble:
"""Load a trained :class:`GMEnsemble` object from file.
Args:
file_name: A string representing the file saving the :class:`GMEnsemble` object.
Returns:
A :class:`GMEnsemble` object loaded from the file.
"""
try:
info = joblib.load(open(file_name, "rb"))
gmparam = gmparam_from_string(info["gmparam_string"])
n = (
len(info["state_dict"])
if info["state_dict"] is not None
else len(info["encoder_dict"])
)
gm_models = []
for i in range(n):
tmp_gmmodel = GMModel(gmparam)
if gmparam.model_type == "rnn":
tmp_gmmodel.build_rnn()
tmp_gmmodel.rnn.load_state_dict(info["state_dict"][i])
else:
tmp_gmmodel.build_s2s()
tmp_gmmodel.encoder.load_state_dict(info["encoder_dict"][i])
tmp_gmmodel.decoder.load_state_dict(info["decoder_dict"][i])
gm_models.append(tmp_gmmodel)
info["gmensemble_params"]["gmparam"] = gmparam
gmensemble = GMEnsemble(**info["gmensemble_params"])
gmensemble.gm_models = gm_models
gmensemble.gm_info = info["gm_info"]
except Exception as e:
msg = f"Fail to load GMEnsemble from {file_name} with Exception {e}."
logging.error(msg)
raise ValueError(msg)
return gmensemble
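if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module). It shows the
    # ensembling idea used in GMEnsemble._combine_fcst: per-model forecasts are
    # reduced element-wise (e.g. with a median, depending on ensemble_type) for
    # each forecast segment. The shapes are arbitrary examples: 5 base models,
    # 3 segments of 4 quantiles x 30 steps.
    per_model_fcsts = [[np.random.rand(4, 30) for _ in range(3)] for _ in range(5)]
    combined = [
        np.median([per_model_fcsts[i][j] for i in range(5)], axis=0)
        for j in range(3)
    ]
    assert all(seg.shape == (4, 30) for seg in combined)
    # A trained ensemble would typically be persisted and restored with the
    # functions above (assuming `gm_ensemble` is a fitted GMEnsemble):
    #     gm_ensemble.save_model("gm_ensemble.p")
    #     restored = load_gmensemble_from_file("gm_ensemble.p")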
|
mlens/parallel/base.py | mehrdad-shokri/mlens | 760 | 11140018 | """ML-Ensemble
:author: <NAME>
:copyright: 2017-2018
:license: MIT
Base classes for parallel estimation
Schedulers for global setups:
0:
Base setups - independent of other features:
IndexMixin._setup_0_index
1:
Global setups - reserved for aggregating classes:
Layer._setup_1_global
2:
Dependents on 0:
            ProbaMixin._setup_2_multiplier
3:
Dependents on 0, 2:
            OutputMixin._setup_3_output_columns
Note that schedulers are experimental and may change without a deprecation
cycle.
"""
import warnings
from abc import abstractmethod
import numpy as np
from ._base_functions import check_stack, check_params
from .. import config
from ..utils.exceptions import ParallelProcessingError
from ..externals.sklearn.base import clone, BaseEstimator as _BaseEstimator
class ParamMixin(_BaseEstimator, object):
"""Parameter Mixin
Mixin for protecting static parameters from changes after fitting.
.. Note::
To use this mixin the instance inheriting it must set
``__static__=list()`` and ``_static_fit_params_=dict()``
in ``__init__``.
"""
def _store_static_params(self):
"""Record current static params for future comparison."""
if self.__static__:
for key, val in self.get_params(deep=False).items():
if key in self.__static__:
self._static_fit_params[key] = clone(val, safe=False)
def _check_static_params(self):
"""Check if current static params are identical to previous params"""
current_static_params = {
k: v for k, v in self.get_params(deep=False).items()
if k in self.__static__}
return check_params(self._static_fit_params, current_static_params)
class IndexMixin(object):
"""Indexer mixin
Mixin for handling indexers.
.. note::
To use this mixin the instance inheriting it must set the
``indexer`` or ``indexers`` attribute in ``__init__`` (not both).
"""
@property
def __indexer__(self):
"""Flag for existence of indexer"""
return hasattr(self, 'indexer') or hasattr(self, 'indexers')
def _check_indexer(self, indexer):
"""Check consistent indexer classes"""
cls = indexer.__class__.__name__.lower()
if 'index' not in cls:
ValueError("Passed indexer does not appear to be valid indexer")
lcls = [idx.__class__.__name__.lower() for idx in self._get_indexers()]
if lcls:
if 'blendindex' in lcls and cls != 'blendindex':
raise ValueError(
"Instance has blendindex, but was passed full type")
elif 'blendindex' not in lcls and cls == 'blendindex':
raise ValueError(
"Instance has full type index, but was passed blendindex")
def _get_indexers(self):
"""Return list of indexers"""
if not self.__indexer__:
raise AttributeError("No indexer or indexers attribute available")
indexers = [getattr(self, 'indexer', None)]
if None in indexers:
indexers = getattr(self, 'indexers', [None])
return indexers
def _setup_0_index(self, X, y, job):
indexers = self._get_indexers()
for indexer in indexers:
indexer.fit(X, y, job)
class OutputMixin(IndexMixin):
"""Output Mixin
Mixin class for interfacing with ParallelProcessing when outputs are
desired.
.. note::
To use this mixin the instance inheriting it must set the
``feature_span`` attribute and ``__no_output__`` flag in ``__init__``.
"""
@abstractmethod
def set_output_columns(self, X, y, job, n_left_concats=0):
"""Set output columns for prediction array"""
pass
def _setup_3_output_columns(self, X, y, job, n_left_concats=0):
"""Set output columns for prediction array. Used during setup"""
if not self.__no_output__:
self.set_output_columns(X, y, job, n_left_concats)
def shape(self, job):
"""Prediction array shape"""
if not hasattr(self, 'feature_span'):
raise ParallelProcessingError(
"Instance dose not set the feature_span attribute "
"in the constructor.")
if not self.feature_span:
raise ValueError("Columns not set. Call set_output_columns.")
return self.size(job), self.feature_span[1]
def size(self, attr):
"""Get size of dim 0"""
if attr not in ['n_test_samples', 'n_samples']:
attr = 'n_test_samples' if attr != 'predict' else 'n_samples'
indexers = self._get_indexers()
sizes = list()
for indexer in indexers:
sizes.append(getattr(indexer, attr))
sizes = np.unique(sizes)
if not sizes.shape[0] == 1:
warnings.warn(
"Inconsistent output sizes generated by indexers "
"(sizes: %r from indexers %r).\n"
"outputs will be zero-padded"
% (sizes.tolist(), indexers))
return max(sizes)
return sizes[0]
class ProbaMixin(object):
""""Probability Mixin
Mixin for probability features on objects
interfacing with :class:`~mlens.parallel.backend.ParallelProcessing`
.. note::
        To use this mixin the instance inheriting it must set the ``proba``
        and the ``_classes`` (default ``None``) attributes in ``__init__``.
"""
def _setup_2_multiplier(self, X, y, job=None):
if self.proba and y is not None:
self.classes_ = y
def _get_multiplier(self, X, y, alt=1):
if self.proba:
multiplier = self.classes_
else:
multiplier = alt
return multiplier
@property
def _predict_attr(self):
return 'predict' if not self.proba else 'predict_proba'
@property
def classes_(self):
"""Prediction classes during proba"""
return self._classes
@classes_.setter
def classes_(self, y):
"""Set classes given input y"""
self._classes = np.unique(y).shape[0]
class BaseBackend(object):
"""Base class for parallel backend
Implements default backend settings.
"""
def __init__(self, backend=None, n_jobs=-1, dtype=None,
raise_on_exception=True):
self.n_jobs = n_jobs
self.dtype = dtype if dtype is not None else config.get_dtype()
self.backend = backend if backend is not None else config.get_backend()
self.raise_on_exception = raise_on_exception
@abstractmethod
def __iter__(self):
yield
class BaseParallel(BaseBackend):
"""Base class for parallel objects
Parameters
----------
name : str
name of instance. Should be unique.
backend : str or object (default = 'threading')
backend infrastructure to use during call to
:class:`mlens.externals.joblib.Parallel`. See Joblib for further
documentation. To set global backend,
see :func:`~mlens.config.set_backend`.
raise_on_exception : bool (default = True)
whether to issue warnings on soft exceptions or raise error.
Examples include lack of layers, bad inputs, and failed fit of an
estimator in a layer. If set to ``False``, warnings are issued instead
but estimation continues unless exception is fatal. Note that this
can result in unexpected behavior unless the exception is anticipated.
verbose : int or bool (default = False)
level of verbosity.
n_jobs : int (default = -1)
Degree of concurrency in estimation. Set to -1 to maximize,
1 runs on a single process (or thread).
dtype : obj (default = np.float32)
data type to use, must be compatible with a numpy array dtype.
"""
def __init__(self, name, *args, **kwargs):
super(BaseParallel, self).__init__(*args, **kwargs)
self.name = name
self.__no_output__ = False
@abstractmethod
def __iter__(self):
"""Iterator for process manager"""
yield
def setup(self, X, y, job, skip=None, **kwargs):
"""Setup instance for estimation"""
skip = ['_setup_%s' % s for s in skip] if skip else []
funs = [f for f in dir(self)
if f.startswith('_setup_') and f not in skip]
for f in sorted(funs):
func = getattr(self, f)
args = func.__func__.__code__.co_varnames
fargs = {k: v for k, v in kwargs.items() if k in args}
func(X, y, job, **fargs)
class BaseEstimator(ParamMixin, _BaseEstimator, BaseParallel):
"""Base Parallel Estimator class
Modified Scikit-learn class to handle backend params that we want to
protect from changes.
"""
def __init__(self, *args, **kwargs):
super(BaseEstimator, self).__init__(*args, **kwargs)
self.__static__ = list()
self._static_fit_params = dict()
def get_params(self, deep=True):
out = super(BaseEstimator, self).get_params(deep=deep)
for name in BaseBackend.__init__.__code__.co_varnames:
if name not in ['self']:
out[name] = getattr(self, name)
return out
@property
@abstractmethod
def __fitted__(self):
"""Fit status"""
return self._check_static_params()
class BaseStacker(BaseEstimator):
"""Base class for instanes that stack job estimators"""
def __init__(self, stack=None, verbose=False, *args, **kwargs):
super(BaseStacker, self).__init__(*args, **kwargs)
if stack and not isinstance(stack, list):
raise ValueError("Stack must be a list. Got %r:" % type(stack))
self.stack = stack if stack else list()
self._verbose = verbose
@abstractmethod
def __iter__(self):
yield
def push(self, *stack):
"""Push onto stack"""
check_stack(stack, self.stack)
for item in stack:
self.stack.append(item)
attr = item.name.replace('-', '_').replace(' ', '').strip()
setattr(self, attr, item)
return self
def replace(self, idx, item):
"""Replace a current member of the stack with a new instance"""
attr = item.name.replace('-', '_').replace(' ', '').strip()
setattr(self, attr, item)
self.stack[idx] = item
def pop(self, idx):
"""Pop a previous push with index idx"""
return self.stack.pop(idx)
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
whether to return nested parameters.
"""
out = super(BaseStacker, self).get_params(deep=deep)
if not deep:
return out
for item in self.stack:
out[item.name] = item
for key, val in item.get_params(deep=True).items():
out['%s__%s' % (item.name, key)] = val
return out
@property
def __fitted__(self):
"""Fitted status"""
if not self.stack or not self._check_static_params():
return False
return all([g.__fitted__ for g in self.stack])
@property
def __stack__(self):
"""Check stack"""
if not isinstance(self.stack, list):
raise ValueError(
"Stack corrupted. Extected list. Got %r" % type(self.stack))
return len(self.stack) > 0
@property
def verbose(self):
"""Verbosity"""
return self._verbose
@verbose.setter
def verbose(self, verbose):
"""Set verbosity"""
self._verbose = verbose
for g in self.stack:
g.verbose = verbose
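if __name__ == "__main__":
    # Minimal sketch only (not part of the original module) of the ParamMixin
    # contract documented above: the inheriting class declares ``__static__``
    # and ``_static_fit_params`` in ``__init__``, snapshots the static
    # parameters at fit time, and can later detect modifications. The return
    # value of _check_static_params is used as a truthy "unchanged" flag, as in
    # BaseStacker.__fitted__ above. The class below is purely hypothetical.
    class _DemoEstimator(ParamMixin):

        def __init__(self, indexer='full', verbose=False):
            self.indexer = indexer
            self.verbose = verbose
            self.__static__ = ['indexer']
            self._static_fit_params = dict()

    est = _DemoEstimator()
    est._store_static_params()          # snapshot of the static parameters
    print(est._check_static_params())   # compared against the snapshot: unchanged
    est.indexer = 'blend'
    print(est._check_static_params())   # a static parameter was modified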
|
tests/test_private_func.py | raphaelahrens/pytm | 524 | 11140042 | import random
import unittest
from pytm.pytm import (
TM,
Actor,
Boundary,
Data,
Dataflow,
Datastore,
Process,
Server,
Threat,
)
class TestUniqueNames(unittest.TestCase):
def test_duplicate_boundary_names_have_different_unique_names(self):
random.seed(0)
object_1 = Boundary("foo")
object_2 = Boundary("foo")
object_1_uniq_name = object_1._uniq_name()
object_2_uniq_name = object_2._uniq_name()
self.assertNotEqual(object_1_uniq_name, object_2_uniq_name)
self.assertEqual(object_1_uniq_name, "boundary_foo_acf3059e70")
self.assertEqual(object_2_uniq_name, "boundary_foo_88f2d9c06f")
class TestAttributes(unittest.TestCase):
def test_write_once(self):
user = Actor("User")
with self.assertRaises(ValueError):
user.name = "Computer"
def test_kwargs(self):
user = Actor("User", isAdmin=True)
self.assertEqual(user.isAdmin, True)
user = Actor("User")
self.assertEqual(user.isAdmin, False)
user.isAdmin = True
self.assertEqual(user.isAdmin, True)
def test_load_threats(self):
tm = TM("TM")
self.assertNotEqual(len(TM._threats), 0)
with self.assertRaises(FileNotFoundError):
tm.threatsFile = "threats.json"
with self.assertRaises(FileNotFoundError):
TM("TM", threatsFile="threats.json")
def test_responses(self):
tm = TM("my test tm", description="aa", isOrdered=True)
user = Actor("User")
web = Server("Web Server")
db = Datastore("SQL Database")
http_req = Dataflow(user, web, "http req")
insert = Dataflow(web, db, "insert data")
query = Dataflow(web, db, "query")
query_resp = Dataflow(db, web, "query results", responseTo=query)
http_resp = Dataflow(web, user, "http resp")
http_resp.responseTo = http_req
self.assertTrue(tm.check())
self.assertEqual(http_req.response, http_resp)
self.assertIs(http_resp.isResponse, True)
self.assertIs(query_resp.isResponse, True)
self.assertEqual(query_resp.responseTo, query)
self.assertEqual(query.response, query_resp)
self.assertIsNone(insert.response)
self.assertIs(insert.isResponse, False)
def test_defaults(self):
tm = TM("TM")
user_data = Data("HTTP")
user = Actor("User", data=user_data, authenticatesDestination=True)
json_data = Data("JSON")
server = Server(
"Server", port=443, protocol="HTTPS", isEncrypted=True, data=json_data
)
sql_resp = Data("SQL resp")
db = Datastore(
"PostgreSQL",
isSQL=True,
port=5432,
protocol="PostgreSQL",
isEncrypted=False,
data=sql_resp,
)
worker = Process("Task queue worker")
req_get_data = Data("HTTP GET")
req_get = Dataflow(user, server, "HTTP GET", data=req_get_data)
server_query_data = Data("SQL")
server_query = Dataflow(server, db, "Query", data=server_query_data)
result_data = Data("Results")
result = Dataflow(db, server, "Results", data=result_data, isResponse=True)
resp_get_data = Data("HTTP Response")
resp_get = Dataflow(server, user, "HTTP Response", data=resp_get_data, isResponse=True)
req_post_data = Data("JSON")
req_post = Dataflow(user, server, "HTTP POST", data=req_post_data)
resp_post = Dataflow(server, user, "HTTP Response", isResponse=True)
sql_data = Data("SQL")
worker_query = Dataflow(worker, db, "Query", data=sql_data)
Dataflow(db, worker, "Results", isResponse=True)
cookie = Data("Auth Cookie", carriedBy=[req_get, req_post])
self.assertTrue(tm.check())
self.assertEqual(req_get.srcPort, -1)
self.assertEqual(req_get.dstPort, server.port)
self.assertEqual(req_get.isEncrypted, server.isEncrypted)
self.assertEqual(
req_get.authenticatesDestination, user.authenticatesDestination
)
self.assertEqual(req_get.protocol, server.protocol)
self.assertTrue(user.data.issubset(req_get.data))
self.assertEqual(server_query.srcPort, -1)
self.assertEqual(server_query.dstPort, db.port)
self.assertEqual(server_query.isEncrypted, db.isEncrypted)
self.assertEqual(
server_query.authenticatesDestination, server.authenticatesDestination
)
self.assertEqual(server_query.protocol, db.protocol)
self.assertTrue(server.data.issubset(server_query.data))
self.assertEqual(result.srcPort, db.port)
self.assertEqual(result.dstPort, -1)
self.assertEqual(result.isEncrypted, db.isEncrypted)
self.assertEqual(result.authenticatesDestination, False)
self.assertEqual(result.protocol, db.protocol)
self.assertTrue(db.data.issubset(result.data))
self.assertEqual(resp_get.srcPort, server.port)
self.assertEqual(resp_get.dstPort, -1)
self.assertEqual(resp_get.isEncrypted, server.isEncrypted)
self.assertEqual(resp_get.authenticatesDestination, False)
self.assertEqual(resp_get.protocol, server.protocol)
self.assertTrue(server.data.issubset(resp_get.data))
self.assertEqual(req_post.srcPort, -1)
self.assertEqual(req_post.dstPort, server.port)
self.assertEqual(req_post.isEncrypted, server.isEncrypted)
self.assertEqual(
req_post.authenticatesDestination, user.authenticatesDestination
)
self.assertEqual(req_post.protocol, server.protocol)
self.assertTrue(user.data.issubset(req_post.data))
self.assertEqual(resp_post.srcPort, server.port)
self.assertEqual(resp_post.dstPort, -1)
self.assertEqual(resp_post.isEncrypted, server.isEncrypted)
self.assertEqual(resp_post.authenticatesDestination, False)
self.assertEqual(resp_post.protocol, server.protocol)
self.assertTrue(server.data.issubset(resp_post.data))
self.assertListEqual(server.inputs, [req_get, req_post])
self.assertListEqual(server.outputs, [server_query])
self.assertListEqual(worker.inputs, [])
self.assertListEqual(worker.outputs, [worker_query])
self.assertListEqual(cookie.carriedBy, [req_get, req_post])
self.assertSetEqual(set(cookie.processedBy), set([user, server]))
self.assertIn(cookie, req_get.data)
self.assertSetEqual(
set([d.name for d in req_post.data]), set([cookie.name, "HTTP", "JSON"])
)
class TestMethod(unittest.TestCase):
def test_defaults(self):
tm = TM("my test tm", description="aa", isOrdered=True)
internet = Boundary("Internet")
cloud = Boundary("Cloud")
user = Actor("User", inBoundary=internet)
server = Server("Server")
db = Datastore("DB", inBoundary=cloud, isSQL=True)
func = Datastore("Lambda function", inBoundary=cloud)
request = Dataflow(user, server, "request")
response = Dataflow(server, user, "response", isResponse=True)
user_query = Dataflow(user, db, "user query")
server_query = Dataflow(server, db, "server query")
func_query = Dataflow(func, db, "func query")
default_target = ["Actor", "Boundary", "Dataflow", "Datastore", "Server"]
testCases = [
{"target": server, "condition": "target.oneOf(Server, Datastore)"},
{"target": server, "condition": "not target.oneOf(Actor, Dataflow)"},
{"target": request, "condition": "target.crosses(Boundary)"},
{"target": user_query, "condition": "target.crosses(Boundary)"},
{"target": server_query, "condition": "target.crosses(Boundary)"},
{"target": func_query, "condition": "not target.crosses(Boundary)"},
{"target": func_query, "condition": "not target.enters(Boundary)"},
{"target": func_query, "condition": "not target.exits(Boundary)"},
{"target": request, "condition": "not target.enters(Boundary)"},
{"target": request, "condition": "target.exits(Boundary)"},
{"target": response, "condition": "target.enters(Boundary)"},
{"target": response, "condition": "not target.exits(Boundary)"},
{"target": user, "condition": "target.inside(Boundary)"},
{"target": func, "condition": "not any(target.inputs)"},
{
"target": server,
"condition": "any(f.sink.oneOf(Datastore) and f.sink.isSQL "
"for f in target.outputs)",
},
]
self.assertTrue(tm.check())
for case in testCases:
t = Threat(SID="", target=default_target, condition=case["condition"])
self.assertTrue(
t.apply(case["target"]),
"Failed to match {} against {}".format(
case["target"],
case["condition"],
),
)
|
misc/utils.py | hzhucn/Visual_Dialogue.pytorch | 123 | 11140043 | <filename>misc/utils.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import random
import pdb
"""
Some utility functions.
"""
def repackage_hidden_volatile(h):
if type(h) == Variable:
return Variable(h.data, volatile=True)
else:
return tuple(repackage_hidden_volatile(v) for v in h)
def repackage_hidden(h, batch_size):
"""Wraps hidden states in new Variables, to detach them from their history."""
if type(h) == Variable:
return Variable(h.data.resize_(h.size(0), batch_size, h.size(2)).zero_())
else:
return tuple(repackage_hidden(v, batch_size) for v in h)
def clip_gradient(model):
"""Computes a gradient clipping coefficient based on gradient norm."""
totalnorm = 0
for p in model.parameters():
p.grad.data.clamp_(-5, 5)
def adjust_learning_rate(optimizer, epoch, lr):
"""Sets the learning rate to the initial LR decayed by 0.5 every 20 epochs"""
lr = lr * (0.5 ** (epoch // 20))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def decode_txt(itow, x):
"""Function to show decode the text."""
out = []
for b in range(x.size(1)):
txt = ''
for t in range(x.size(0)):
idx = x[t,b]
if idx == 0 or idx == len(itow)+1:
break
txt += itow[str(int(idx))]
txt += ' '
out.append(txt)
return out
def l2_norm(input):
"""
    input: feature that needs to be normalized.
    output: normalized feature.
"""
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-10)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
def sample_batch_neg(answerIdx, negAnswerIdx, sample_idx, num_sample):
"""
input:
answerIdx: batch_size
negAnswerIdx: batch_size x opt.negative_sample
output:
sample_idx = batch_size x num_sample
"""
batch_size = answerIdx.size(0)
num_neg = negAnswerIdx.size(0) * negAnswerIdx.size(1)
negAnswerIdx = negAnswerIdx.clone().view(-1)
for b in range(batch_size):
gt_idx = answerIdx[b]
for n in range(num_sample):
while True:
rand = int(random.random() * num_neg)
neg_idx = negAnswerIdx[rand]
if gt_idx != neg_idx:
sample_idx.data[b, n] = rand
break
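if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module): l2_norm
    # normalizes each row of a feature matrix to unit length, and
    # sample_batch_neg fills `sample_idx` with indices of negative answers that
    # do not collide with the ground-truth answer. Shapes are arbitrary.
    feats = torch.randn(4, 8)
    normed = l2_norm(feats)
    print(normed.norm(dim=1))  # every row norm is ~1.0

    batch_size, n_neg, num_sample = 4, 10, 3
    answerIdx = torch.arange(batch_size)
    # negatives drawn from a disjoint id range so they never equal the answers
    negAnswerIdx = torch.randint(10, 100, (batch_size, n_neg))
    sample_idx = Variable(torch.zeros(batch_size, num_sample).long())
    sample_batch_neg(answerIdx, negAnswerIdx, sample_idx, num_sample)
    print(sample_idx)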
|
sdk/consumption/azure-mgmt-consumption/azure/mgmt/consumption/aio/_consumption_management_client.py | rsdoherty/azure-sdk-for-python | 2,728 | 11140065 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import ConsumptionManagementClientConfiguration
from .operations import UsageDetailsOperations
from .operations import MarketplacesOperations
from .operations import BudgetsOperations
from .operations import TagsOperations
from .operations import ChargesOperations
from .operations import BalancesOperations
from .operations import ReservationsSummariesOperations
from .operations import ReservationsDetailsOperations
from .operations import ReservationRecommendationsOperations
from .operations import ReservationRecommendationDetailsOperations
from .operations import ReservationTransactionsOperations
from .operations import PriceSheetOperations
from .operations import ForecastsOperations
from .operations import Operations
from .operations import AggregatedCostOperations
from .operations import EventsOperations
from .operations import LotsOperations
from .operations import CreditsOperations
from .. import models
class ConsumptionManagementClient(object):
"""Consumption management client provides access to consumption resources for Azure Enterprise Subscriptions.
:ivar usage_details: UsageDetailsOperations operations
:vartype usage_details: azure.mgmt.consumption.aio.operations.UsageDetailsOperations
:ivar marketplaces: MarketplacesOperations operations
:vartype marketplaces: azure.mgmt.consumption.aio.operations.MarketplacesOperations
:ivar budgets: BudgetsOperations operations
:vartype budgets: azure.mgmt.consumption.aio.operations.BudgetsOperations
:ivar tags: TagsOperations operations
:vartype tags: azure.mgmt.consumption.aio.operations.TagsOperations
:ivar charges: ChargesOperations operations
:vartype charges: azure.mgmt.consumption.aio.operations.ChargesOperations
:ivar balances: BalancesOperations operations
:vartype balances: azure.mgmt.consumption.aio.operations.BalancesOperations
:ivar reservations_summaries: ReservationsSummariesOperations operations
:vartype reservations_summaries: azure.mgmt.consumption.aio.operations.ReservationsSummariesOperations
:ivar reservations_details: ReservationsDetailsOperations operations
:vartype reservations_details: azure.mgmt.consumption.aio.operations.ReservationsDetailsOperations
:ivar reservation_recommendations: ReservationRecommendationsOperations operations
:vartype reservation_recommendations: azure.mgmt.consumption.aio.operations.ReservationRecommendationsOperations
:ivar reservation_recommendation_details: ReservationRecommendationDetailsOperations operations
:vartype reservation_recommendation_details: azure.mgmt.consumption.aio.operations.ReservationRecommendationDetailsOperations
:ivar reservation_transactions: ReservationTransactionsOperations operations
:vartype reservation_transactions: azure.mgmt.consumption.aio.operations.ReservationTransactionsOperations
:ivar price_sheet: PriceSheetOperations operations
:vartype price_sheet: azure.mgmt.consumption.aio.operations.PriceSheetOperations
:ivar forecasts: ForecastsOperations operations
:vartype forecasts: azure.mgmt.consumption.aio.operations.ForecastsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.consumption.aio.operations.Operations
:ivar aggregated_cost: AggregatedCostOperations operations
:vartype aggregated_cost: azure.mgmt.consumption.aio.operations.AggregatedCostOperations
:ivar events: EventsOperations operations
:vartype events: azure.mgmt.consumption.aio.operations.EventsOperations
:ivar lots: LotsOperations operations
:vartype lots: azure.mgmt.consumption.aio.operations.LotsOperations
:ivar credits: CreditsOperations operations
:vartype credits: azure.mgmt.consumption.aio.operations.CreditsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Azure Subscription ID.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = ConsumptionManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.usage_details = UsageDetailsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.marketplaces = MarketplacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.budgets = BudgetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.tags = TagsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.charges = ChargesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.balances = BalancesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.reservations_summaries = ReservationsSummariesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.reservations_details = ReservationsDetailsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.reservation_recommendations = ReservationRecommendationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.reservation_recommendation_details = ReservationRecommendationDetailsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.reservation_transactions = ReservationTransactionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.price_sheet = PriceSheetOperations(
self._client, self._config, self._serialize, self._deserialize)
self.forecasts = ForecastsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.aggregated_cost = AggregatedCostOperations(
self._client, self._config, self._serialize, self._deserialize)
self.events = EventsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.lots = LotsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.credits = CreditsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ConsumptionManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
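if __name__ == "__main__":
    # Usage sketch only (not part of the generated client). It assumes the
    # optional `azure-identity` package for DefaultAzureCredential; the
    # subscription id below is a placeholder supplied by the caller.
    import asyncio

    from azure.identity.aio import DefaultAzureCredential

    async def _demo() -> None:
        async with ConsumptionManagementClient(
            credential=DefaultAzureCredential(),
            subscription_id="00000000-0000-0000-0000-000000000000",
        ) as client:
            # Operation groups (e.g. client.usage_details, client.budgets) are
            # initialized in __init__ above; their methods are defined in the
            # .operations modules.
            pass

    asyncio.run(_demo())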
|
tools/pythonpkg/tests/fast/api/test_insert_into.py | AldoMyrtaj/duckdb | 2,816 | 11140066 | import duckdb
from pandas import DataFrame
import pytest
class TestInsertInto(object):
def test_insert_into_schema(self, duckdb_cursor):
# open connection
con = duckdb.connect()
con.execute('CREATE SCHEMA s')
con.execute('CREATE TABLE s.t (id INTEGER PRIMARY KEY)')
# make relation
df = DataFrame([1],columns=['id'])
rel = con.from_df(df)
rel.insert_into('s.t')
assert con.execute("select * from s.t").fetchall() == [(1,)]
# This should fail since this will go to default schema
with pytest.raises(RuntimeError):
rel.insert_into('t')
#If we add t in the default schema it should work.
con.execute('CREATE TABLE t (id INTEGER PRIMARY KEY)')
rel.insert_into('t')
assert con.execute("select * from t").fetchall() == [(1,)] |
atcoder/abc053/a.py | Ashindustry007/competitive-programming | 506 | 11140095 | #!/usr/bin/env python3
# https://abc053.contest.atcoder.jp/tasks/abc053_a
x = int(input())
if x < 1200: print('ABC')
else: print('ARC')
|
tests/unit/utils/test_static.py | fairhopeweb/warehouse | 3,103 | 11140114 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from warehouse.utils.static import ManifestCacheBuster
class TestManifestCacheBuster:
def test_returns_when_valid(self, monkeypatch):
monkeypatch.setattr(
ManifestCacheBuster,
"get_manifest",
lambda x: {"/the/path/style.css": "/the/busted/path/style.css"},
)
cb = ManifestCacheBuster("warehouse:static/dist/manifest.json")
result = cb(None, "/the/path/style.css", {"keyword": "arg"})
assert result == ("/the/busted/path/style.css", {"keyword": "arg"})
def test_raises_when_invalid(self, monkeypatch):
monkeypatch.setattr(ManifestCacheBuster, "get_manifest", lambda x: {})
cb = ManifestCacheBuster("warehouse:static/dist/manifest.json")
with pytest.raises(ValueError):
cb(None, "/the/path/style.css", {"keyword": "arg"})
def test_returns_when_invalid_and_not_strict(self, monkeypatch):
monkeypatch.setattr(ManifestCacheBuster, "get_manifest", lambda x: {})
cb = ManifestCacheBuster("warehouse:static/dist/manifest.json", strict=False)
result = cb(None, "/the/path/style.css", {"keyword": "arg"})
assert result == ("/the/path/style.css", {"keyword": "arg"})
|
opendatatools/datayes/robo_agent.py | solider245/OpenData | 1,179 | 11140137 | <filename>opendatatools/datayes/robo_agent.py
from opendatatools.common import RestAgent
import pandas as pd
import json
top_items_map = {
'中国宏观' : "402273",
'行业经济' : "771263",
'国际宏观' : "1138921",
'特色数据' : "632815",
'市场行情' : 'RRP1349982',
'公司数据' : 'RRP1',
}
class RoboAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.token = ""
def _extract_result(self, response):
jsonobj = json.loads(response)
code = None
message = None
if "code" in jsonobj:
code = jsonobj['code']
if "message" in jsonobj:
message = jsonobj['message']
return code, message
def _extract_content(self, response, key='content'):
jsonobj = json.loads(response)
if key in jsonobj:
return jsonobj[key]
return None
def login(self, username, password):
url = "https://app.datayes.com/server/usermaster/authenticate/v1.json"
param = {
'x-requested-with': 'XMLHttpRequest',
'username' : username,
'password' : password,
}
'''
{"code":0,"message":"Success","content":{"result":"FAIL","userId":0,"principalName":"<EMAIL>","tenantId":0,"accountId":0}}
{"code":0,"message":"Success","content":{"result":"SUCCESS","userId":9957,"principalName":"<EMAIL>","tenantId":0,"token":{"tokenString":"<KEY>","type":"WEB","expiry":1533996916631,"expired":false},"redirectUrl":"https://app.wmcloud.com/cloud-portal/#/portal","accountId":18975}}
'''
response = self.do_request(url, param=param, method='POST')
if response is None:
            return None, 'Failed to fetch data'
        code, message = self._extract_result(response)
        if code is None or code != 0:
            return False, "Login failed: " + message
        content = self._extract_content(response)
        if content is None:
            return False, "Login failed: no content returned"
        result = content['result']
        if result == "FAIL":
            return False, "Login failed"
        tokenString = content['token']['tokenString']
        self.token = tokenString
        return True, "Login succeeded"
def get_top_items(self):
return top_items_map
def get_sub_items(self, itemid):
url = 'https://gw.datayes.com/rrp/web/supervisor/macro/%s' % itemid
response = self.do_request(url)
if response is None:
return None, "获取数据失败"
code, message = self._extract_result(response)
if code is None or code != 1:
return None, "获取数据失败:" + message
data = self._extract_content(response, 'data')
if data is None:
return None, "获取数据失败,没有返回data"
df = pd.DataFrame(data['childData'])
return df, ""
def get_series(self, seriesid):
url = "https://gw.datayes.com/rrp/web/dataCenter/indic/%s?compare=false" % seriesid
response = self.do_request(url, method='GET', encoding='gzip')
if response is None:
return None, None, "获取数据失败"
code, message = self._extract_result(response)
if code is None or code != 1:
return None, None, "获取数据失败:" + message
data = self._extract_content(response, 'data')
if data is None:
return None, None, "获取数据失败,没有返回data"
df_data = pd.DataFrame(data['data'])
info = data['indic']
return df_data, info, ""
|
9_Alexnet_fastai/alexnet.py | Sara-Rajaee/Deep_learning_explorations | 154 | 11140141 | # Import necessary packages
import torch.nn as nn
import numpy as np
class AlexNet(nn.Module):
def __init__(self, features, n_class=1000):
super(AlexNet, self).__init__()
self.features = features
# (FC=>ACT=>BN=>DO)x2=>FC=>SOFTMAX
self.classifier = nn.Sequential(
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(),
nn.BatchNorm1d(4096),
nn.Dropout(p = 0.5),
nn.Linear(4096, 4096),
nn.ReLU(),
nn.BatchNorm1d(4096),
nn.Dropout(p=0.5),
nn.Linear(4096, n_class)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
return x
def make_layers():
layers = []
in_channels = 3
## CONV=>ACT=>BN=>POOL=>DO
layers += [nn.Conv2d(in_channels, out_channels = 96, kernel_size = 11, stride = 4, padding = 2)]
layers += [nn.ReLU()]
layers += [nn.BatchNorm2d(96)]
layers += [nn.MaxPool2d(kernel_size = 3, stride = 2)]
layers += [nn.Dropout(p = 0.25)]
## CONV=>ACT=>BN=>POOL=>DO
layers += [nn.Conv2d(96, out_channels = 256, kernel_size = 5, stride = 1, padding = 2)]
layers += [nn.ReLU()]
layers += [nn.BatchNorm2d(256)]
layers += [nn.MaxPool2d(kernel_size = 3, stride = 2)]
layers += [nn.Dropout(p = 0.25)]
## ((CONV=>ACT=>BN)x3
in_channels = 256
for op in [384, 384, 256]:
layers += [nn.Conv2d(in_channels, op, kernel_size = 3, padding = 1)]
layers += [nn.ReLU()]
layers += [nn.BatchNorm2d(op)]
in_channels = op
## POOL=>DO
layers += [nn.MaxPool2d(kernel_size = 3, stride = 2)]
layers += [nn.Dropout(p=0.25)]
return nn.Sequential(*layers)
def ALEXNet(**kwargs):
model = AlexNet(make_layers(), **kwargs)
return model
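if __name__ == "__main__":
    # Quick sanity check (illustrative only): with 224x224 inputs the feature
    # extractor yields 256 x 6 x 6 activations, matching the classifier's first
    # Linear layer. A batch size of 2 is used because BatchNorm1d needs more
    # than one sample in training mode.
    import torch

    model = ALEXNet(n_class=10)
    out = model(torch.randn(2, 3, 224, 224))
    print(out.shape)  # torch.Size([2, 10])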
|
tests/models/test_standard_absolute_deviation.py | selimfirat/pysad | 155 | 11140166 |
def test_standard_absolute_deviation():
from pysad.models import StandardAbsoluteDeviation
import numpy as np
from numpy.testing import assert_raises
from pysad.utils import fix_seed
fix_seed(61)
X = np.random.rand(150, 1)
model = StandardAbsoluteDeviation(substracted_statistic="mean")
model = model.fit(X)
y_pred = model.score(X)
assert y_pred.shape == (X.shape[0],)
model = StandardAbsoluteDeviation(substracted_statistic="median")
model = model.fit(X)
y_pred = model.score(X)
assert y_pred.shape == (X.shape[0],)
with assert_raises(ValueError):
StandardAbsoluteDeviation(substracted_statistic="asd")
with assert_raises(ValueError):
StandardAbsoluteDeviation(substracted_statistic=None)
|
parallel_wavegan/losses/adversarial_loss.py | A-Quarter-Mile/ParallelWaveGAN | 1,023 | 11140167 | # -*- coding: utf-8 -*-
# Copyright 2021 <NAME>
# MIT License (https://opensource.org/licenses/MIT)
"""Adversarial loss modules."""
import torch
import torch.nn.functional as F
class GeneratorAdversarialLoss(torch.nn.Module):
"""Generator adversarial loss module."""
def __init__(
self,
average_by_discriminators=True,
loss_type="mse",
):
"""Initialize GeneratorAversarialLoss module."""
super().__init__()
self.average_by_discriminators = average_by_discriminators
assert loss_type in ["mse", "hinge"], f"{loss_type} is not supported."
if loss_type == "mse":
self.criterion = self._mse_loss
else:
self.criterion = self._hinge_loss
def forward(self, outputs):
"""Calcualate generator adversarial loss.
Args:
outputs (Tensor or list): Discriminator outputs or list of
discriminator outputs.
Returns:
Tensor: Generator adversarial loss value.
"""
if isinstance(outputs, (tuple, list)):
adv_loss = 0.0
for i, outputs_ in enumerate(outputs):
if isinstance(outputs_, (tuple, list)):
# NOTE(kan-bayashi): case including feature maps
outputs_ = outputs_[-1]
adv_loss += self.criterion(outputs_)
if self.average_by_discriminators:
adv_loss /= i + 1
else:
adv_loss = self.criterion(outputs)
return adv_loss
def _mse_loss(self, x):
return F.mse_loss(x, x.new_ones(x.size()))
def _hinge_loss(self, x):
return -x.mean()
class DiscriminatorAdversarialLoss(torch.nn.Module):
"""Discriminator adversarial loss module."""
def __init__(
self,
average_by_discriminators=True,
loss_type="mse",
):
"""Initialize DiscriminatorAversarialLoss module."""
super().__init__()
self.average_by_discriminators = average_by_discriminators
assert loss_type in ["mse", "hinge"], f"{loss_type} is not supported."
if loss_type == "mse":
self.fake_criterion = self._mse_fake_loss
self.real_criterion = self._mse_real_loss
else:
self.fake_criterion = self._hinge_fake_loss
self.real_criterion = self._hinge_real_loss
def forward(self, outputs_hat, outputs):
"""Calcualate discriminator adversarial loss.
Args:
outputs_hat (Tensor or list): Discriminator outputs or list of
discriminator outputs calculated from generator outputs.
outputs (Tensor or list): Discriminator outputs or list of
discriminator outputs calculated from groundtruth.
Returns:
Tensor: Discriminator real loss value.
Tensor: Discriminator fake loss value.
"""
if isinstance(outputs, (tuple, list)):
real_loss = 0.0
fake_loss = 0.0
for i, (outputs_hat_, outputs_) in enumerate(zip(outputs_hat, outputs)):
if isinstance(outputs_hat_, (tuple, list)):
# NOTE(kan-bayashi): case including feature maps
outputs_hat_ = outputs_hat_[-1]
outputs_ = outputs_[-1]
real_loss += self.real_criterion(outputs_)
fake_loss += self.fake_criterion(outputs_hat_)
if self.average_by_discriminators:
fake_loss /= i + 1
real_loss /= i + 1
else:
real_loss = self.real_criterion(outputs)
fake_loss = self.fake_criterion(outputs_hat)
return real_loss, fake_loss
def _mse_real_loss(self, x):
return F.mse_loss(x, x.new_ones(x.size()))
def _mse_fake_loss(self, x):
return F.mse_loss(x, x.new_zeros(x.size()))
def _hinge_real_loss(self, x):
return -torch.mean(torch.min(x - 1, x.new_zeros(x.size())))
def _hinge_fake_loss(self, x):
return -torch.mean(torch.min(-x - 1, x.new_zeros(x.size())))
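if __name__ == "__main__":
    # Illustrative sketch only: both loss modules accept a single discriminator
    # output tensor or a list of outputs (optionally with feature maps, in
    # which case only the last element is used). The shapes below are
    # arbitrary.
    gen_adv_loss = GeneratorAdversarialLoss(loss_type="mse")
    dis_adv_loss = DiscriminatorAdversarialLoss(loss_type="mse")

    outputs_hat = [torch.randn(4, 1, 100) for _ in range(3)]  # from generated audio
    outputs = [torch.randn(4, 1, 100) for _ in range(3)]      # from real audio

    adv_loss = gen_adv_loss(outputs_hat)
    real_loss, fake_loss = dis_adv_loss(outputs_hat, outputs)
    print(adv_loss.item(), real_loss.item(), fake_loss.item())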
|
tensorboard/main_lib.py | Digitaltransform/tensorboard | 6,139 | 11140169 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers for TensorBoard main module."""
import os
import sys
import absl.logging
from tensorboard.compat import tf
def global_init():
"""Modifies the global environment for running TensorBoard as main.
This functions changes global state in the Python process, so it should
not be called from library routines.
"""
# TF versions prior to 1.15.0 included default GCS filesystem caching logic
# that interacted pathologically with the pattern of reads used by TensorBoard
# for logdirs. See: https://github.com/tensorflow/tensorboard/issues/1225
# The problematic behavior was fixed in 1.15.0 by
# https://github.com/tensorflow/tensorflow/commit/e43b94649d3e1ac5d538e4eca9166b899511d681
# but for older versions of TF, we avoid a regression by setting this env var to
# disable the cache, which must be done before the first import of tensorflow.
os.environ["GCS_READ_CACHE_DISABLED"] = "1"
if getattr(tf, "__version__", "stub") == "stub":
print(
"TensorFlow installation not found - running with reduced feature set.",
file=sys.stderr,
)
# Only emit log messages at WARNING and above by default to reduce spam.
absl.logging.set_verbosity(absl.logging.WARNING)
|
junit2htmlreport/matrix.py | camresp/junit2html | 112 | 11140176 | <filename>junit2htmlreport/matrix.py
"""
Handle multiple parsed junit reports
"""
from __future__ import unicode_literals
import os
from . import parser
from .common import ReportContainer
from .parser import SKIPPED, FAILED, PASSED, ABSENT
from .render import HTMLMatrix, HTMLReport
UNTESTED = "untested"
PARTIAL_PASS = "partial pass"
PARTIAL_FAIL = "partial failure"
TOTAL_FAIL = "total failure"
class ReportMatrix(ReportContainer):
"""
Load and handle several report files
"""
def __init__(self):
super(ReportMatrix, self).__init__()
self.cases = {}
self.classes = {}
self.casenames = {}
self.result_stats = {}
self.case_results = {}
def add_case_result(self, case):
testclass = case.testclass.name
casename = case.name
if testclass not in self.case_results:
self.case_results[testclass] = {}
if casename not in self.case_results[testclass]:
self.case_results[testclass][casename] = []
self.case_results[testclass][casename].append(case.outcome())
def report_order(self):
return sorted(self.reports.keys())
def short_outcome(self, outcome):
if outcome == PASSED:
return "/"
elif outcome == SKIPPED:
return "s"
elif outcome == FAILED:
return "f"
elif outcome == TOTAL_FAIL:
return "F"
elif outcome == PARTIAL_PASS:
return "%"
elif outcome == PARTIAL_FAIL:
return "X"
elif outcome == UNTESTED:
return "U"
return "?"
def add_report(self, filename):
"""
Load a report into the matrix
:param filename:
:return:
"""
parsed = parser.Junit(filename=filename)
filename = os.path.basename(filename)
self.reports[filename] = parsed
for suite in parsed.suites:
for testclass in suite.classes:
if testclass not in self.classes:
self.classes[testclass] = {}
if testclass not in self.casenames:
self.casenames[testclass] = list()
self.classes[testclass][filename] = suite.classes[testclass]
for testcase in self.classes[testclass][filename].cases:
name = testcase.name.strip()
if name not in self.casenames[testclass]:
self.casenames[testclass].append(name)
if testclass not in self.cases:
self.cases[testclass] = {}
if name not in self.cases[testclass]:
self.cases[testclass][name] = {}
self.cases[testclass][name][filename] = testcase
outcome = testcase.outcome()
self.add_case_result(testcase)
self.result_stats[outcome] = 1 + self.result_stats.get(
outcome, 0)
def summary(self):
"""
Render a summary of the matrix
:return:
"""
raise NotImplementedError()
def combined_result_list(self, classname, casename):
"""
        Combine the results of all instances of the given case
:param classname:
:param casename:
:return:
"""
if classname in self.case_results:
if casename in self.case_results[classname]:
results = self.case_results[classname][casename]
return self.combined_result(results)
return " ", ""
def combined_result(self, results):
"""
Given a list of results, produce a "combined" overall result
:param results:
:return:
"""
if results:
if PASSED in results:
if FAILED in results:
return self.short_outcome(PARTIAL_FAIL), PARTIAL_FAIL.title()
return self.short_outcome(PASSED), PASSED.title()
if FAILED in results:
return self.short_outcome(FAILED), FAILED.title()
if SKIPPED in results:
return self.short_outcome(UNTESTED), UNTESTED.title()
return " ", ""
class HtmlReportMatrix(ReportMatrix):
"""
Render a matrix report as html
"""
def __init__(self, outdir):
super(HtmlReportMatrix, self).__init__()
self.outdir = outdir
def add_report(self, filename):
"""
Load a report
"""
super(HtmlReportMatrix, self).add_report(filename)
basename = os.path.basename(filename)
# make the individual report too
report = self.reports[basename].html()
if self.outdir != "" and not os.path.exists(self.outdir):
os.makedirs(self.outdir)
with open(
os.path.join(self.outdir, basename) + ".html", "wb") as filehandle:
filehandle.write(report.encode("utf-8"))
def short_outcome(self, outcome):
if outcome == PASSED:
return "ok"
return super(HtmlReportMatrix, self).short_outcome(outcome)
def short_axis(self, axis):
if axis.endswith(".xml"):
return axis[:-4]
return axis
def summary(self):
"""
Render the html
:return:
"""
html_matrix = HTMLMatrix(self)
return str(html_matrix)
class TextReportMatrix(ReportMatrix):
"""
Render a matrix report as text
"""
def summary(self):
"""
Render as a string
:return:
"""
output = "\nMatrix Test Report\n"
output += "===================\n"
axis = list(self.reports.keys())
axis.sort()
# find the longest classname or test case name
left_indent = 0
for classname in self.classes:
left_indent = max(len(classname), left_indent)
for casename in self.casenames[classname]:
left_indent = max(len(casename), left_indent)
# render the axis headings in a stepped tree
treelines = ""
for filename in self.report_order():
output += "{} {}{}\n".format(" " * left_indent, treelines,
filename)
treelines += "| "
output += "{} {}\n".format(" " * left_indent, treelines)
# render in groups of the same class
for classname in self.classes:
# new class
output += "{} \n".format(classname)
# print the case name
for casename in sorted(set(self.casenames[classname])):
output += "- {}{} ".format(casename,
" " * (left_indent - len(casename)))
# print each test and its result for each axis
case_data = ""
for axis in self.report_order():
if axis not in self.cases[classname][casename]:
case_data += " "
else:
testcase = self.cases[classname][casename][axis]
if testcase.skipped:
case_data += "s "
elif testcase.failure:
case_data += "f "
else:
case_data += "/ "
combined, combined_name = self.combined_result(
self.case_results[classname][testcase.name])
output += case_data
output += " {} {}\n".format(combined, combined_name)
# print the result stats
output += "\n"
output += "-" * 79
output += "\n"
output += "Test Results:\n"
for outcome in sorted(self.result_stats):
output += " {:<12} : {:>6}\n".format(
outcome.title(),
self.result_stats[outcome])
return output
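if __name__ == "__main__":
    # Usage sketch only (not part of the original module): aggregate several
    # junit XML files, given as command line arguments, into a text matrix.
    import sys

    matrix = TextReportMatrix()
    for report_file in sys.argv[1:]:
        matrix.add_report(report_file)
    print(matrix.summary())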
|
scitbx/cross_entropy.py | dperl-sol/cctbx_project | 155 | 11140185 | from __future__ import absolute_import, division, print_function
from scitbx.array_family import flex
from scitbx.python_utils import random_transform
import sys
from six.moves import range
from six.moves import zip
class cross_entropy_optimizer(object):
def __init__(self, function, mean, sigma, alpha, beta, q=7, elite_size=10, sample_size=50, eps=1e-7, inject_eps=1e-3, n_max=50000, monitor_cycle=50):
self.function = function
self.mean = mean
self.sigma = sigma
self.alpha = alpha
self.beta = beta
self.q = q
self.n = function.n
self.n_in = elite_size
self.sample_size = sample_size
self.eps = eps
self.inject_eps = inject_eps
self.monitor_cycle = monitor_cycle
self.n_max = n_max
self.count = 0.0
self.monitor_cycle=monitor_cycle
self.last_mean = self.mean.deep_copy()
self.last_sigma = self.sigma.deep_copy()
self.best_sol = self.mean.deep_copy()
self.best_score = 1e90
self.best_score = self.compute_target( self.best_sol )
self.best_sol_found = 0
converged = False
while not converged:
self.count += 1.0
self.generate_new_means_and_sigmas()
self.inject()
converged = self.convergence_test()
def inject(self,c=3.0):
mdelta = flex.max(self.mean - self.last_mean)
sdelta = flex.min(self.sigma)
if sdelta < self.inject_eps:
self.sigma = self.sigma + max(mdelta*c,c*self.inject_eps)
self.last_mean = self.mean.deep_copy()
self.last_sigma = self.sigma.deep_copy()
def compute_target(self,x):
t = self.function.target(x)
if t < self.best_score:
self.best_score = t
self.best_sol = x.deep_copy()
self.best_sol_found = self.count
return t
def generate_and_score_samples(self):
sample_list = []
target_list = flex.double()
for ii in range(self.sample_size):
x = random_transform.t_variate(a=max(2,self.n-1),N=self.n)
x = x*self.sigma + self.mean
t = self.compute_target(x )
sample_list.append( x )
target_list.append( t )
order = flex.sort_permutation( flex.double(target_list) )
return sample_list, t, order
def generate_new_means_and_sigmas(self):
s,t,o = self.generate_and_score_samples()
nm = self.mean*0.0
nv = self.mean*0.0
for ii in range(self.n_in):
nm = nm + s[o[ii]]
nv = nv + s[o[ii]]*s[o[ii]]
nm = nm/self.n_in
nv = nv/self.n_in - nm*nm
nv = nv
self.mean = self.mean*(1.0-self.alpha) + self.alpha*nm
beta = self.beta -self.beta*((1.0-1.0/self.count)**self.q)
self.sigma = flex.sqrt( self.sigma*self.sigma*(1.0-beta) + beta*nv)
self.compute_target( self.mean )
def print_status(self,out=None):
if out is None:
out = sys.stdout
print(" Cycle: %i"%self.count, file=out)
for mm in self.best_sol:
print("%5.3e "%mm, file=out)
print("Target : %5.3e"%self.best_score, file=out)
print(file=out)
def convergence_test(self):
max_var = flex.max( self.sigma )
if max_var < self.eps:
return True
if self.count - self.best_sol_found > self.monitor_cycle:
return True
if self.count == self.n_max:
return True
return False
class test_rosenbrock_function(object):
def __init__(self, dim=4):
self.n = dim*2
self.dim = dim
self.means = flex.double( self.n, 2.0 )
self.sigmas = flex.double( self.n, 5.0 )
self.target_count=0
self.optimizer = cross_entropy_optimizer(self,
mean=self.means,
sigma=self.sigmas,
alpha=0.75,
beta=0.75,
q=8.5,
elite_size=10,
sample_size=50, inject_eps=1e-4,
monitor_cycle=500)
self.sol = self.optimizer.best_sol
for ii in self.sol:
assert abs( ii-1.0 ) < 1e-2
def target(self, vector):
self.target_count += 1
x_vec = vector[0:self.dim]
y_vec = vector[self.dim:]
result=0
for x,y in zip(x_vec,y_vec):
result+=100.0*((y-x*x)**2.0) + (1-x)**2.0
return result
def run():
flex.set_random_seed(0)
test_rosenbrock_function(1)
if __name__ == "__main__":
run()
print("OK")
|
tests/functional/fixtures/authentication.py | pombredanne/h | 2,103 | 11140194 | import pytest
__all__ = (
"user",
"login_user",
"with_logged_in_user",
"with_logged_in_staff_member",
"with_logged_in_admin",
)
@pytest.fixture
def user(factories):
return factories.User()
@pytest.fixture
def login_user(db_session, app, user):
def login_user(staff=False, admin=False):
# This is the hash for `pass` used below
        user.password = "<PASSWORD>"
user.staff = staff
user.admin = admin
db_session.commit()
login_page = app.get("/login")
login_page.form["username"] = user.username
login_page.form["password"] = "<PASSWORD>"
login_page.form.submit()
return login_user
@pytest.fixture
def with_logged_in_user(login_user):
login_user()
@pytest.fixture
def with_logged_in_staff_member(login_user):
login_user(staff=True)
@pytest.fixture
def with_logged_in_admin(login_user):
login_user(admin=True)
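# Usage sketch (illustrative; the route and assertion are hypothetical): a test that
# needs an authenticated session simply requests one of the fixtures above.
#
#   def test_profile_shows_username(app, user, with_logged_in_user):
#       res = app.get("/account/profile")
#       assert user.username in res.text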
|
.venv/lib/python3.8/site-packages/numpy/core/__init__.py | acrucetta/Chicago_COVI_WebApp | 6,989 | 11140195 | """
Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
Please note that this module is private. All functions and objects
are available in the main ``numpy`` namespace - use that instead.
"""
from numpy.version import version as __version__
import os
# disables OpenBLAS affinity setting of the main thread that limits
# python threads or processes to one core
env_added = []
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
if envkey not in os.environ:
os.environ[envkey] = '1'
env_added.append(envkey)
try:
from . import multiarray
except ImportError as exc:
import sys
msg = """
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
Importing the numpy C-extensions failed. This error can happen for
many reasons, often due to issues with your setup or how NumPy was
installed.
We have compiled some common reasons and troubleshooting tips at:
https://numpy.org/devdocs/user/troubleshooting-importerror.html
Please note and check the following:
* The Python version is: Python%d.%d from "%s"
* The NumPy version is: "%s"
and make sure that they are the versions you expect.
Please carefully study the documentation linked above for further help.
Original error was: %s
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
__version__, exc)
raise ImportError(msg)
finally:
for envkey in env_added:
del os.environ[envkey]
del envkey
del env_added
del os
from . import umath
# Check that multiarray,umath are pure python modules wrapping
# _multiarray_umath and not either of the old c-extension modules
if not (hasattr(multiarray, '_multiarray_umath') and
hasattr(umath, '_multiarray_umath')):
import sys
path = sys.modules['numpy'].__path__
msg = ("Something is wrong with the numpy installation. "
"While importing we detected an older version of "
"numpy in {}. One method of fixing this is to repeatedly uninstall "
"numpy until none is found, then reinstall this version.")
raise ImportError(msg.format(path))
from . import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
from . import numeric
from .numeric import *
from . import fromnumeric
from .fromnumeric import *
from . import defchararray as char
from . import records as rec
from .records import *
from .memmap import *
from .defchararray import chararray
from . import function_base
from .function_base import *
from . import machar
from .machar import *
from . import getlimits
from .getlimits import *
from . import shape_base
from .shape_base import *
from . import einsumfunc
from .einsumfunc import *
del nt
from .fromnumeric import amax as max, amin as min, round_ as round
from .numeric import absolute as abs
# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
from . import _add_newdocs
# add these for module-freeze analysis (like PyInstaller)
from . import _dtype_ctypes
from . import _internal
from . import _dtype
from . import _methods
__all__ = ['char', 'rec', 'memmap']
__all__ += numeric.__all__
__all__ += fromnumeric.__all__
__all__ += rec.__all__
__all__ += ['chararray']
__all__ += function_base.__all__
__all__ += machar.__all__
__all__ += getlimits.__all__
__all__ += shape_base.__all__
__all__ += einsumfunc.__all__
# Make it possible so that ufuncs can be pickled
# Here are the loading and unloading functions
# The name numpy.core._ufunc_reconstruct must be
# available for unpickling to work.
def _ufunc_reconstruct(module, name):
# The `fromlist` kwarg is required to ensure that `mod` points to the
# inner-most module rather than the parent package when module name is
# nested. This makes it possible to pickle non-toplevel ufuncs such as
# scipy.special.expit for instance.
mod = __import__(module, fromlist=[name])
return getattr(mod, name)
def _ufunc_reduce(func):
from pickle import whichmodule
name = func.__name__
return _ufunc_reconstruct, (whichmodule(func, name), name)
import copyreg
copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
del copyreg
del _ufunc_reduce
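# Illustrative round-trip enabled by the hooks above (shown as a comment): ufuncs
# pickle by module and name, so unpickling returns the very same object, e.g.
#   import pickle, numpy as np
#   assert pickle.loads(pickle.dumps(np.add)) is np.add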
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
|
mediasoup-client/deps/webrtc/src/build/android/lighttpd_server.py | skgwazap/mediasoup-client-android | 128 | 11140199 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a convenient wrapper for spawning a test lighttpd instance.
Usage:
lighttpd_server PATH_TO_DOC_ROOT
"""
import codecs
import contextlib
import httplib
import os
import random
import shutil
import socket
import subprocess
import sys
import tempfile
import time
from pylib import constants
from pylib import pexpect
class LighttpdServer(object):
"""Wraps lighttpd server, providing robust startup.
Args:
document_root: Path to root of this server's hosted files.
port: TCP port on the _host_ machine that the server will listen on. If
omitted it will attempt to use 9000, or if unavailable it will find
a free port from 8001 - 8999.
lighttpd_path, lighttpd_module_path: Optional paths to lighttpd binaries.
base_config_path: If supplied this file will replace the built-in default
lighttpd config file.
extra_config_contents: If specified, this string will be appended to the
base config (default built-in, or from base_config_path).
config_path, error_log, access_log: Optional paths where the class should
place temporary files for this session.
"""
def __init__(self, document_root, port=None,
lighttpd_path=None, lighttpd_module_path=None,
base_config_path=None, extra_config_contents=None,
config_path=None, error_log=None, access_log=None):
self.temp_dir = tempfile.mkdtemp(prefix='lighttpd_for_chrome_android')
self.document_root = os.path.abspath(document_root)
self.fixed_port = port
self.port = port or constants.LIGHTTPD_DEFAULT_PORT
self.server_tag = 'LightTPD ' + str(random.randint(111111, 999999))
self.lighttpd_path = lighttpd_path or '/usr/sbin/lighttpd'
self.lighttpd_module_path = lighttpd_module_path or '/usr/lib/lighttpd'
self.base_config_path = base_config_path
self.extra_config_contents = extra_config_contents
self.config_path = config_path or self._Mktmp('config')
self.error_log = error_log or self._Mktmp('error_log')
self.access_log = access_log or self._Mktmp('access_log')
self.pid_file = self._Mktmp('pid_file')
self.process = None
def _Mktmp(self, name):
return os.path.join(self.temp_dir, name)
@staticmethod
def _GetRandomPort():
    # The port range for the test server is defined in constants.py.
return random.randint(constants.LIGHTTPD_RANDOM_PORT_FIRST,
constants.LIGHTTPD_RANDOM_PORT_LAST)
def StartupHttpServer(self):
"""Starts up a http server with specified document root and port."""
# If we want a specific port, make sure no one else is listening on it.
if self.fixed_port:
self._KillProcessListeningOnPort(self.fixed_port)
while True:
if self.base_config_path:
# Read the config
with codecs.open(self.base_config_path, 'r', 'utf-8') as f:
config_contents = f.read()
else:
config_contents = self._GetDefaultBaseConfig()
if self.extra_config_contents:
config_contents += self.extra_config_contents
# Write out the config, filling in placeholders from the members of |self|
with codecs.open(self.config_path, 'w', 'utf-8') as f:
f.write(config_contents % self.__dict__)
if (not os.path.exists(self.lighttpd_path) or
not os.access(self.lighttpd_path, os.X_OK)):
raise EnvironmentError(
'Could not find lighttpd at %s.\n'
'It may need to be installed (e.g. sudo apt-get install lighttpd)'
% self.lighttpd_path)
# pylint: disable=no-member
self.process = pexpect.spawn(self.lighttpd_path,
['-D', '-f', self.config_path,
'-m', self.lighttpd_module_path],
cwd=self.temp_dir)
client_error, server_error = self._TestServerConnection()
if not client_error:
assert int(open(self.pid_file, 'r').read()) == self.process.pid
break
self.process.close()
if self.fixed_port or 'in use' not in server_error:
print 'Client error:', client_error
print 'Server error:', server_error
return False
self.port = self._GetRandomPort()
return True
def ShutdownHttpServer(self):
"""Shuts down our lighttpd processes."""
if self.process:
self.process.terminate()
shutil.rmtree(self.temp_dir, ignore_errors=True)
def _TestServerConnection(self):
# Wait for server to start
server_msg = ''
for timeout in xrange(1, 5):
client_error = None
try:
with contextlib.closing(httplib.HTTPConnection(
'127.0.0.1', self.port, timeout=timeout)) as http:
http.set_debuglevel(timeout > 3)
http.request('HEAD', '/')
r = http.getresponse()
r.read()
if (r.status == 200 and r.reason == 'OK' and
r.getheader('Server') == self.server_tag):
return (None, server_msg)
client_error = ('Bad response: %s %s version %s\n ' %
(r.status, r.reason, r.version) +
'\n '.join([': '.join(h) for h in r.getheaders()]))
except (httplib.HTTPException, socket.error) as client_error:
pass # Probably too quick connecting: try again
# Check for server startup error messages
# pylint: disable=no-member
ix = self.process.expect([pexpect.TIMEOUT, pexpect.EOF, '.+'],
timeout=timeout)
if ix == 2: # stdout spew from the server
server_msg += self.process.match.group(0) # pylint: disable=no-member
      elif ix == 1:  # EOF -- server has quit, so give up.
client_error = client_error or 'Server exited'
break
return (client_error or 'Timeout', server_msg)
@staticmethod
def _KillProcessListeningOnPort(port):
"""Checks if there is a process listening on port number |port| and
terminates it if found.
Args:
port: Port number to check.
"""
if subprocess.call(['fuser', '-kv', '%d/tcp' % port]) == 0:
# Give the process some time to terminate and check that it is gone.
time.sleep(2)
assert subprocess.call(['fuser', '-v', '%d/tcp' % port]) != 0, \
'Unable to kill process listening on port %d.' % port
@staticmethod
def _GetDefaultBaseConfig():
return """server.tag = "%(server_tag)s"
server.modules = ( "mod_access",
"mod_accesslog",
"mod_alias",
"mod_cgi",
"mod_rewrite" )
# default document root required
#server.document-root = "."
# files to check for if .../ is requested
index-file.names = ( "index.php", "index.pl", "index.cgi",
"index.html", "index.htm", "default.htm" )
# mimetype mapping
mimetype.assign = (
".gif" => "image/gif",
".jpg" => "image/jpeg",
".jpeg" => "image/jpeg",
".png" => "image/png",
".svg" => "image/svg+xml",
".css" => "text/css",
".html" => "text/html",
".htm" => "text/html",
".xhtml" => "application/xhtml+xml",
".xhtmlmp" => "application/vnd.wap.xhtml+xml",
".js" => "application/x-javascript",
".log" => "text/plain",
".conf" => "text/plain",
".text" => "text/plain",
".txt" => "text/plain",
".dtd" => "text/xml",
".xml" => "text/xml",
".manifest" => "text/cache-manifest",
)
# Use the "Content-Type" extended attribute to obtain mime type if possible
mimetype.use-xattr = "enable"
##
# which extensions should not be handled via static-file transfer
#
# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
static-file.exclude-extensions = ( ".php", ".pl", ".cgi" )
server.bind = "127.0.0.1"
server.port = %(port)s
## virtual directory listings
dir-listing.activate = "enable"
#dir-listing.encoding = "iso-8859-2"
#dir-listing.external-css = "style/oldstyle.css"
## enable debugging
#debug.log-request-header = "enable"
#debug.log-response-header = "enable"
#debug.log-request-handling = "enable"
#debug.log-file-not-found = "enable"
#### SSL engine
#ssl.engine = "enable"
#ssl.pemfile = "server.pem"
# Autogenerated test-specific config follows.
cgi.assign = ( ".cgi" => "/usr/bin/env",
".pl" => "/usr/bin/env",
".asis" => "/bin/cat",
".php" => "/usr/bin/php-cgi" )
server.errorlog = "%(error_log)s"
accesslog.filename = "%(access_log)s"
server.upload-dirs = ( "/tmp" )
server.pid-file = "%(pid_file)s"
server.document-root = "%(document_root)s"
"""
def main(argv):
server = LighttpdServer(*argv[1:])
try:
if server.StartupHttpServer():
raw_input('Server running at http://127.0.0.1:%s -'
' press Enter to exit it.' % server.port)
else:
print 'Server exit code:', server.process.exitstatus
finally:
server.ShutdownHttpServer()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
var/spack/repos/builtin/packages/r-kernlab/package.py | kkauder/spack | 2,360 | 11140221 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RKernlab(RPackage):
"""Kernel-Based Machine Learning Lab
Kernel-based machine learning methods for classification, regression,
clustering, novelty detection, quantile regression and dimensionality
reduction. Among other methods 'kernlab' includes Support Vector Machines,
Spectral Clustering, Kernel PCA, Gaussian Processes and a QP solver."""
homepage = "https://cloud.r-project.org/package=kernlab"
url = "https://cloud.r-project.org/src/contrib/kernlab_0.9-25.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/kernlab"
version('0.9-29', sha256='c3da693a0041dd34f869e7b63a8d8cf7d4bc588ac601bcdddcf7d44f68b3106f')
version('0.9-27', sha256='f6add50ed4097f04d09411491625f8d46eafc4f003b1c1cff78a6fff8cc31dd4')
version('0.9-26', sha256='954940478c6fcf60433e50e43cf10d70bcb0a809848ca8b9d683bf371cd56077')
version('0.9-25', sha256='b9de072754bb03c02c4d6a5ca20f2290fd090de328b55ab334ac0b397ac2ca62')
depends_on('[email protected]:', type=('build', 'run'))
|
Char01 DQN/DQN_CartPole-v0.py | Yexiong-Zeng/Deep-reinforcement-learning-with-pytorch | 2,224 | 11140240 | import argparse
import pickle
from collections import namedtuple
from itertools import count
import os, time
import numpy as np
import matplotlib.pyplot as plt
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal, Categorical
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from tensorboardX import SummaryWriter
# Hyper-parameters
seed = 1
render = False
num_episodes = 2000
env = gym.make('CartPole-v0').unwrapped
num_state = env.observation_space.shape[0]
num_action = env.action_space.n
torch.manual_seed(seed)
env.seed(seed)
Transition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state'])
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(num_state, 100)
self.fc2 = nn.Linear(100, num_action)
def forward(self, x):
x = F.relu(self.fc1(x))
action_value = self.fc2(x)
return action_value
class DQN():
capacity = 8000
learning_rate = 1e-3
memory_count = 0
batch_size = 256
gamma = 0.995
update_count = 0
def __init__(self):
super(DQN, self).__init__()
self.target_net, self.act_net = Net(), Net()
self.memory = [None]*self.capacity
self.optimizer = optim.Adam(self.act_net.parameters(), self.learning_rate)
self.loss_func = nn.MSELoss()
self.writer = SummaryWriter('./DQN/logs')
def select_action(self,state):
state = torch.tensor(state, dtype=torch.float).unsqueeze(0)
value = self.act_net(state)
action_max_value, index = torch.max(value, 1)
action = index.item()
        if np.random.rand(1) >= 0.9:  # epsilon-greedy exploration
action = np.random.choice(range(num_action), 1).item()
return action
def store_transition(self,transition):
index = self.memory_count % self.capacity
self.memory[index] = transition
self.memory_count += 1
return self.memory_count >= self.capacity
def update(self):
if self.memory_count >= self.capacity:
state = torch.tensor([t.state for t in self.memory]).float()
action = torch.LongTensor([t.action for t in self.memory]).view(-1,1).long()
reward = torch.tensor([t.reward for t in self.memory]).float()
next_state = torch.tensor([t.next_state for t in self.memory]).float()
reward = (reward - reward.mean()) / (reward.std() + 1e-7)
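            # one-step TD target r + gamma * max_a' Q_target(s', a'), held fixed under no_grad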
with torch.no_grad():
target_v = reward + self.gamma * self.target_net(next_state).max(1)[0]
#Update...
for index in BatchSampler(SubsetRandomSampler(range(len(self.memory))), batch_size=self.batch_size, drop_last=False):
v = (self.act_net(state).gather(1, action))[index]
                loss = self.loss_func(target_v[index].unsqueeze(1), v)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.writer.add_scalar('loss/value_loss', loss, self.update_count)
self.update_count +=1
if self.update_count % 100 ==0:
self.target_net.load_state_dict(self.act_net.state_dict())
else:
print("Memory Buff is too less")
def main():
agent = DQN()
for i_ep in range(num_episodes):
state = env.reset()
if render: env.render()
for t in range(10000):
action = agent.select_action(state)
next_state, reward, done, info = env.step(action)
if render: env.render()
transition = Transition(state, action, reward, next_state)
agent.store_transition(transition)
state = next_state
if done or t >=9999:
agent.writer.add_scalar('live/finish_step', t+1, global_step=i_ep)
agent.update()
if i_ep % 10 == 0:
print("episodes {}, step is {} ".format(i_ep, t))
break
if __name__ == '__main__':
main()
|
corehq/apps/domain/views/tombstone.py | akashkj/commcare-hq | 471 | 11140246 | from datetime import datetime
from crispy_forms import layout as crispy
from django.contrib import messages
from django.http import HttpResponseRedirect
from corehq.apps.domain.dbaccessors import iter_all_domains_and_deleted_domains_with_name
from corehq.apps.domain.models import Domain
from corehq.apps.hqwebapp.crispy import FormActions, HQFormHelper
from django import forms
from django.utils.decorators import method_decorator
from corehq.apps.domain.decorators import require_superuser
from corehq.apps.hqadmin.views import BaseAdminSectionView
from django.utils.translation import ugettext_lazy as _
from corehq.util import reverse
@method_decorator(require_superuser, name='dispatch')
class TombstoneManagement(BaseAdminSectionView):
urlname = 'tombstone_management'
page_title = _("Prevent the use of specific domain names")
template_name = 'domain/tombstone_management.html'
form = None
domain_results = None
def get_context_data(self, **kwargs):
return {
'form': self.form or TombstoneManagementForm(),
'domains': self.domain_results or [],
}
def get(self, request, *args, **kwargs):
self.form = TombstoneManagementForm(self.request.GET)
if self.form.is_valid():
domain_names = self.form.cleaned_data['domains']
self.domain_results = []
for domain in domain_names:
projects = list(iter_all_domains_and_deleted_domains_with_name(domain))
self.domain_results.append((domain, projects))
return super().get(request, *args, **kwargs)
@require_superuser
def create_tombstone(request):
domain = request.POST.get('domain')
project = Domain.get_by_name(domain)
if project:
messages.error(
request,
"Could not create tombstone for {} because that domain already exists"
.format(domain))
else:
project = Domain(
doc_type='Domain-Deleted',
name=domain,
hr_name='{} (Created as a tombstone)'.format(domain),
is_active=False,
date_created=datetime.utcnow(),
creating_user=request.couch_user.username,
secure_submissions=True,
first_domain_for_user=False,
)
project.save()
messages.success(request, "Successfully created a tombstone for {}".format(domain))
return HttpResponseRedirect(reverse(TombstoneManagement.urlname))
class TombstoneManagementForm(forms.Form):
csv_domain_list = forms.CharField(
label="Comma separated domains",
widget=forms.Textarea()
)
@staticmethod
def split_csv(comma_separated_list):
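        # Strips whitespace and drops empty entries, e.g. "foo, , bar ," -> ["foo", "bar"]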
return list(
filter(None, (domain.strip() for domain in comma_separated_list.split(','))))
def clean(self):
csv_domain_list = self.cleaned_data.get('csv_domain_list', '')
self.cleaned_data['domains'] = self.split_csv(csv_domain_list)
return self.cleaned_data
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = HQFormHelper()
self.helper.form_method = 'get'
self.helper.layout = crispy.Layout(
'csv_domain_list',
FormActions(
crispy.Submit(
'',
'Check Domains'
)
)
)
|
apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/google/protobuf/internal/text_encoding_test.py | tharindu1st/apim-migration-resources | 14,668 | 11140275 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.text_encoding."""
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import text_encoding
TEST_VALUES = [
("foo\\rbar\\nbaz\\t",
"foo\\rbar\\nbaz\\t",
b"foo\rbar\nbaz\t"),
("\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
"\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
b"'full of \"sound\" and \"fury\"'"),
("signi\\\\fying\\\\ nothing\\\\",
"signi\\\\fying\\\\ nothing\\\\",
b"signi\\fying\\ nothing\\"),
("\\010\\t\\n\\013\\014\\r",
"\x08\\t\\n\x0b\x0c\\r",
b"\010\011\012\013\014\015")]
class TextEncodingTestCase(unittest.TestCase):
def testCEscape(self):
for escaped, escaped_utf8, unescaped in TEST_VALUES:
self.assertEqual(escaped,
text_encoding.CEscape(unescaped, as_utf8=False))
self.assertEqual(escaped_utf8,
text_encoding.CEscape(unescaped, as_utf8=True))
def testCUnescape(self):
for escaped, escaped_utf8, unescaped in TEST_VALUES:
self.assertEqual(unescaped, text_encoding.CUnescape(escaped))
self.assertEqual(unescaped, text_encoding.CUnescape(escaped_utf8))
if __name__ == "__main__":
unittest.main()
|
meltingpot/python/utils/scenarios/wrappers/all_observations_wrapper_test.py | vishalbelsare/meltingpot | 132 | 11140293 | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for all_observations_wrapper."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
import numpy as np
from meltingpot.python.utils.scenarios.wrappers import all_observations_wrapper
from meltingpot.python.utils.scenarios.wrappers import base
GLOBAL_KEY = all_observations_wrapper.GLOBAL_KEY
REWARDS_KEY = all_observations_wrapper.REWARDS_KEY
OBSERVATIONS_KEY = all_observations_wrapper.OBSERVATIONS_KEY
ACTIONS_KEY = all_observations_wrapper.ACTIONS_KEY
ACTION_1 = 'action_1'
ACTION_2 = 'action_2'
OBSERVATION_1 = 'observation_1'
OBSERVATION_2 = 'observation_2'
ACTION_SPEC = dm_env.specs.DiscreteArray(num_values=5, dtype=np.int32)
REWARD_SPEC = dm_env.specs.Array(shape=[], dtype=np.float32)
class AllObservationsWrapperTest(parameterized.TestCase):
def test_observation_spec(self):
env = mock.Mock(spec_set=base.Substrate)
env.observation_spec.return_value = [{
OBSERVATION_1: dm_env.specs.Array(shape=[1], dtype=np.float32),
OBSERVATION_2: dm_env.specs.Array(shape=[2], dtype=np.float32),
}] * 2
env.action_spec.return_value = [ACTION_SPEC] * 2
env.reward_spec.return_value = [REWARD_SPEC] * 2
wrapped = all_observations_wrapper.Wrapper(
env,
observations_to_share=[OBSERVATION_1],
share_actions=True,
share_rewards=True)
actual = wrapped.observation_spec()
expected = [{
OBSERVATION_1: dm_env.specs.Array(shape=[1], dtype=np.float32),
OBSERVATION_2: dm_env.specs.Array(shape=[2], dtype=np.float32),
GLOBAL_KEY: {
OBSERVATIONS_KEY: {
OBSERVATION_1:
dm_env.specs.Array(
shape=[2, 1], dtype=np.float32, name=OBSERVATION_1)
},
REWARDS_KEY: REWARD_SPEC.replace(shape=[2], name=REWARDS_KEY),
ACTIONS_KEY: dm_env.specs.BoundedArray(
shape=[2], dtype=ACTION_SPEC.dtype, minimum=ACTION_SPEC.minimum,
maximum=ACTION_SPEC.maximum, name=ACTIONS_KEY),
}
}] * 2
self.assertEqual(actual, expected)
def test_reset(self):
env = mock.Mock(spec_set=base.Substrate)
env.action_spec.return_value = [ACTION_SPEC] * 2
env.reward_spec.return_value = [REWARD_SPEC] * 2
env.reset.return_value = dm_env.restart([
{
OBSERVATION_1: np.ones([1]),
OBSERVATION_2: np.ones([2]),
},
{
OBSERVATION_1: np.ones([1]) * 2,
OBSERVATION_2: np.ones([2]) * 2,
},
])._replace(reward=[np.array(0), np.array(0)])
wrapped = all_observations_wrapper.Wrapper(
env,
observations_to_share=[OBSERVATION_1],
share_actions=True,
share_rewards=True)
actual = wrapped.reset()
expected = dm_env.restart([
{
OBSERVATION_1: np.ones([1]),
OBSERVATION_2: np.ones([2]),
GLOBAL_KEY: {
OBSERVATIONS_KEY: {
OBSERVATION_1: np.array([[1.], [2.]])
},
REWARDS_KEY: np.zeros([2], dtype=REWARD_SPEC.dtype),
ACTIONS_KEY: np.zeros([2], dtype=ACTION_SPEC.dtype),
}
},
{
OBSERVATION_1: np.ones([1]) * 2,
OBSERVATION_2: np.ones([2]) * 2,
GLOBAL_KEY: {
OBSERVATIONS_KEY: {
OBSERVATION_1: np.array([[1.], [2.]])
},
REWARDS_KEY: np.zeros([2], dtype=REWARD_SPEC.dtype),
ACTIONS_KEY: np.zeros([2], dtype=ACTION_SPEC.dtype),
}
},
])._replace(reward=[np.array(0), np.array(0)])
np.testing.assert_equal(actual, expected)
def test_step(self):
env = mock.Mock(spec_set=base.Substrate)
env.action_spec.return_value = [ACTION_SPEC] * 2
env.reward_spec.return_value = [REWARD_SPEC] * 2
env.step.return_value = dm_env.transition(
reward=[np.array(1), np.array(2)],
observation=[
{
OBSERVATION_1: np.ones([1]),
OBSERVATION_2: np.ones([2]),
},
{
OBSERVATION_1: np.ones([1]) * 2,
OBSERVATION_2: np.ones([2]) * 2,
},
])
wrapped = all_observations_wrapper.Wrapper(
env,
observations_to_share=[OBSERVATION_1],
share_actions=True,
share_rewards=True)
actual = wrapped.step([3, 4])
expected = dm_env.transition(
reward=[np.array(1), np.array(2)],
observation=[
{
OBSERVATION_1: np.ones([1]),
OBSERVATION_2: np.ones([2]),
GLOBAL_KEY: {
OBSERVATIONS_KEY: {
OBSERVATION_1: np.array([[1.], [2.]])
},
REWARDS_KEY: np.array([1, 2], dtype=REWARD_SPEC.dtype),
ACTIONS_KEY: np.array([3, 4], dtype=ACTION_SPEC.dtype),
}
},
{
OBSERVATION_1: np.ones([1]) * 2,
OBSERVATION_2: np.ones([2]) * 2,
GLOBAL_KEY: {
OBSERVATIONS_KEY: {
OBSERVATION_1: np.array([[1.], [2.]])
},
REWARDS_KEY: np.array([1, 2], dtype=REWARD_SPEC.dtype),
ACTIONS_KEY: np.array([3, 4], dtype=ACTION_SPEC.dtype),
}
},
],
)
np.testing.assert_equal(actual, expected)
if __name__ == '__main__':
absltest.main()
|
model/solver.py | jsedoc/A-Hierarchical-Latent-Structure-for-Variational-Conversation-Modeling | 167 | 11140326 | from itertools import cycle
import numpy as np
import torch
import torch.nn as nn
import models
from layers import masked_cross_entropy
from utils import to_var, time_desc_decorator, TensorboardWriter, pad_and_pack, normal_kl_div, to_bow, bag_of_words_loss, embedding_metric
import os
from tqdm import tqdm
from math import isnan
import re
import math
import pickle
import gensim
word2vec_path = "../datasets/GoogleNews-vectors-negative300.bin"
class Solver(object):
def __init__(self, config, train_data_loader, eval_data_loader, vocab, is_train=True, model=None):
self.config = config
self.epoch_i = 0
self.train_data_loader = train_data_loader
self.eval_data_loader = eval_data_loader
self.vocab = vocab
self.is_train = is_train
self.model = model
@time_desc_decorator('Build Graph')
def build(self, cuda=True):
if self.model is None:
self.model = getattr(models, self.config.model)(self.config)
# orthogonal initialiation for hidden weights
# input gate bias for GRUs
if self.config.mode == 'train' and self.config.checkpoint is None:
            print('Parameter initialization')
for name, param in self.model.named_parameters():
if 'weight_hh' in name:
print('\t' + name)
nn.init.orthogonal_(param)
# bias_hh is concatenation of reset, input, new gates
# only set the input gate bias to 2.0
if 'bias_hh' in name:
print('\t' + name)
dim = int(param.size(0) / 3)
param.data[dim:2 * dim].fill_(2.0)
if torch.cuda.is_available() and cuda:
self.model.cuda()
# Overview Parameters
print('Model Parameters')
for name, param in self.model.named_parameters():
print('\t' + name + '\t', list(param.size()))
if self.config.checkpoint:
self.load_model(self.config.checkpoint)
if self.is_train:
self.writer = TensorboardWriter(self.config.logdir)
self.optimizer = self.config.optimizer(
filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.config.learning_rate)
def save_model(self, epoch):
"""Save parameters to checkpoint"""
ckpt_path = os.path.join(self.config.save_path, f'{epoch}.pkl')
print(f'Save parameters to {ckpt_path}')
torch.save(self.model.state_dict(), ckpt_path)
def load_model(self, checkpoint):
"""Load parameters from checkpoint"""
print(f'Load parameters from {checkpoint}')
epoch = re.match(r"[0-9]*", os.path.basename(checkpoint)).group(0)
self.epoch_i = int(epoch)
self.model.load_state_dict(torch.load(checkpoint))
def write_summary(self, epoch_i):
epoch_loss = getattr(self, 'epoch_loss', None)
if epoch_loss is not None:
self.writer.update_loss(
loss=epoch_loss,
step_i=epoch_i + 1,
name='train_loss')
epoch_recon_loss = getattr(self, 'epoch_recon_loss', None)
if epoch_recon_loss is not None:
self.writer.update_loss(
loss=epoch_recon_loss,
step_i=epoch_i + 1,
name='train_recon_loss')
epoch_kl_div = getattr(self, 'epoch_kl_div', None)
if epoch_kl_div is not None:
self.writer.update_loss(
loss=epoch_kl_div,
step_i=epoch_i + 1,
name='train_kl_div')
kl_mult = getattr(self, 'kl_mult', None)
if kl_mult is not None:
self.writer.update_loss(
loss=kl_mult,
step_i=epoch_i + 1,
name='kl_mult')
epoch_bow_loss = getattr(self, 'epoch_bow_loss', None)
if epoch_bow_loss is not None:
self.writer.update_loss(
loss=epoch_bow_loss,
step_i=epoch_i + 1,
name='bow_loss')
validation_loss = getattr(self, 'validation_loss', None)
if validation_loss is not None:
self.writer.update_loss(
loss=validation_loss,
step_i=epoch_i + 1,
name='validation_loss')
@time_desc_decorator('Training Start!')
def train(self):
epoch_loss_history = []
for epoch_i in range(self.epoch_i, self.config.n_epoch):
self.epoch_i = epoch_i
batch_loss_history = []
self.model.train()
n_total_words = 0
for batch_i, (conversations, conversation_length, sentence_length) in enumerate(tqdm(self.train_data_loader, ncols=80)):
# conversations: (batch_size) list of conversations
# conversation: list of sentences
# sentence: list of tokens
# conversation_length: list of int
# sentence_length: (batch_size) list of conversation list of sentence_lengths
input_conversations = [conv[:-1] for conv in conversations]
target_conversations = [conv[1:] for conv in conversations]
# flatten input and target conversations
input_sentences = [sent for conv in input_conversations for sent in conv]
target_sentences = [sent for conv in target_conversations for sent in conv]
input_sentence_length = [l for len_list in sentence_length for l in len_list[:-1]]
target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
input_conversation_length = [l - 1 for l in conversation_length]
input_sentences = to_var(torch.LongTensor(input_sentences))
target_sentences = to_var(torch.LongTensor(target_sentences))
input_sentence_length = to_var(torch.LongTensor(input_sentence_length))
target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
input_conversation_length = to_var(torch.LongTensor(input_conversation_length))
# reset gradient
self.optimizer.zero_grad()
sentence_logits = self.model(
input_sentences,
input_sentence_length,
input_conversation_length,
target_sentences,
decode=False)
batch_loss, n_words = masked_cross_entropy(
sentence_logits,
target_sentences,
target_sentence_length)
assert not isnan(batch_loss.item())
batch_loss_history.append(batch_loss.item())
n_total_words += n_words.item()
if batch_i % self.config.print_every == 0:
tqdm.write(
f'Epoch: {epoch_i+1}, iter {batch_i}: loss = {batch_loss.item()/ n_words.item():.3f}')
# Back-propagation
batch_loss.backward()
# Gradient cliping
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.clip)
# Run optimizer
self.optimizer.step()
epoch_loss = np.sum(batch_loss_history) / n_total_words
epoch_loss_history.append(epoch_loss)
self.epoch_loss = epoch_loss
print_str = f'Epoch {epoch_i+1} loss average: {epoch_loss:.3f}'
print(print_str)
if epoch_i % self.config.save_every_epoch == 0:
self.save_model(epoch_i + 1)
print('\n<Validation>...')
self.validation_loss = self.evaluate()
if epoch_i % self.config.plot_every_epoch == 0:
self.write_summary(epoch_i)
self.save_model(self.config.n_epoch)
return epoch_loss_history
def generate_sentence(self, input_sentences, input_sentence_length,
input_conversation_length, target_sentences):
self.model.eval()
# [batch_size, max_seq_len, vocab_size]
generated_sentences = self.model(
input_sentences,
input_sentence_length,
input_conversation_length,
target_sentences,
decode=True)
# write output to file
with open(os.path.join(self.config.save_path, 'samples.txt'), 'a') as f:
f.write(f'<Epoch {self.epoch_i}>\n\n')
tqdm.write('\n<Samples>')
for input_sent, target_sent, output_sent in zip(input_sentences, target_sentences, generated_sentences):
input_sent = self.vocab.decode(input_sent)
target_sent = self.vocab.decode(target_sent)
output_sent = '\n'.join([self.vocab.decode(sent) for sent in output_sent])
s = '\n'.join(['Input sentence: ' + input_sent,
'Ground truth: ' + target_sent,
'Generated response: ' + output_sent + '\n'])
f.write(s + '\n')
print(s)
print('')
def evaluate(self):
self.model.eval()
batch_loss_history = []
n_total_words = 0
for batch_i, (conversations, conversation_length, sentence_length) in enumerate(tqdm(self.eval_data_loader, ncols=80)):
# conversations: (batch_size) list of conversations
# conversation: list of sentences
# sentence: list of tokens
# conversation_length: list of int
# sentence_length: (batch_size) list of conversation list of sentence_lengths
input_conversations = [conv[:-1] for conv in conversations]
target_conversations = [conv[1:] for conv in conversations]
# flatten input and target conversations
input_sentences = [sent for conv in input_conversations for sent in conv]
target_sentences = [sent for conv in target_conversations for sent in conv]
input_sentence_length = [l for len_list in sentence_length for l in len_list[:-1]]
target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
input_conversation_length = [l - 1 for l in conversation_length]
with torch.no_grad():
input_sentences = to_var(torch.LongTensor(input_sentences))
target_sentences = to_var(torch.LongTensor(target_sentences))
input_sentence_length = to_var(torch.LongTensor(input_sentence_length))
target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
input_conversation_length = to_var(
torch.LongTensor(input_conversation_length))
if batch_i == 0:
self.generate_sentence(input_sentences,
input_sentence_length,
input_conversation_length,
target_sentences)
sentence_logits = self.model(
input_sentences,
input_sentence_length,
input_conversation_length,
target_sentences)
batch_loss, n_words = masked_cross_entropy(
sentence_logits,
target_sentences,
target_sentence_length)
assert not isnan(batch_loss.item())
batch_loss_history.append(batch_loss.item())
n_total_words += n_words.item()
epoch_loss = np.sum(batch_loss_history) / n_total_words
print_str = f'Validation loss: {epoch_loss:.3f}\n'
print(print_str)
return epoch_loss
def test(self):
self.model.eval()
batch_loss_history = []
n_total_words = 0
for batch_i, (conversations, conversation_length, sentence_length) in enumerate(tqdm(self.eval_data_loader, ncols=80)):
# conversations: (batch_size) list of conversations
# conversation: list of sentences
# sentence: list of tokens
# conversation_length: list of int
# sentence_length: (batch_size) list of conversation list of sentence_lengths
input_conversations = [conv[:-1] for conv in conversations]
target_conversations = [conv[1:] for conv in conversations]
# flatten input and target conversations
input_sentences = [sent for conv in input_conversations for sent in conv]
target_sentences = [sent for conv in target_conversations for sent in conv]
input_sentence_length = [l for len_list in sentence_length for l in len_list[:-1]]
target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
input_conversation_length = [l - 1 for l in conversation_length]
with torch.no_grad():
input_sentences = to_var(torch.LongTensor(input_sentences))
target_sentences = to_var(torch.LongTensor(target_sentences))
input_sentence_length = to_var(torch.LongTensor(input_sentence_length))
target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
input_conversation_length = to_var(torch.LongTensor(input_conversation_length))
sentence_logits = self.model(
input_sentences,
input_sentence_length,
input_conversation_length,
target_sentences)
batch_loss, n_words = masked_cross_entropy(
sentence_logits,
target_sentences,
target_sentence_length)
assert not isnan(batch_loss.item())
batch_loss_history.append(batch_loss.item())
n_total_words += n_words.item()
epoch_loss = np.sum(batch_loss_history) / n_total_words
print(f'Number of words: {n_total_words}')
print(f'Bits per word: {epoch_loss:.3f}')
word_perplexity = np.exp(epoch_loss)
print_str = f'Word perplexity : {word_perplexity:.3f}\n'
print(print_str)
return word_perplexity
def embedding_metric(self):
word2vec = getattr(self, 'word2vec', None)
if word2vec is None:
print('Loading word2vec model')
word2vec = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
self.word2vec = word2vec
keys = word2vec.vocab
self.model.eval()
n_context = self.config.n_context
n_sample_step = self.config.n_sample_step
metric_average_history = []
metric_extrema_history = []
metric_greedy_history = []
context_history = []
sample_history = []
n_sent = 0
n_conv = 0
for batch_i, (conversations, conversation_length, sentence_length) \
in enumerate(tqdm(self.eval_data_loader, ncols=80)):
# conversations: (batch_size) list of conversations
# conversation: list of sentences
# sentence: list of tokens
# conversation_length: list of int
# sentence_length: (batch_size) list of conversation list of sentence_lengths
conv_indices = [i for i in range(len(conversations)) if len(conversations[i]) >= n_context + n_sample_step]
context = [c for i in conv_indices for c in [conversations[i][:n_context]]]
ground_truth = [c for i in conv_indices for c in [conversations[i][n_context:n_context + n_sample_step]]]
sentence_length = [c for i in conv_indices for c in [sentence_length[i][:n_context]]]
with torch.no_grad():
context = to_var(torch.LongTensor(context))
sentence_length = to_var(torch.LongTensor(sentence_length))
samples = self.model.generate(context, sentence_length, n_context)
context = context.data.cpu().numpy().tolist()
samples = samples.data.cpu().numpy().tolist()
context_history.append(context)
sample_history.append(samples)
samples = [[self.vocab.decode(sent) for sent in c] for c in samples]
ground_truth = [[self.vocab.decode(sent) for sent in c] for c in ground_truth]
samples = [sent for c in samples for sent in c]
ground_truth = [sent for c in ground_truth for sent in c]
samples = [[word2vec[s] for s in sent.split() if s in keys] for sent in samples]
ground_truth = [[word2vec[s] for s in sent.split() if s in keys] for sent in ground_truth]
indices = [i for i, s, g in zip(range(len(samples)), samples, ground_truth) if s != [] and g != []]
samples = [samples[i] for i in indices]
ground_truth = [ground_truth[i] for i in indices]
n = len(samples)
n_sent += n
metric_average = embedding_metric(samples, ground_truth, word2vec, 'average')
metric_extrema = embedding_metric(samples, ground_truth, word2vec, 'extrema')
metric_greedy = embedding_metric(samples, ground_truth, word2vec, 'greedy')
metric_average_history.append(metric_average)
metric_extrema_history.append(metric_extrema)
metric_greedy_history.append(metric_greedy)
epoch_average = np.mean(np.concatenate(metric_average_history), axis=0)
epoch_extrema = np.mean(np.concatenate(metric_extrema_history), axis=0)
epoch_greedy = np.mean(np.concatenate(metric_greedy_history), axis=0)
print('n_sentences:', n_sent)
print_str = f'Metrics - Average: {epoch_average:.3f}, Extrema: {epoch_extrema:.3f}, Greedy: {epoch_greedy:.3f}'
print(print_str)
print('\n')
return epoch_average, epoch_extrema, epoch_greedy
class VariationalSolver(Solver):
def __init__(self, config, train_data_loader, eval_data_loader, vocab, is_train=True, model=None):
self.config = config
self.epoch_i = 0
self.train_data_loader = train_data_loader
self.eval_data_loader = eval_data_loader
self.vocab = vocab
self.is_train = is_train
self.model = model
@time_desc_decorator('Training Start!')
def train(self):
epoch_loss_history = []
kl_mult = 0.0
conv_kl_mult = 0.0
for epoch_i in range(self.epoch_i, self.config.n_epoch):
self.epoch_i = epoch_i
batch_loss_history = []
recon_loss_history = []
kl_div_history = []
kl_div_sent_history = []
kl_div_conv_history = []
bow_loss_history = []
self.model.train()
n_total_words = 0
# self.evaluate()
for batch_i, (conversations, conversation_length, sentence_length) \
in enumerate(tqdm(self.train_data_loader, ncols=80)):
# conversations: (batch_size) list of conversations
# conversation: list of sentences
# sentence: list of tokens
# conversation_length: list of int
# sentence_length: (batch_size) list of conversation list of sentence_lengths
target_conversations = [conv[1:] for conv in conversations]
# flatten input and target conversations
sentences = [sent for conv in conversations for sent in conv]
input_conversation_length = [l - 1 for l in conversation_length]
target_sentences = [sent for conv in target_conversations for sent in conv]
target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
sentence_length = [l for len_list in sentence_length for l in len_list]
sentences = to_var(torch.LongTensor(sentences))
sentence_length = to_var(torch.LongTensor(sentence_length))
input_conversation_length = to_var(torch.LongTensor(input_conversation_length))
target_sentences = to_var(torch.LongTensor(target_sentences))
target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
# reset gradient
self.optimizer.zero_grad()
sentence_logits, kl_div, _, _ = self.model(
sentences,
sentence_length,
input_conversation_length,
target_sentences)
recon_loss, n_words = masked_cross_entropy(
sentence_logits,
target_sentences,
target_sentence_length)
batch_loss = recon_loss + kl_mult * kl_div
batch_loss_history.append(batch_loss.item())
recon_loss_history.append(recon_loss.item())
kl_div_history.append(kl_div.item())
n_total_words += n_words.item()
if self.config.bow:
bow_loss = self.model.compute_bow_loss(target_conversations)
batch_loss += bow_loss
bow_loss_history.append(bow_loss.item())
assert not isnan(batch_loss.item())
if batch_i % self.config.print_every == 0:
print_str = f'Epoch: {epoch_i+1}, iter {batch_i}: loss = {batch_loss.item() / n_words.item():.3f}, recon = {recon_loss.item() / n_words.item():.3f}, kl_div = {kl_div.item() / n_words.item():.3f}'
if self.config.bow:
print_str += f', bow_loss = {bow_loss.item() / n_words.item():.3f}'
tqdm.write(print_str)
# Back-propagation
batch_loss.backward()
# Gradient cliping
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.clip)
# Run optimizer
self.optimizer.step()
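                # linear KL annealing: ramp the KL weight from 0 toward 1 over kl_annealing_iter updates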
kl_mult = min(kl_mult + 1.0 / self.config.kl_annealing_iter, 1.0)
epoch_loss = np.sum(batch_loss_history) / n_total_words
epoch_loss_history.append(epoch_loss)
epoch_recon_loss = np.sum(recon_loss_history) / n_total_words
epoch_kl_div = np.sum(kl_div_history) / n_total_words
self.kl_mult = kl_mult
self.epoch_loss = epoch_loss
self.epoch_recon_loss = epoch_recon_loss
self.epoch_kl_div = epoch_kl_div
print_str = f'Epoch {epoch_i+1} loss average: {epoch_loss:.3f}, recon_loss: {epoch_recon_loss:.3f}, kl_div: {epoch_kl_div:.3f}'
if bow_loss_history:
self.epoch_bow_loss = np.sum(bow_loss_history) / n_total_words
print_str += f', bow_loss = {self.epoch_bow_loss:.3f}'
print(print_str)
if epoch_i % self.config.save_every_epoch == 0:
self.save_model(epoch_i + 1)
print('\n<Validation>...')
self.validation_loss = self.evaluate()
if epoch_i % self.config.plot_every_epoch == 0:
self.write_summary(epoch_i)
return epoch_loss_history
def generate_sentence(self, sentences, sentence_length,
input_conversation_length, input_sentences, target_sentences):
"""Generate output of decoder (single batch)"""
self.model.eval()
# [batch_size, max_seq_len, vocab_size]
generated_sentences, _, _, _ = self.model(
sentences,
sentence_length,
input_conversation_length,
target_sentences,
decode=True)
# write output to file
with open(os.path.join(self.config.save_path, 'samples.txt'), 'a') as f:
f.write(f'<Epoch {self.epoch_i}>\n\n')
tqdm.write('\n<Samples>')
for input_sent, target_sent, output_sent in zip(input_sentences, target_sentences, generated_sentences):
input_sent = self.vocab.decode(input_sent)
target_sent = self.vocab.decode(target_sent)
output_sent = '\n'.join([self.vocab.decode(sent) for sent in output_sent])
s = '\n'.join(['Input sentence: ' + input_sent,
'Ground truth: ' + target_sent,
'Generated response: ' + output_sent + '\n'])
f.write(s + '\n')
print(s)
print('')
def evaluate(self):
self.model.eval()
batch_loss_history = []
recon_loss_history = []
kl_div_history = []
bow_loss_history = []
n_total_words = 0
for batch_i, (conversations, conversation_length, sentence_length) \
in enumerate(tqdm(self.eval_data_loader, ncols=80)):
# conversations: (batch_size) list of conversations
# conversation: list of sentences
# sentence: list of tokens
# conversation_length: list of int
# sentence_length: (batch_size) list of conversation list of sentence_lengths
target_conversations = [conv[1:] for conv in conversations]
# flatten input and target conversations
sentences = [sent for conv in conversations for sent in conv]
input_conversation_length = [l - 1 for l in conversation_length]
target_sentences = [sent for conv in target_conversations for sent in conv]
target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
sentence_length = [l for len_list in sentence_length for l in len_list]
with torch.no_grad():
sentences = to_var(torch.LongTensor(sentences))
sentence_length = to_var(torch.LongTensor(sentence_length))
input_conversation_length = to_var(
torch.LongTensor(input_conversation_length))
target_sentences = to_var(torch.LongTensor(target_sentences))
target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
if batch_i == 0:
input_conversations = [conv[:-1] for conv in conversations]
input_sentences = [sent for conv in input_conversations for sent in conv]
with torch.no_grad():
input_sentences = to_var(torch.LongTensor(input_sentences))
self.generate_sentence(sentences,
sentence_length,
input_conversation_length,
input_sentences,
target_sentences)
sentence_logits, kl_div, _, _ = self.model(
sentences,
sentence_length,
input_conversation_length,
target_sentences)
recon_loss, n_words = masked_cross_entropy(
sentence_logits,
target_sentences,
target_sentence_length)
batch_loss = recon_loss + kl_div
if self.config.bow:
bow_loss = self.model.compute_bow_loss(target_conversations)
bow_loss_history.append(bow_loss.item())
assert not isnan(batch_loss.item())
batch_loss_history.append(batch_loss.item())
recon_loss_history.append(recon_loss.item())
kl_div_history.append(kl_div.item())
n_total_words += n_words.item()
epoch_loss = np.sum(batch_loss_history) / n_total_words
epoch_recon_loss = np.sum(recon_loss_history) / n_total_words
epoch_kl_div = np.sum(kl_div_history) / n_total_words
print_str = f'Validation loss: {epoch_loss:.3f}, recon_loss: {epoch_recon_loss:.3f}, kl_div: {epoch_kl_div:.3f}'
if bow_loss_history:
epoch_bow_loss = np.sum(bow_loss_history) / n_total_words
print_str += f', bow_loss = {epoch_bow_loss:.3f}'
print(print_str)
print('\n')
return epoch_loss
def importance_sample(self):
        ''' Perform importance sampling to get a tighter bound
'''
self.model.eval()
weight_history = []
n_total_words = 0
kl_div_history = []
for batch_i, (conversations, conversation_length, sentence_length) \
in enumerate(tqdm(self.eval_data_loader, ncols=80)):
# conversations: (batch_size) list of conversations
# conversation: list of sentences
# sentence: list of tokens
# conversation_length: list of int
# sentence_length: (batch_size) list of conversation list of sentence_lengths
target_conversations = [conv[1:] for conv in conversations]
# flatten input and target conversations
sentences = [sent for conv in conversations for sent in conv]
input_conversation_length = [l - 1 for l in conversation_length]
target_sentences = [sent for conv in target_conversations for sent in conv]
target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
sentence_length = [l for len_list in sentence_length for l in len_list]
# n_words += sum([len([word for word in sent if word != PAD_ID]) for sent in target_sentences])
with torch.no_grad():
sentences = to_var(torch.LongTensor(sentences))
sentence_length = to_var(torch.LongTensor(sentence_length))
input_conversation_length = to_var(
torch.LongTensor(input_conversation_length))
target_sentences = to_var(torch.LongTensor(target_sentences))
target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
# treat whole batch as one data sample
weights = []
for j in range(self.config.importance_sample):
sentence_logits, kl_div, log_p_z, log_q_zx = self.model(
sentences,
sentence_length,
input_conversation_length,
target_sentences)
recon_loss, n_words = masked_cross_entropy(
sentence_logits,
target_sentences,
target_sentence_length)
log_w = (-recon_loss.sum() + log_p_z - log_q_zx).data
weights.append(log_w)
if j == 0:
n_total_words += n_words.item()
kl_div_history.append(kl_div.item())
# weights: [n_samples]
weights = torch.stack(weights, 0)
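                # numerically stable log-mean-exp of the importance weights:
                # log( (1/S) * sum_j exp(log_w_j) ), shifting by the (floored) max before exponentiating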
m = np.floor(weights.max())
weights = np.log(torch.exp(weights - m).sum())
weights = m + weights - np.log(self.config.importance_sample)
weight_history.append(weights)
print(f'Number of words: {n_total_words}')
bits_per_word = -np.sum(weight_history) / n_total_words
print(f'Bits per word: {bits_per_word:.3f}')
word_perplexity = np.exp(bits_per_word)
epoch_kl_div = np.sum(kl_div_history) / n_total_words
print_str = f'Word perplexity upperbound using {self.config.importance_sample} importance samples: {word_perplexity:.3f}, kl_div: {epoch_kl_div:.3f}\n'
print(print_str)
return word_perplexity
|
tools/regression/xsl_reports/report.py | zyiacas/boost-doc-zh | 198 | 11140334 |
# Copyright (c) MetaCommunications, Inc. 2003-2004
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import shutil
import os.path
import os
import string
import time
import sys
import utils
import runner
report_types = [ 'us', 'ds', 'ud', 'dd', 'l', 'p', 'x', 'i', 'n', 'ddr', 'dsr' ]
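# Report codes consumed by make_result_pages below: 'dd'/'ud' detailed developer/user
# pages, 'ds'/'us' summary pages, 'l' links, 'i' issues, 'x' merge with expected
# results, 'n' runner comment files, 'ddr'/'dsr' release variants of the developer pages.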
if __name__ == '__main__':
run_dir = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
else:
run_dir = os.path.abspath( os.path.dirname( sys.modules[ __name__ ].__file__ ) )
def map_path( path ):
return os.path.join( run_dir, path )
def xsl_path( xsl_file_name, v2 = 0 ):
if v2:
return map_path( os.path.join( 'xsl/v2', xsl_file_name ) )
else:
return map_path( os.path.join( 'xsl', xsl_file_name ) )
def make_result_pages(
test_results_file
, expected_results_file
, failures_markup_file
, tag
, run_date
, comment_file
, results_dir
, result_prefix
, reports
, v2
):
utils.log( 'Producing the reports...' )
__log__ = 1
output_dir = os.path.join( results_dir, result_prefix )
utils.makedirs( output_dir )
if comment_file != '':
comment_file = os.path.abspath( comment_file )
if expected_results_file != '':
expected_results_file = os.path.abspath( expected_results_file )
else:
expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )
extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )
if 'x' in reports:
utils.log( ' Merging with expected results...' )
utils.libxslt(
utils.log
, test_results_file
, xsl_path( 'add_expected_results.xsl', v2 )
, extended_test_results
, { 'expected_results_file': expected_results_file
, 'failures_markup_file' : failures_markup_file
, 'source' : tag }
)
links = os.path.join( output_dir, 'links.html' )
utils.makedirs( os.path.join( output_dir, 'output' ) )
for mode in ( 'developer', 'user' ):
utils.makedirs( os.path.join( output_dir, mode , 'output' ) )
if 'l' in reports:
utils.log( ' Making test output files...' )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'links_page.xsl', v2 )
, links
, {
'source': tag
, 'run_date': run_date
, 'comment_file': comment_file
, 'explicit_markup_file': failures_markup_file
}
)
issues = os.path.join( output_dir, 'developer', 'issues.html' )
if 'i' in reports:
utils.log( ' Making issues list...' )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'issues_page.xsl', v2 )
, issues
, {
'source': tag
, 'run_date': run_date
, 'comment_file': comment_file
, 'explicit_markup_file': failures_markup_file
}
)
for mode in ( 'developer', 'user' ):
if mode[0] + 'd' in reports:
utils.log( ' Making detailed %s report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'result_page.xsl', v2 )
, os.path.join( output_dir, mode, 'index.html' )
, {
'links_file': 'links.html'
, 'mode': mode
, 'source': tag
, 'run_date': run_date
, 'comment_file': comment_file
, 'expected_results_file': expected_results_file
, 'explicit_markup_file' : failures_markup_file
}
)
for mode in ( 'developer', 'user' ):
if mode[0] + 's' in reports:
utils.log( ' Making summary %s report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'summary_page.xsl', v2 )
, os.path.join( output_dir, mode, 'summary.html' )
, {
'mode' : mode
, 'source': tag
, 'run_date': run_date
, 'comment_file': comment_file
, 'explicit_markup_file' : failures_markup_file
}
)
if v2 and "ddr" in reports:
utils.log( ' Making detailed %s release report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'result_page.xsl', v2 )
, os.path.join( output_dir, "developer", 'index_release.html' )
, {
'links_file': 'links.html'
, 'mode': "developer"
, 'source': tag
, 'run_date': run_date
, 'comment_file': comment_file
, 'expected_results_file': expected_results_file
, 'explicit_markup_file' : failures_markup_file
, 'release': "yes"
}
)
if v2 and "dsr" in reports:
utils.log( ' Making summary %s release report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'summary_page.xsl', v2 )
, os.path.join( output_dir, "developer", 'summary_release.html' )
, {
'mode' : "developer"
, 'source': tag
, 'run_date': run_date
, 'comment_file': comment_file
, 'explicit_markup_file' : failures_markup_file
, 'release': 'yes'
}
)
if 'e' in reports:
utils.log( ' Generating expected_results ...' )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'produce_expected_results.xsl', v2 )
, os.path.join( output_dir, 'expected_results.xml' )
)
if v2 and 'n' in reports:
utils.log( ' Making runner comment files...' )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'runners.xsl', v2 )
, os.path.join( output_dir, 'runners.html' )
)
shutil.copyfile(
xsl_path( 'html/master.css', v2 )
, os.path.join( output_dir, 'master.css' )
)
def build_xsl_reports(
locate_root_dir
, tag
, expected_results_file
, failures_markup_file
, comment_file
, results_dir
, result_file_prefix
, dont_collect_logs = 0
, reports = report_types
, v2 = 0
, user = None
, upload = False
):
( run_date ) = time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime() )
test_results_file = os.path.join( results_dir, 'test_results.xml' )
bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )
if v2:
import merger
merger.merge_logs(
tag
, user
, results_dir
, test_results_file
, dont_collect_logs
)
else:
utils.log( ' dont_collect_logs: %s' % dont_collect_logs )
if not dont_collect_logs:
f = open( test_results_file, 'w+' )
f.write( '<tests>\n' )
runner.collect_test_logs( [ bin_boost_dir ], f )
f.write( '</tests>\n' )
f.close()
make_result_pages(
test_results_file
, expected_results_file
, failures_markup_file
, tag
, run_date
, comment_file
, results_dir
, result_file_prefix
, reports
, v2
)
if v2 and upload:
upload_dir = 'regression-logs/'
utils.log( 'Uploading v2 results into "%s" [connecting as %s]...' % ( upload_dir, user ) )
archive_name = '%s.tar.gz' % result_file_prefix
utils.tar(
os.path.join( results_dir, result_file_prefix )
, archive_name
)
utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )
def accept_args( args ):
args_spec = [
'locate-root='
, 'tag='
, 'expected-results='
, 'failures-markup='
, 'comment='
, 'results-dir='
, 'results-prefix='
, 'dont-collect-logs'
, 'reports='
, 'v2'
, 'user='
, 'upload'
, 'help'
]
options = {
'--comment': ''
, '--expected-results': ''
, '--failures-markup': ''
, '--reports': string.join( report_types, ',' )
, '--tag': None
, '--user': None
, 'upload': False
}
utils.accept_args( args_spec, args, options, usage )
if not options.has_key( '--results-dir' ):
options[ '--results-dir' ] = options[ '--locate-root' ]
if not options.has_key( '--results-prefix' ):
if options.has_key( '--v2' ):
options[ '--results-prefix' ] = 'all'
else:
options[ '--results-prefix' ] = ''
return (
options[ '--locate-root' ]
, options[ '--tag' ]
, options[ '--expected-results' ]
, options[ '--failures-markup' ]
, options[ '--comment' ]
, options[ '--results-dir' ]
, options[ '--results-prefix' ]
, options.has_key( '--dont-collect-logs' )
, options[ '--reports' ].split( ',' )
, options.has_key( '--v2' )
, options[ '--user' ]
, options.has_key( '--upload' )
)
def usage():
print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
print '''
\t--locate-root the same as --locate-root in compiler_status
\t--tag the tag for the results (i.e. 'CVS-HEAD')
\t--expected-results the file with the results to be compared with
\t the current run
\t--failures-markup the file with the failures markup
\t--comment an html comment file (will be inserted in the reports)
\t--results-dir the directory containing -links.html, -fail.html
\t files produced by compiler_status (by default the
\t same as specified in --locate-root)
\t--results-prefix the prefix of -links.html, -fail.html
\t files produced by compiler_status
\t--v2 v2 reports (combine multiple runners results into a
\t single set of reports)
The following options are valid only for v2 reports:
\t--user SourceForge user name for a shell account
\t--upload upload v2 reports to SourceForge
The following options are useful in debugging:
\t--dont-collect-logs dont collect the test logs
\t--reports produce only the specified reports
\t us - user summary
\t ds - developer summary
\t ud - user detailed
\t dd - developer detailed
\t l - links
\t p - patches
\t x - extended results file
\t i - issues
'''
def main():
build_xsl_reports( *accept_args( sys.argv[ 1 : ] ) )
if __name__ == '__main__':
main()
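# Example invocation assembled from the options accepted above (the paths, tag, and
# report selection are placeholders, not taken from a real run):
#
#   python report.py --locate-root /path/to/boost/status --tag CVS-HEAD \
#       --expected-results expected_results.xml --failures-markup explicit-failures-markup.xml \
#       --comment comment.html --v2 --reports dd,ds,l,x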
|
sim.py | flint-stone/OpenTPU | 248 | 11140337 | # coding=utf-8
import argparse
import sys
import numpy as np
from collections import deque
from math import exp
import isa
from config import MATSIZE as WIDTH
args = None
# width of the tile
#WIDTH = 16
class TPUSim(object):
def __init__(self, program_filename, dram_filename, hostmem_filename):
# TODO: switch b/w 32-bit float vs int
self.program = open(program_filename, 'rb')
self.weight_memory = np.load(dram_filename)
self.host_memory = np.load(hostmem_filename)
if not args.raw:
assert self.weight_memory.dtype == np.int8, 'DRAM weight mem is not 8-bit ints'
assert self.host_memory.dtype == np.int8, 'Hostmem not 8-bit ints'
self.unified_buffer = (np.zeros((96000, WIDTH), dtype=np.float32) if args.raw else
np.zeros((96000, WIDTH), dtype=np.int8))
self.accumulator = (np.zeros((4000, WIDTH), dtype=np.float32) if args.raw else
np.zeros((4000, WIDTH), dtype=np.int32))
self.weight_fifo = deque()
def run(self):
# load program and execute instructions
while True:
instruction = self.decode()
opcode, operands = instruction[0], instruction[1:]
if opcode in ['RHM', 'WHM', 'RW']:
self.memops(opcode, *operands)
elif opcode == 'MMC':
self.matrix_multiply_convolve(*operands)
elif opcode == 'ACT':
self.act(*operands)
elif opcode == 'SYNC':
pass
elif opcode == 'NOP':
pass
elif opcode == 'HLT':
print('H A L T')
break
else:
raise Exception('WAT (╯°□°)╯︵ ┻━┻')
# all done, exit
savepath = 'sim32.npy' if args.raw else 'sim8.npy'
np.save(savepath, self.host_memory)
print(self.host_memory.astype('uint8'))
self.program.close()
print("""ALL DONE!
(•_•)
( •_•)>⌐■-■
(⌐■_■)""")
def decode(self):
opcode = int.from_bytes(self.program.read(isa.OP_SIZE), byteorder='big')
opcode = isa.BIN2OPCODE[opcode]
flag = int.from_bytes(self.program.read(isa.FLAGS_SIZE), byteorder='big')
length = int.from_bytes(self.program.read(isa.LEN_SIZE), byteorder='big')
src_addr = int.from_bytes(self.program.read(isa.ADDR_SIZE), byteorder='big')
dest_addr = int.from_bytes(self.program.read(isa.UB_ADDR_SIZE), byteorder='big')
#print('{} decoding: len {}, flags {}, src {}, dst {}'.format(opcode, length, flag, src_addr, dest_addr))
return opcode, src_addr, dest_addr, length, flag
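    # A sketch of the inverse of decode(): packing one instruction into the byte
    # layout read above (big-endian fields, using the same isa.*_SIZE constants so
    # the field widths stay consistent with the ISA definition). This helper is
    # illustrative only and is not used by the simulator.
    @staticmethod
    def _encode_sketch(opcode_byte, flag, length, src_addr, dest_addr):
        return (opcode_byte.to_bytes(isa.OP_SIZE, byteorder='big')
                + flag.to_bytes(isa.FLAGS_SIZE, byteorder='big')
                + length.to_bytes(isa.LEN_SIZE, byteorder='big')
                + src_addr.to_bytes(isa.ADDR_SIZE, byteorder='big')
                + dest_addr.to_bytes(isa.UB_ADDR_SIZE, byteorder='big'))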
# opcodes
def act(self, src, dest, length, flag):
print('ACTIVATE!')
result = self.accumulator[src:src+length]
if flag & isa.FUNC_RELU_MASK:
print(' RELU!!!!')
vfunc = np.vectorize(lambda x: 0 * x if x < 0. else x)
elif flag & isa.FUNC_SIGMOID_MASK:
print(' SIGMOID')
vfunc = np.vectorize(lambda x: int(255./(1.+exp(-x))))
else:
vfunc = np.vectorize(lambda x: x)
#raise Exception('(╯°□°)╯︵ ┻━┻ bad activation function!')
result = vfunc(result)
# downsample/normalize if needed
if not args.raw:
result = [v & 0x000000FF for v in result]
self.unified_buffer[dest:dest+length] = result
def memops(self, opcode, src_addr, dest_addr, length, flag):
print('Memory xfer! host: {} unified buffer: {}: length: {} (FLAGS? {})'.format(
src_addr, dest_addr, length, flag
))
if opcode == 'RHM':
print(' read host memory to unified buffer')
self.unified_buffer[dest_addr:dest_addr + length] = self.host_memory[src_addr:src_addr + length]
elif opcode == 'WHM':
print(' write unified buffer to host memory')
self.host_memory[dest_addr:dest_addr + length] = self.unified_buffer[src_addr:src_addr + length]
elif opcode == 'RW':
print(' read weights from DRAM into MMU')
self.weight_fifo.append(self.weight_memory[src_addr])
else:
raise Exception('WAT (╯°□°)╯︵ ┻━┻')
def matrix_multiply_convolve(self, ub_addr, accum_addr, size, flags):
print('Matrix things....')
print(' UB@{} + {} -> MMU -> accumulator@{} + {}'.format(
ub_addr, size, accum_addr, size
))
inp = self.unified_buffer[ub_addr: ub_addr + size]
print('MMC input shape: {}'.format(inp.shape))
weight_mat = self.weight_fifo.popleft()
print('MMC weight: {}'.format(weight_mat))
if not args.raw:
inp = inp.astype(np.int32)
weight_mat = weight_mat.astype(np.int32)
out = np.matmul(inp, weight_mat)
print('MMC output shape: {}'.format(out.shape))
overwrite = isa.OVERWRITE_MASK & flags
if overwrite:
self.accumulator[accum_addr:accum_addr + size] = out
else:
self.accumulator[accum_addr:accum_addr + size] += out
def parse_args():
global args
parser = argparse.ArgumentParser()
parser.add_argument('program', action='store',
help='Path to assembly program file.')
parser.add_argument('host_file', action='store',
help='Path to host file.')
parser.add_argument('dram_file', action='store',
help='Path to dram file.')
parser.add_argument('--raw', action='store_true', default=False,
help='Gen sim32.npy instead of sim8.npy.')
args = parser.parse_args()
if __name__ == '__main__':
if len(sys.argv) < 4:
print('Usage:', sys.argv[0], 'PROGRAM_BINARY DRAM_FILE HOST_FILE')
sys.exit(0)
parse_args()
tpusim = TPUSim(args.program, args.dram_file, args.host_file)
tpusim.run()
|
rplugin/python3/defx/util.py | Matsui54/defx.nvim | 1,229 | 11140339 | # ============================================================================
# FILE: util.py
# AUTHOR: <NAME> <Shougo.Matsu at gmail.<EMAIL>>
# License: MIT license
# ============================================================================
from pathlib import Path
from pynvim import Nvim
from sys import executable, base_exec_prefix
import importlib.util
import os
import shutil
import typing
UserContext = typing.Dict[str, typing.Any]
Candidate = typing.Dict[str, typing.Any]
Candidates = typing.List[Candidate]
def cd(vim: Nvim, path: str) -> None:
vim.call('defx#util#cd', path)
def cwd_input(vim: Nvim, cwd: str, prompt: str,
text: str = '', completion: str = '') -> str:
"""
Returns the absolute input path in cwd.
"""
save_cwd = vim.call('getcwd')
cd(vim, cwd)
filename: str = str(vim.call('defx#util#input', prompt, text, completion))
filename = filename.strip()
cd(vim, save_cwd)
return filename
def error(vim: Nvim, expr: typing.Any) -> None:
"""
Prints the error messages to Vim/Nvim's :messages buffer.
"""
if isinstance(expr, set):
expr = [str(x) for x in expr]
vim.call('defx#util#print_error', str(expr))
def confirm(vim: Nvim, question: str) -> bool:
"""
Confirm action
"""
option: int = vim.call('defx#util#confirm',
question, '&Yes\n&No\n&Cancel', 2)
return option == 1
def import_plugin(path: Path, source: str,
classname: str) -> typing.Any:
"""Import defx plugin source class.
If the class exists, add its directory to sys.path.
"""
module_name = 'defx.%s.%s' % (source, path.stem)
spec = importlib.util.spec_from_file_location(module_name, str(path))
if not spec:
return None
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
cls = getattr(module, classname, None)
return cls
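# Hypothetical usage sketch for import_plugin() above -- the path, source kind, and
# class name below are illustrative, not guaranteed values:
#
#     cls = import_plugin(Path('/path/to/defx/column/mark.py'), 'column', 'Column')
#     if cls is not None:
#         instance = cls(vim)  # constructor arguments depend on the plugin type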
def readable(path: Path) -> bool:
"""
Check {path} is readable.
"""
try:
if os.access(str(path), os.R_OK) and path.resolve():
return True
else:
return False
except Exception:
return False
def safe_call(fn: typing.Callable[..., typing.Any],
fallback: typing.Optional[bool] = None) -> typing.Any:
"""
Ignore OSError when calling {fn}
"""
try:
return fn()
except OSError:
return fallback
def get_python_exe() -> str:
if 'py' in str(Path(executable).name):
return executable
for exe in ['python3', 'python']:
which = shutil.which(exe)
if which is not None:
return which
for name in (Path(base_exec_prefix).joinpath(v) for v in [
'python3', 'python',
str(Path('bin').joinpath('python3')),
str(Path('bin').joinpath('python')),
]):
if name.exists():
return str(name)
# return sys.executable anyway. This may not work on windows
return executable
def strwidth(vim: Nvim, word: str) -> int:
return (int(vim.call('strwidth', word))
if len(word) != len(bytes(word, 'utf-8',
'surrogatepass')) else len(word))
def len_bytes(word: str) -> int:
return len(bytes(word, 'utf-8', 'surrogatepass'))
def fnamemodify(vim: Nvim, word: str, mod: str) -> str:
return str(vim.call('fnamemodify', word, mod))
|
examples/pytorch/pinsage/evaluation.py | ketyi/dgl | 9,516 | 11140341 |
import numpy as np
import torch
import pickle
import dgl
import argparse
def prec(recommendations, ground_truth):
n_users, n_items = ground_truth.shape
K = recommendations.shape[1]
user_idx = np.repeat(np.arange(n_users), K)
item_idx = recommendations.flatten()
relevance = ground_truth[user_idx, item_idx].reshape((n_users, K))
hit = relevance.any(axis=1).mean()
return hit
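# A small sanity-check sketch for prec() above, using a dense boolean ground-truth
# matrix for simplicity (evaluate_nn() below passes a scipy CSR matrix instead).
# Values are illustrative only: user 0's top-K list contains a true interaction,
# user 1's does not, so the expected hit rate is 0.5.
def _prec_example():
    ground_truth = np.array([[0, 0, 0, 1],
                             [0, 1, 0, 0]], dtype=bool)   # 2 users x 4 items
    recommendations = np.array([[1, 3],                   # K = 2 recommended items per user
                                [0, 2]])
    return prec(recommendations, ground_truth)            # -> 0.5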
class LatestNNRecommender(object):
def __init__(self, user_ntype, item_ntype, user_to_item_etype, timestamp, batch_size):
self.user_ntype = user_ntype
self.item_ntype = item_ntype
self.user_to_item_etype = user_to_item_etype
self.batch_size = batch_size
self.timestamp = timestamp
def recommend(self, full_graph, K, h_user, h_item):
"""
Return a (n_user, K) matrix of recommended items for each user
"""
graph_slice = full_graph.edge_type_subgraph([self.user_to_item_etype])
n_users = full_graph.number_of_nodes(self.user_ntype)
latest_interactions = dgl.sampling.select_topk(graph_slice, 1, self.timestamp, edge_dir='out')
user, latest_items = latest_interactions.all_edges(form='uv', order='srcdst')
# each user should have at least one "latest" interaction
assert torch.equal(user, torch.arange(n_users))
recommended_batches = []
user_batches = torch.arange(n_users).split(self.batch_size)
for user_batch in user_batches:
latest_item_batch = latest_items[user_batch].to(device=h_item.device)
dist = h_item[latest_item_batch] @ h_item.t()
# exclude items that are already interacted
for i, u in enumerate(user_batch.tolist()):
interacted_items = full_graph.successors(u, etype=self.user_to_item_etype)
dist[i, interacted_items] = -np.inf
recommended_batches.append(dist.topk(K, 1)[1])
recommendations = torch.cat(recommended_batches, 0)
return recommendations
def evaluate_nn(dataset, h_item, k, batch_size):
g = dataset['train-graph']
val_matrix = dataset['val-matrix'].tocsr()
test_matrix = dataset['test-matrix'].tocsr()
item_texts = dataset['item-texts']
user_ntype = dataset['user-type']
item_ntype = dataset['item-type']
user_to_item_etype = dataset['user-to-item-type']
timestamp = dataset['timestamp-edge-column']
rec_engine = LatestNNRecommender(
user_ntype, item_ntype, user_to_item_etype, timestamp, batch_size)
recommendations = rec_engine.recommend(g, k, None, h_item).cpu().numpy()
return prec(recommendations, val_matrix)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dataset_path', type=str)
parser.add_argument('item_embedding_path', type=str)
parser.add_argument('-k', type=int, default=10)
parser.add_argument('--batch-size', type=int, default=32)
args = parser.parse_args()
with open(args.dataset_path, 'rb') as f:
dataset = pickle.load(f)
with open(args.item_embedding_path, 'rb') as f:
emb = torch.FloatTensor(pickle.load(f))
print(evaluate_nn(dataset, emb, args.k, args.batch_size))
|
tests/test_transactionscout.py | nhatminhbeo/mango-explorer | 131 | 11140349 | from .context import mango
from .fakes import fake_mango_instruction, fake_seeded_public_key, fake_token
from datetime import datetime
from decimal import Decimal
from solana.publickey import PublicKey
import typing
def test_transaction_instruction_constructor() -> None:
instruction_type: mango.InstructionType = mango.InstructionType.Deposit
instruction_data: typing.Dict[str, str] = {"key": "test value"}
account1 = fake_seeded_public_key("account 1")
account2 = fake_seeded_public_key("account 2")
account3 = fake_seeded_public_key("account 3")
accounts = [account1, account2, account3]
actual = mango.MangoInstruction(instruction_type, instruction_data, accounts)
assert actual is not None
assert actual.instruction_type == instruction_type
assert actual.instruction_data == instruction_data
assert actual.accounts == accounts
def test_transaction_scout_constructor() -> None:
timestamp: datetime = datetime.now()
signatures: typing.Sequence[str] = ["Signature1", "Signature2"]
succeeded: bool = True
group_name: str = "BTC_ETH_USDT"
account1: PublicKey = fake_seeded_public_key("account 1")
account2: PublicKey = fake_seeded_public_key("account 2")
account3: PublicKey = fake_seeded_public_key("account 3")
accounts: typing.Sequence[PublicKey] = [account1, account2, account3]
instructions: typing.Sequence[mango.MangoInstruction] = [fake_mango_instruction()]
messages: typing.Sequence[str] = ["Message 1", "Message 2"]
token = fake_token()
token_value = mango.InstrumentValue(token, Decimal(28))
owner = fake_seeded_public_key("owner")
owned_token_value = mango.OwnedInstrumentValue(owner, token_value)
pre_token_balances: typing.Sequence[mango.OwnedInstrumentValue] = [owned_token_value]
post_token_balances: typing.Sequence[mango.OwnedInstrumentValue] = [owned_token_value]
actual = mango.TransactionScout(timestamp, signatures, succeeded, group_name, accounts,
instructions, messages, pre_token_balances, post_token_balances)
assert actual is not None
assert actual.timestamp == timestamp
assert actual.signatures == signatures
assert actual.succeeded == succeeded
assert actual.group_name == group_name
assert actual.accounts == accounts
assert actual.instructions == instructions
assert actual.messages == messages
assert actual.pre_token_balances == pre_token_balances
assert actual.post_token_balances == post_token_balances
|
module/Handshaker/Handshaker.py | Yiidiir/SniffAir | 1,173 | 11140379 |
#!/usr/bin/python
import logging
logging.getLogger ( "scapy.runtime" ).setLevel ( logging.CRITICAL )
from scapy.all import *
load_contrib ( 'ppi_cace' )
import sys, os, time, signal, subprocess
import argparse
sys.path.insert ( 0, '../../lib/' )
from Queries import *
parser = argparse.ArgumentParser ()
parser.add_argument ( '-f', '--format', metavar='format', dest='format', action='store', help='Format JTR or Hashcat\n',required=True )
parser.add_argument ( '-s', '--ssid', metavar='SSID', dest='ssid', action='store', help=argparse.SUPPRESS, required=False )
parser.add_argument ( '-p', '--path', metavar='path', dest='path', action='store', help='path\n', required=False )
parser.add_argument ( '-w', '--workspace', metavar='database', dest='database', action='store', help='workspace name\n', required=True )
parser.add_argument ( '-i', '--inputfile', metavar='inputfile', dest='inputfile', action='store', help='input file path\n', required=False )
args = parser.parse_args ()
workspace = args.database
q = queries ()
ws = q.db_connect ( '../../' + workspace )
def test(pkts):
global outpath
if args.path:
outpath = args.path
else:
outpath = path
if args.ssid:
SSID_List = args.ssid
MAC_List = str ( q.show_MACs (SSID_List) )
MAC_List = MAC_List.split ( '\n' )
else:
sql = dp.read_sql ( 'select * from INSCOPE_SSIDS', ws )
if sql.empty:
print "No inscope SSIDSs found, please add a SSID before running this module again.\n"
return
else:
SSID_List = str ( q.show_inscope_ssids () )
SSID_List = SSID_List.split ( '\n' )
MAC_List = str ( q.show_inscope_MACs () )
MAC_List = MAC_List.split( '\n' )
for pkt in pkts:
if Dot11Beacon in pkt:
if str ( pkt[Dot11Elt:1].info ) == "":
SSID = "Hidden"
elif str ( pkt[Dot11Elt:1].info ).startswith ( "\000" ):
SSID = "Hidden"
else:
SSID = pkt[Dot11Elt:1].info
SSID = SSID.decode ( 'utf-8', 'ignore' )
if SSID in SSID_List:
wrpcap (outpath +'/filtered.pcap', pkt, append=True )
if pkt.haslayer ( EAPOL ):
EAPOLP = pkt[EAPOL]
if EAPOLP.type == 3:
if pkt.addr2 in MAC_List:
if str ( EAPOLP )[6:8].encode ( "hex" ) == "8a00":
wrpcap ( outpath + '/filtered.pcap', pkt, append=True )
ascii_ap_mac = pkt.addr2
ascii_client_mac = pkt.addr1
aNonce = str ( EAPOLP )[17:49].encode ( "hex" )
print "Frame 1"
print "AP MAC: " + ascii_ap_mac
print "Client MAC: " + ascii_client_mac
print "ANonce: " + aNonce
elif str ( EAPOLP )[6:8].encode ( "hex" ) == "0a00" and str ( EAPOLP )[99:123].encode ( "hex" ):
if pkt.addr2 in MAC_List:
wrpcap ( outpath + '/filtered.pcap', pkt, append=True )
ascii_ap_mac = pkt.addr2
ascii_client_mac = pkt.addr1
sNonce = str ( EAPOLP )[17:49].encode ( "hex" )
mic = str ( EAPOLP )[81:97].encode ( "hex" )
data = str ( EAPOLP )[99:123].encode ( "hex" )
print "Frame 2"
print "AP MAC: " + ascii_ap_mac
print "Client MAC: " + ascii_client_mac
print "SNonce: " + sNonce
print "MIC: " + mic
print "Data: " + data
else:
return
# Use the capture file supplied with -i/--inputfile directly when one was given
if args.inputfile is not None:
    fullpath = args.inputfile
    sniff(offline=fullpath, count=0, store=0, prn=test)
else:
path = workspace.split("/")
path = '/'.join(path[0:2])
path = "../../"+path
for file in os.listdir (path):
if file.endswith ( ".pcapdump" ):
fullpath = (os.path.join ( path, file ))
print fullpath
sniff ( offline=fullpath, count=0, store=0, prn=test )
if args.format == "JTR":
    # aircrack-ng's -J option takes an output prefix (".hccap" is appended automatically)
    subprocess.call ( 'aircrack-ng -J ' + outpath + '/test1 ' + outpath + '/filtered.pcap', shell=True )
subprocess.call ( 'hccap2john '+ outpath +'/test1.hccap > '+ outpath +'/hccap.john', shell=True )
print "john -wordlist=<path to wordlist> -format=wpapsk \"hccap.john\""
if args.format == "Hashcat":
    subprocess.call ( './cap2hccapx.bin '+ outpath +'/filtered.pcap '+ outpath +'/output.hccapx >/dev/null 2>&1', shell=True )
print "oclHashcat64.exe -m 2500 -a3 capture.hccapx ?d?d?d?d?d?d?d?d"
print " or"
print "oclHashcat64.exe -m 2500 -a0 <path to wordlist> capture.hccapx"
if args.format == "both":
subprocess.call ( './cap2hccapx.bin '+ outpath +'/filtered.pcap '+ outpath +'/output.hccapx >/dev/null 2>&1', shell=True )
    subprocess.call ( 'aircrack-ng -J '+ outpath +'/test1 '+ outpath +'/filtered.pcap', shell=True )
subprocess.call ( 'hccap2john '+ outpath +'/test1.hccap > '+ outpath +'/hccap.john', shell=True )
print "john -wordlist=<path to wordlist> -format=wpapsk \"hccap.john\""
print "oclHashcat64.exe -m 2500 -a3 capture.hccapx ?d?d?d?d?d?d?d?d"
print " or"
print "oclHashcat64.exe -m 2500 -a0 <path to wordlist> capture.hccapx"
subprocess.call ( 'rm -rf '+ outpath +'/filtered.pcap', shell=True )
subprocess.call ( 'rm -rf '+ outpath +'/test1.hccap', shell=True )
|
scripts/ch4_proximity.py | mathkann/understanding-random-forests | 353 | 11140394 | import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from scipy.spatial.distance import pdist, squareform
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
from sklearn.manifold import MDS
def rf_proximities(forest, X):
prox = pdist(forest.apply(X), lambda u, v: (u == v).sum()) / forest.n_estimators
prox = squareform(prox)
return prox
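# Note: forest.apply(X) above returns an (n_samples, n_estimators) array of leaf
# indices, so (u == v).sum() counts the trees in which two samples fall in the same
# leaf; dividing by n_estimators yields the usual random-forest proximity in [0, 1].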
data = load_digits()
X, y = data.data, data.target
indices = np.argsort(y)
X = X[indices]
y = y[indices]
# X = X[y < 2]
# y = y[y < 2]
forest = RandomForestClassifier(n_estimators=50, n_jobs=2, random_state=1).fit(X, y)
prox = rf_proximities(forest, X)
plt.matshow(prox, cmap="Reds")
plt.show()
model = MDS(dissimilarity="precomputed", n_jobs=2)
coords = model.fit_transform(1. - prox)
n_classes = forest.n_classes_
cm = plt.get_cmap("hsv")
colors = (cm(1. * i / n_classes) for i in range(n_classes))
for k, c in zip(range(n_classes), colors):
plt.plot(coords[y == k, 0], coords[y == k, 1], '.', label=k, color=c)
plt.legend(loc="best")
plt.show()
|
www/tests/inject_name_in_module.py | raspberrypieman/brython | 5,926 | 11140398 |
def yyy():
return xxx*2 |
retool-gui.py | rufotheone-fr/retool | 117 | 11140414 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" retool-gui.py: GUI version of Retool for Windows.
https://github.com/unexpectedpanda/retool
"""
import os
import platform
import PySimpleGUIQt as sg
import retool
import sys
import updateclonelists
import webbrowser
from modules.classes import Filters, RegionKeys, UserInput
from modules.importdata import build_regions
from modules.output import generate_config
from modules.userinput import import_user_config, import_user_filters
from modules.xml import process_input_dat
# Generate regions and languages from internal-config.json
region_data = build_regions(RegionKeys())
# Platform-specific setup
font = 'Any'
scale_multiplier = 1
if sys.platform.startswith('win'):
import ctypes
# Fix the taskbar icon not loading on Windows
if sys.argv[0].endswith('.exe') == False:
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(u'retool.retool.retool.retool')
# Fonts
font = 'Segoe UI, Tahoma, Arial'
# Get the scale factor
if float(platform.release()) > 9:
scale_factor = ctypes.windll.shcore.GetScaleFactorForDevice(0) / 100
os.environ['QT_AUTO_SCREEN_SCALE_FACTOR'] = '1'
if scale_factor >= 1.5:
os.environ['QT_SCALE_FACTOR'] = '0.8'
elif scale_factor == 1.25 or scale_factor == 2.25:
os.environ['QT_SCALE_FACTOR'] = '1'
scale_multiplier = 1.3
elif (
float(platform.release()) >= 8
and float(platform.release()) <= 8.1):
ctypes.windll.shcore.SetProcessDpiAwareness(0)
elif 'linux' in sys.platform:
    font = 'Ubuntu, DejaVu Sans, FreeSans'
scale_multiplier = 1.1
# Set defaults
sg.theme('SystemDefaultForReal')
sg.SetOptions(font=(font, 10))
def main():
__version__ = '0.11'
# Generate user config file if it's missing
generate_config(region_data.languages_long, region_data.region_order, False, False, True)
# Menu
menu = [
['&File', ['&Check for clone list updates', '&Exit']],
['&Help', ['&Wiki', '&Github', '&About...']]]
# Exclusions
tab_exclusions = [
[sg.Text('Title types to exclude from the output dat',
font=(font, 9, 'bold'),
pad=(30,30))],
[sg.HorizontalSeparator()],
generate_checkbox(['Add-ons', 'Educational'], 30*scale_multiplier, ['Exclude titles with the dat category "Add-Ons" -- these\ninclude expansion packs and additional materials for titles', 'Exclude titles with the dat category "Educational"']),
generate_checkbox(['Applications', 'Manuals'], 30*scale_multiplier, ['Exclude titles with the dat category "Applications"\nor with the following text in the name:\n\n* (Program)\n* (Test Program)\n* Check Program\n* Sample Program', 'Exclude titles with the "(Manual)" in the name']),
generate_checkbox(['Audio', 'Multimedia'], 30*scale_multiplier, ['Exclude titles with the dat category "Audio"\n-- these might be used as soundtracks by games', 'Exclude titles with the dat category "Multimedia"\n-- these might include games']),
generate_checkbox(['Bad dumps', 'Pirate'], 30*scale_multiplier, ['Exclude titles with "[b]" in the name', 'Exclude titles with "(Pirate)" in the name']),
generate_checkbox(['BIOS and other chips', 'Preproduction'], 30*scale_multiplier, ['Exclude titles with the dat category "Console"\nor with the following text in the name:\n\n* [BIOS]\n* (Enhancement Chip)', 'Exclude titles with the dat category "Preproduction" or with the\nfollowing text in the name:\n\n* (Alpha [0-99])\n* (Beta [0-99])\n* (Pre-Production)\n* (Possible Proto)\n* (Proto [0-99])\n* (Review Code)']),
generate_checkbox(['Bonus discs', 'Promotional'], 30*scale_multiplier, ['Exclude titles with the dat category "Bonus Discs" -- these\ncould be anything other than the main title content,\nlike patches, manuals, collector discs, or otherwise', 'Exclude titles with the dat category "Promotional" or with the\nfollowing text in the name:\n\n* (Promo)\n* EPK\n* Press Kit']),
generate_checkbox(['Coverdiscs', 'Unlicensed'], 30*scale_multiplier, ['Exclude titles with the dat category "Coverdiscs" -- these\nwere discs that were attached to the front of magazines', 'Exclude titles with "(Unl)" in the name']),
generate_checkbox(['Demos, kiosks, and samples', 'Video'], 30*scale_multiplier, ['Exclude titles with the dat category "Demos" or with the\nfollowing text in the name:\n\n* @barai\n* (Demo [1-9])\n* (Demo-CD)\n* (GameCube Preview\n* (Kiosk *|* Kiosk)\n* (Preview)\n* Kiosk Demo Disc\n* PS2 Kiosk\n* PSP System Kiosk\n* Sample\n* Taikenban\n* Trial Edition', 'Exclude titles with the dat category "Video"']),
]
# Modes
tab_modes = [
[sg.Text('Modes to enable',
font=(font, 9, 'bold'),
pad=(30,30))],
[sg.HorizontalSeparator()],
generate_checkbox(['Include titles that don\'t have hashes, ROMs, or disks specified'], 50*scale_multiplier, ['Not recommended\n\nBy default, Retool removes these titles from the output dat']),
generate_checkbox(['Don\'t replace (Unl) and (Aftermarket) titles if a production version is found in another region'], 60*scale_multiplier, ['By default, Retool prefers production titles from lower regions over\n(Unl) and (Aftermarket) titles from higher regions']),
generate_checkbox(['Titles ripped from modern platform rereleases replace standard editions'], 50*scale_multiplier, ['Not recommended\n\nThese titles are ripped from modern platforms like Virtual Console,\nand might not work with emulators']),
generate_checkbox(['Output dat in legacy parent/clone format'], 50*scale_multiplier, ['Not recommended for use with dat managers\n\nUse for the following things:\n\n* CloneRel\n* Manually analyzing parent/clone relationships created by Retool\n* Diffing outputs in order to update clone lists']),
generate_checkbox(['Disable custom global and system filters'], 50*scale_multiplier, ['User-defined strings that include or exclude\ntitles Retool ordinarily wouldn\'t']),
generate_checkbox(['Also output lists of what titles have been kept and removed'], 50*scale_multiplier, ['In addition to the output dat, two text files will be\nproduced that detail the changes Retool has made,\nseparated by the types of changes']),
generate_checkbox(['Also output a list of just the 1G1R title names'], 50*scale_multiplier, ['In addition to the output dat, generate a list with each\n1G1R title name taking a new line, and no extra formatting']),
[sg.Text('')],
[
sg.Text('', size=(20,15)),
sg.Text('Add this text to the start of each title (start with http://, https://, or ftp:// to URL encode each line)', key='prefix-label', visible=False)],
[
sg.Text('', size=(20,15)),
sg.Input(enable_events=True, key='prefix-input', size=(300*scale_multiplier, 35*scale_multiplier), visible=False)],
[sg.Text('')],
[
sg.Text('', size=(20,15)),
sg.Text('Add this text to the end of each title', key='suffix-label', visible=False)],
[
sg.Text('', size=(20,15)),
sg.Input(enable_events=True, key='suffix-input', size=(300*scale_multiplier, 35*scale_multiplier), visible=False)]
]
# User filters
tab_global_filters = [
[sg.Text('Custom global filters (all dats)',
font=(font, 9, 'bold'),
pad=(30,30))],
[sg.HorizontalSeparator()],
[sg.Text('Exclude or include specific titles by adding your own text '
'strings to match against. Each string should\nbe on its own '
'line, and is case sensitive. See the wiki for more '
'information.\n'
'\n• Plain text indicates a partial string match.'
'\n• A prefix of / indicates a regular expression match.'
'\n• A prefix of | indicates a full string match.\n',
font=(font, 9),
pad=(30,30))],
[
sg.Text('Exclude'),
sg.Text('Include')
],
[
sg.Multiline(key='global-filters-exclude', enable_events=True),
sg.Multiline(key='global-filters-include', enable_events=True)
]
]
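    # Example filter strings for the syntax described in the text above (illustrative
    # only -- real filters depend on the dats being processed):
    #   Coverdisc          matches any title containing "Coverdisc" (partial match)
    #   /.*\(Beta.*\)      matches titles via a regular expression
    #   |Some Game (USA)   matches that exact title name only (full match)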
tab_system_filters = [
[sg.Text('Custom system filters',
key='custom-system-filters-heading',
font=(font, 9, 'bold'),
pad=(30,30))],
[sg.HorizontalSeparator()],
[sg.Text(
'You must open a single dat file before you can set its custom system filters.',
key='system-filters-text',
font=(font, 9),
pad=(30,30))],
[
sg.Text('Exclude', key='system-filters-label-exclude', visible=False),
sg.Text('Include', key='system-filters-label-include', visible=False)
],
[
sg.Multiline(key='system-filters-exclude', enable_events=True, visible=False),
sg.Multiline(key='system-filters-include', enable_events=True, visible=False)
]
]
# Region selection
tab_regions = [
[sg.Text('Filter by regions (must add at least one)',
font=(font, 9, 'bold'))],
[sg.HorizontalSeparator()],
[
sg.Text('Available regions'),
sg.Text('Filter by these regions (order is important)')
],
[
sg.Listbox(
enable_events=True,
key='available-regions',
select_mode=sg.LISTBOX_SELECT_MODE_EXTENDED,
size=(220*scale_multiplier,200*scale_multiplier),
values=sorted(region_data.all)),
sg.Column(
layout=[
[sg.Button(
'»',
enable_events=True,
font=(f'Arial, Verdana', 10),
key='button-region-move-remainder-right',
size=(50,40),
tooltip='Add the remaining available regions to the\nend of the filtered list')],
[sg.Button(
'►',
enable_events=True,
font=(f'Arial, Verdana', 10),
key='button-region-move-right',
size=(50,40),
tooltip='Move the selected regions to the filtered list')],
[sg.Button(
'◄',
enable_events=True,
font=(f'Arial, Verdana', 10),
key='button-region-move-left',
size=(50,40),
tooltip='Move the selected regions to the available list')],
[sg.Button(
'«',
enable_events=True,
font=(f'Arial, Verdana', 10),
key='button-region-move-all-left',
size=(50,40),
tooltip='Move all regions to the available list')]
]
),
sg.Listbox(
values='',
size=(220*scale_multiplier,200*scale_multiplier),
key='filtered-regions',
enable_events=True,
select_mode=sg.LISTBOX_SELECT_MODE_EXTENDED),
sg.Column(
layout=[
[sg.Text('', font=('Arial, Verdana', 10), size=(50,20))],
[sg.Button(
'▲',
enable_events=True,
font=(f'Arial, Verdana', 10),
key='button-region-move-up',
size=(50,40),
tooltip='Move the selected regions up the order')],
[sg.Button(
'▼',
enable_events=True,
font=(f'Arial, Verdana', 10),
key='button-region-move-down',
size=(50,40),
tooltip='Move the selected regions down the order')]
]
)
],
[sg.Text('', font=('Arial, Verdana', 9), size=(30,15))],
[
sg.Button(
'Use suggested region order for English speakers',
enable_events=True,
key='button-default-region-order',
size=(350*scale_multiplier,45*scale_multiplier),
target=(555666777,-1),
tooltip='Set a region order that prioritizes English and 60Hz regions.\nAdd only English in the Languages tab to restrict the output to English titles.')
],
[sg.Text('', font=('Arial, Verdana', 9), size=(30,55))],
]
# Language selection
tab_languages = [
[sg.Text('Filter by language (leave filter list empty to include all languages)', font=(font, 9, 'bold'))],
[sg.HorizontalSeparator()],
[
sg.Text('Available languages'),
sg.Text('Filter by these languages (order doesn\'t matter)')
],
[
sg.Listbox(
enable_events=True,
key='available-languages',
select_mode=sg.LISTBOX_SELECT_MODE_EXTENDED,
size=(220*scale_multiplier,200*scale_multiplier),
values=sorted(region_data.languages_long)),
sg.Column(
layout=[
[sg.Button(
'»',
enable_events=True,
font=(f'Arial, Verdana', 10),
key='button-language-move-remainder-right',
size=(50,40),
tooltip='Add the remaining available languages to the\nend of the filtered list')],
[sg.Button(
'►',
enable_events=True,
font=(f'Arial, Verdana', 10),
key='button-language-move-right',
size=(50,40),
tooltip='Move the selected languages to the filtered list')],
[sg.Button(
'◄',
enable_events=True,
font=(f'Arial, Verdana', 10),
key='button-language-move-left',
size=(50,40),
tooltip='Move the selected regions to the available list')],
[sg.Button(
'«',
enable_events=True,
font=(f'Arial, Verdana', 10),
key='button-language-move-all-left',
size=(50,40),
tooltip='Move all regions to the available list')],
]),
sg.Listbox(
enable_events=True,
key='filtered-languages',
select_mode=sg.LISTBOX_SELECT_MODE_EXTENDED,
size=(220*scale_multiplier,200*scale_multiplier),
values=''),
sg.Column(
layout=[
[sg.Text('', font=(f'Arial, Verdana', 10), size=(50,40))]
]),
],
[sg.Text('', font=('Arial, Verdana', 9), size=(30,15))],
[sg.Text('', size=(350*scale_multiplier,45*scale_multiplier))],
[sg.Text('', font=('Arial, Verdana', 9), size=(30,55))],
]
# The actual GUI layout
layout = [
[sg.Menu(menu, key='menu')],
# File/folder selection
[sg.Text('Select the dat/s to convert', font=(font, 9, 'bold'))],
[
sg.Input(visible=False, enable_events=True, key='button-single-dat'),
sg.FileBrowse(
'Choose single dat',
file_types=(('Dat files', '*.dat'),('All files', '*.*')),
initial_folder=None,
size=(170*scale_multiplier,45*scale_multiplier),
target=(555666777,-1)),
sg.Input(visible=False, enable_events=True, key='button-folder-dat'),
sg.FolderBrowse(
'Choose folder of dats',
initial_folder=None,
size=(170*scale_multiplier,45*scale_multiplier),
target=(555666777,-1)),
],
[sg.Text('Nothing selected yet', size=(500*scale_multiplier,20*scale_multiplier), text_color='#777', key='filename', font=(f'{font}', 9))],
[sg.Text('_'*125, text_color='#CCC')],
[sg.Text('', font=(f'{font}', 6, 'bold'))],
[sg.Text('Select an output folder', font=(font, 9, 'bold'))],
[
sg.Input(visible=False, enable_events=True, key='button-output-folder'),
sg.FolderBrowse(
'Choose output folder',
initial_folder=None,
size=(170*scale_multiplier,45*scale_multiplier),
target=(555666777,-1))
],
[sg.Text('Nothing selected yet', size=(500*scale_multiplier,20*scale_multiplier), text_color='#777', key='output-folder', font=(f'{font}', 9))],
[sg.Text('', font=(f'{font}', 10, 'bold'))],
# Tabs
[
sg.TabGroup(
[
[
sg.Tab('Regions', tab_regions, background_color='white'),
sg.Tab('Languages', tab_languages, background_color='white'),
sg.Tab('Exclusions', tab_exclusions, background_color='white'),
sg.Tab('Modes', tab_modes, background_color='white'),
sg.Tab('Custom global filters', tab_global_filters, background_color='white'),
sg.Tab('Custom system filters', tab_system_filters, background_color='white')
],
],
key='tab-group'
)
],
[sg.Text('', font=(f'{font}', 6, 'bold'))],
# CTAs
[
sg.Text('Your settings are saved automatically for future use', size=(500*scale_multiplier,30), text_color='#777'),
sg.Button('Go!', size=(130*scale_multiplier,45*scale_multiplier), key='button-go', bind_return_key=True, tooltip='Process the input dat/s')]
]
window = sg.Window('Retool - convert Redump and No-Intro dats to 1G1R!',
layout,
icon='retool.ico',
resizable=False,
finalize=True)
# Customize styles
checkbox_style = (
'QCheckBox {font-size: 10pt;}'
+ 'QCheckBox::indicator {margin: 4px 4px 2px 0;}'
+ 'QCheckBox::indicator {width: 14px; height: 14px;}'
+ 'QCheckBox::indicator:unchecked {image: url(images/checkbox.svg);}'
+ 'QCheckBox::indicator:unchecked:hover {image: url(images/checkbox-hover.svg);}'
+ 'QCheckBox::indicator:unchecked:pressed {image: url(images/checkbox-pressed.svg);}'
+ 'QCheckBox::indicator:checked {image: url(images/checkbox-checked.svg);}'
+ 'QCheckBox::indicator:checked:hover {image: url(images/checkbox-checked-hover.svg);}'
+ 'QCheckBox::indicator:checked:pressed {image: url(images/checkbox-checked-pressed.svg);}'
)
tab_style = (
f'QTabBar {{font-size: 9pt; font-family: {font}}}'
)
for key in window.AllKeysDict:
if 'checkbox-' in key:
window[key].QT_Checkbox.setStyleSheet(checkbox_style)
if 'tab-group' in key:
window[key].Widget.setStyleSheet(tab_style)
input_file = ''
output_folder = ''
# Reset listboxes for predictable results
window['filtered-regions'].update([])
window['filtered-languages'].update([])
# Import settings from user-config.yaml
gui_settings = []
settings = import_user_config(region_data, UserInput())
if settings.user_config.data['language filter'] != '':
window['filtered-languages'].update(sorted(settings.user_config.data['language filter']))
window['available-languages'].update([language for language in getattr(window['available-languages'], 'Values') if language not in settings.user_config.data['language filter']])
if settings.user_config.data['region order'] != '':
window['filtered-regions'].update(settings.user_config.data['region order'])
window['available-regions'].update([language for language in getattr(window['available-regions'], 'Values') if language not in settings.user_config.data['region order']])
for setting in settings.user_config.data['gui settings']:
if 'exclude' in str(setting):
for key, value in dict(setting).items():
if value != '':
if 'a' in value: window['checkbox-applications'].update(True)
if 'A' in value: window['checkbox-audio'].update(True)
if 'b' in value: window['checkbox-bad-dumps'].update(True)
if 'B' in value: window['checkbox-bios-and-other-chips'].update(True)
if 'c' in value: window['checkbox-coverdiscs'].update(True)
if 'd' in value: window['checkbox-demos-kiosks-and-samples'].update(True)
if 'D' in value: window['checkbox-add-ons'].update(True)
if 'e' in value: window['checkbox-educational'].update(True)
if 'm' in value: window['checkbox-manuals'].update(True)
if 'M' in value: window['checkbox-multimedia'].update(True)
if 'o' in value: window['checkbox-bonus-discs'].update(True)
if 'p' in value: window['checkbox-pirate'].update(True)
if 'P' in value: window['checkbox-preproduction'].update(True)
if 'r' in value: window['checkbox-promotional'].update(True)
if 'u' in value: window['checkbox-unlicensed'].update(True)
if 'v' in value: window['checkbox-video'].update(True)
if 'output' in str(setting):
window['button-output-folder'].update(os.path.abspath(dict(setting)['output']))
window['output-folder'].update(os.path.abspath(dict(setting)['output']))
output_folder = os.path.abspath(dict(setting)['output'])
    if 'emptytitles' in settings.user_config.data['gui settings']: window['checkbox-include-titles-that-dont-have-hashes-roms-or-disks-specified'].update(True)
if 'z' in settings.user_config.data['gui settings']: window['checkbox-titles-ripped-from-modern-platform-rereleases-replace-standard-editions'].update(True)
if 'y' in settings.user_config.data['gui settings']: window['checkbox-dont-replace-unl-and-aftermarket-titles-if-a-production-version-is-found-in-another-region'].update(True)
if 'x' in settings.user_config.data['gui settings']: window['checkbox-output-dat-in-legacy-parent-clone-format'].update(True)
if 'log' in settings.user_config.data['gui settings']: window['checkbox-also-output-lists-of-what-titles-have-been-kept-and-removed'].update(True)
if 'list' in settings.user_config.data['gui settings']:
window['checkbox-also-output-a-list-of-just-the-1g1r-title-names'].update(True)
window['prefix-label'].update(visible=True)
window['prefix-input'].update(visible=True)
window['suffix-label'].update(visible=True)
window['suffix-input'].update(visible=True)
if 'nofilters' in settings.user_config.data['gui settings']: window['checkbox-disable-custom-global-and-system-filters'].update(True)
# Import list prefix and suffixes
if settings.user_config.data['list prefix'] != '':
window['prefix-input'].update(settings.user_config.data['list prefix'][0])
if settings.user_config.data['list suffix'] != '':
window['suffix-input'].update(settings.user_config.data['list suffix'][0])
# Import settings from user-filters/global.yaml
user_filters = import_user_filters('global', 'global')
window['global-filters-exclude'].update('\n'.join(user_filters.data['exclude']))
window['global-filters-include'].update('\n'.join(user_filters.data['include']))
# Check for clone lists
if os.path.exists('./clonelists'):
if len(os.listdir('./clonelists')) == 0:
gate(
window,
'Update clone lists?',
'You don\'t have any clone lists. Clone lists help Retool to match\ntitles with different names in different regions.\n\nDownload them now?','Download','No thanks')
else:
gate(
window,
'Update clone lists?',
'You don\'t have any clone lists. Clone lists help Retool to match\ntitles with different names in different regions.\n\nDownload them now?','Download','No thanks')
# Instantiate filters and other things
filters = Filters()
# The main loop
while True:
event, values = window.read()
if event in (sg.WIN_CLOSED, 'Exit'):
break
if event == 'Check for clone list updates':
updateclonelists.main()
if event =='Wiki':
webbrowser.open('https://github.com/unexpectedpanda/retool/wiki', new=2)
if event =='Github':
webbrowser.open('https://github.com/unexpectedpanda/retool/', new=2)
if event == 'About...':
retool_version = retool.retool_version()
gate(
window,
'Retool GUI',
f'Retool converts Redump and No-Intro dats to 1G1R.\n\nVersions:\n\n • Retool GUI:\t{__version__}\n • Retool CLI:\t{retool_version}')
if event == 'button-go':
# Make sure the required settings are available
error_list = []
if input_file == '':
error_list.append('Choose a dat file or folder of dats')
if output_folder == '':
error_list.append('Choose an output folder')
if getattr(window['filtered-regions'], 'Values') == []:
error_list.append('Choose at least one region to filter by')
error_list_string = '\n • '.join(error_list)
if error_list != []:
gate(window, 'Missing details required', f'You must do the following before you can process a dat: \n\n • {error_list_string}')
else:
if getattr(window['filtered-languages'], 'Values') != []:
filter_by_languages = True
else:
filter_by_languages = False
if gui_settings == []:
gui_output_settings = ''
else:
hidden_options = ['Input', 'g', 'l', 'output', 'q', 'errors', 'log', 'nofilters', 'list']
gui_output_settings = [setting for setting in gui_settings if ('output' not in setting and 'exclude' not in setting)]
exclude_settings = [setting for setting in gui_settings if 'exclude' in setting]
exclude_settings = str(exclude_settings).replace('exclude: ','')
if exclude_settings != '':
gui_output_settings.append(exclude_settings.replace('[','').replace(']','').replace('\'',''))
gui_output_settings = f' (-{"".join(sorted([setting for setting in gui_output_settings if setting not in hidden_options], key=str.casefold))})'
gui_input = UserInput(
input_file,
output_folder,
values['checkbox-applications'],
values['checkbox-audio'],
values['checkbox-bad-dumps'],
values['checkbox-bios-and-other-chips'],
values['checkbox-coverdiscs'],
values['checkbox-demos-kiosks-and-samples'],
values['checkbox-add-ons'],
values['checkbox-educational'],
values['checkbox-manuals'],
values['checkbox-multimedia'],
values['checkbox-bonus-discs'],
values['checkbox-pirate'],
values['checkbox-preproduction'],
values['checkbox-promotional'],
values['checkbox-unlicensed'],
values['checkbox-video'],
values['checkbox-titles-ripped-from-modern-platform-rereleases-replace-standard-editions'],
values['checkbox-dont-replace-unl-and-aftermarket-titles-if-a-production-version-is-found-in-another-region'],
filter_by_languages, # languages
values['checkbox-output-dat-in-legacy-parent-clone-format'],
gui_output_settings, # user options
False,
values['checkbox-disable-custom-global-and-system-filters'],
values['checkbox-also-output-lists-of-what-titles-have-been-kept-and-removed'],
values['checkbox-also-output-a-list-of-just-the-1g1r-title-names'],
values['checkbox-include-titles-that-dont-have-hashes-roms-or-disks-specified'],
False)
retool.main(gui_input)
if event == 'checkbox-also-output-a-list-of-just-the-1g1r-title-names':
if values['checkbox-also-output-a-list-of-just-the-1g1r-title-names'] == True:
window['prefix-label'].update(visible=True)
window['prefix-input'].update(visible=True)
window['suffix-label'].update(visible=True)
window['suffix-input'].update(visible=True)
else:
window['prefix-label'].update(visible=False)
window['prefix-input'].update(visible=False)
window['suffix-label'].update(visible=False)
window['suffix-input'].update(visible=False)
if event == 'button-single-dat':
if values['button-single-dat'] != '':
window['filename'].update(os.path.basename(values['button-single-dat']))
input_file = values['button-single-dat']
# Get the dat name
dat_read = process_input_dat(input_file, False, True)
if 'PlayStation Portable' in dat_read.name:
if 'no-intro' in dat_read.url:
filters.system_file = 'Sony - PlayStation Portable (No-Intro)'
elif 'redump' in dat_read.url:
filters.system_file = 'Sony - PlayStation Portable (Redump)'
else:
filters.system_file = dat_read.name
# Clear the custom system filters
window['system-filters-exclude'].update('')
window['system-filters-include'].update('')
# Import settings from user-filters/dat-name.yaml if it exists
if os.path.isfile(f'user-filters/{filters.system_file}.yaml'):
custom_system_filters = import_user_filters(filters.system_file, 'system')
window['system-filters-exclude'].update('\n'.join(custom_system_filters.data['exclude']))
window['system-filters-include'].update('\n'.join(custom_system_filters.data['include']))
# Show the custom system filters
system_filters_text = (
'Exclude or include specific titles by adding your own text '
'strings to match against. Each string should\nbe on its own '
'line, and is case sensitive. See the wiki for more '
'information.\n'
'\n• Plain text indicates a partial string match.'
'\n• A prefix of / indicates a regular expression match.'
'\n• A prefix of | indicates a full string match.\n'
)
window['custom-system-filters-heading'].update(f'Custom system filters ({filters.system_file})')
window['system-filters-text'].update(system_filters_text)
window['system-filters-label-exclude'].update(visible=True)
window['system-filters-label-include'].update(visible=True)
window['system-filters-exclude'].update(visible=True)
window['system-filters-include'].update(visible=True)
if event == 'button-folder-dat':
if values['button-folder-dat'] != '':
window['filename'].update(os.path.abspath(values['button-folder-dat']))
input_file = values['button-folder-dat']
if event == 'button-output-folder':
if values['button-output-folder'] != '':
window['output-folder'].update(os.path.abspath(values['button-output-folder']))
output_folder = values['button-output-folder']
if event == 'button-region-move-right':
move_listbox_item_right(window, 'available-regions', 'filtered-regions', values)
if event == 'button-region-move-left':
move_listbox_item_left(window, 'available-regions', 'filtered-regions', values)
if event == 'button-region-move-remainder-right':
move_listbox_remainder_right(window, 'available-regions', 'filtered-regions')
if event == 'button-region-move-all-left':
move_listbox_all_left(window, 'available-regions', 'filtered-regions')
if event == 'button-region-move-up' or event == 'button-region-move-down':
# Get the values from the filtered regions listbox
if type(getattr(window['filtered-regions'], 'Values')) is not str:
filtered_region_list = getattr(window['filtered-regions'], 'Values')
else:
filtered_region_list = []
# Get the indexes of everything selected, and figure out the items required
# to calculate the move
if filtered_region_list != []:
all_regions = getattr(window['filtered-regions'], 'Values')
selected_regions = values['filtered-regions']
selected_indexes = []
for i, value in enumerate(all_regions):
if value in values['filtered-regions']:
selected_indexes.append(i)
if selected_indexes != []:
filtered_regions_remainder = [region for region in all_regions if region not in selected_regions]
# Shuffle the items up or down
if event == 'button-region-move-up':
if selected_indexes[0] <= 1:
all_regions = selected_regions + filtered_regions_remainder
else:
all_regions = filtered_regions_remainder[:selected_indexes[0] - 1] + selected_regions + filtered_regions_remainder[selected_indexes[0]-1:]
# Change the position of the list box when moving items
window['filtered-regions'].update(all_regions)
window['filtered-regions'].set_value(selected_regions)
window['filtered-regions'].Widget.scrollToItem(window['filtered-regions'].Widget.item(selected_indexes[0] - 2))
if event == 'button-region-move-down':
if selected_indexes[-1] >= len(all_regions) - 2:
all_regions = filtered_regions_remainder + selected_regions
else:
all_regions = (
filtered_regions_remainder[:-len(all_regions[selected_indexes[-1] + 2:])]
+ selected_regions
+ all_regions[selected_indexes[-1] + 2:]
)
# Change the position of the list box when moving items
window['filtered-regions'].update(all_regions)
window['filtered-regions'].set_value(selected_regions)
window['filtered-regions'].Widget.scrollToItem(window['filtered-regions'].Widget.item(selected_indexes[-1] + 2))
if event == 'button-default-region-order':
window['available-regions'].update([])
window['filtered-regions'].update(region_data.region_order)
if event == 'button-language-move-right':
move_listbox_item_right(window, 'available-languages', 'filtered-languages', values, True)
if event == 'button-language-move-left':
move_listbox_item_left(window, 'available-languages', 'filtered-languages', values)
if event == 'button-language-move-remainder-right':
move_listbox_remainder_right(window, 'available-languages', 'filtered-languages', True)
if event == 'button-language-move-all-left':
move_listbox_all_left(window, 'available-languages', 'filtered-languages')
# Write settings any time the user interacts with the appropriate widgets
if event:
gui_settings = []
excludes = []
if values['checkbox-applications'] == True:
excludes.append('a')
if values['checkbox-audio'] == True:
excludes.append('A')
if values['checkbox-bad-dumps'] == True:
excludes.append('b')
if values['checkbox-bios-and-other-chips'] == True:
excludes.append('B')
if values['checkbox-coverdiscs'] == True:
excludes.append('c')
if values['checkbox-demos-kiosks-and-samples'] == True:
excludes.append('d')
if values['checkbox-add-ons'] == True:
excludes.append('D')
if values['checkbox-educational'] == True:
excludes.append('e')
if values['checkbox-manuals'] == True:
excludes.append('m')
if values['checkbox-multimedia'] == True:
excludes.append('M')
if values['checkbox-bonus-discs'] == True:
excludes.append('o')
if values['checkbox-pirate'] == True:
excludes.append('p')
if values['checkbox-preproduction'] == True:
excludes.append('P')
if values['checkbox-promotional'] == True:
excludes.append('r')
if values['checkbox-unlicensed'] == True:
excludes.append('u')
if values['checkbox-video'] == True:
excludes.append('v')
if values['checkbox-include-titles-that-dont-have-hashes-roms-or-disks-specified'] == True:
gui_settings.append('emptytitles')
if values['checkbox-titles-ripped-from-modern-platform-rereleases-replace-standard-editions'] == True:
gui_settings.append('z')
if values['checkbox-dont-replace-unl-and-aftermarket-titles-if-a-production-version-is-found-in-another-region'] == True:
gui_settings.append('y')
if values['checkbox-output-dat-in-legacy-parent-clone-format'] == True:
gui_settings.append('x')
if values['checkbox-also-output-lists-of-what-titles-have-been-kept-and-removed'] == True:
gui_settings.append('log')
if values['checkbox-also-output-a-list-of-just-the-1g1r-title-names'] == True:
gui_settings.append('list')
if values['checkbox-disable-custom-global-and-system-filters'] == True:
gui_settings.append('nofilters')
if values['button-output-folder'] != '':
gui_settings.append(f'output: {os.path.abspath(values["button-output-folder"])}')
gui_settings.append(f'exclude: {"".join(excludes)}')
if values['global-filters-exclude'] != '':
filters.global_exclude = []
for filter_text in values['global-filters-exclude'].splitlines():
if filter_text != '': filters.global_exclude.append(filter_text.replace('\\', '\\\\').replace('"', '\\"'))
if values['global-filters-include'] != '':
filters.global_include = []
for filter_text in values['global-filters-include'].splitlines():
if filter_text != '': filters.global_include.append(filter_text.replace('\\', '\\\\').replace('"', '\\"'))
if values['system-filters-exclude'] != '':
filters.system_exclude = []
for filter_text in values['system-filters-exclude'].splitlines():
if filter_text != '': filters.system_exclude.append(filter_text.replace('\\', '\\\\').replace('"', '\\"'))
if values['system-filters-include'] != '':
filters.system_include = []
for filter_text in values['system-filters-include'].splitlines():
if filter_text != '': filters.system_include.append(filter_text.replace('\\', '\\\\').replace('"', '\\"'))
if values['prefix-input'] != '':
list_prefix = values['prefix-input'].replace('\\', '\\\\').replace('"', '\\"')
else:
list_prefix = False
if values['suffix-input'] != '':
list_suffix = values['suffix-input'].replace('\\', '\\\\').replace('"', '\\"')
else:
list_suffix = False
# Write the user-config.yaml file
region_settings = getattr(window['filtered-regions'], 'Values').copy()
for region in getattr(window['available-regions'], 'Values'):
region_settings.append(f'True|{region}')
if getattr(window['filtered-languages'], 'Values') != []:
language_settings = getattr(window['filtered-languages'], 'Values').copy()
else:
language_settings = []
for language in getattr(window['available-languages'], 'Values'):
language_settings.append(f'True|{language}')
if values['global-filters-exclude'] == '': filters.global_exclude = []
if values['global-filters-include'] == '': filters.global_include = []
if values['system-filters-exclude'] == '': filters.system_exclude = []
if values['system-filters-include'] == '': filters.system_include = []
generate_config(language_settings, region_settings, list_prefix, list_suffix, True, filters, gui_settings, True)
window.close()
def move_listbox_item_right(window, left_key, right_key, values, sort=False):
# Get the values that should remain in the left listbox
left_listbox = [item for item in getattr(window[left_key], 'Values') if item not in values[left_key]]
# Get the values in the right listbox, and add the new items
if type(getattr(window[right_key], 'Values')) is not str:
right_listbox = getattr(window[right_key], 'Values')
else:
right_listbox = []
window[left_key].update(left_listbox)
if sort == False:
window[right_key].update(right_listbox + values[left_key])
else:
window[right_key].update(sorted(right_listbox + values[left_key]))
def move_listbox_item_left(window, left_key, right_key, values):
# Get the values that should remain in the right listbox
right_listbox = [item for item in getattr(window[right_key], 'Values') if item not in values[right_key]]
# Get the values in the left listbox, and add the new items
if type(getattr(window[left_key], 'Values')) is not str:
left_listbox = getattr(window[left_key], 'Values')
else:
left_listbox = []
window[right_key].update(right_listbox)
window[left_key].update(sorted(left_listbox + values[right_key]))
def move_listbox_remainder_right(window, left_key, right_key, sort=False):
# Get the values in the right listbox, and add the remaining items from the left listbox
if type(getattr(window[right_key], 'Values')) is not str:
right_listbox = getattr(window[right_key], 'Values')
else:
right_listbox = []
if sort == False:
window[right_key].update(right_listbox + getattr(window[left_key], 'Values'))
else:
window[right_key].update(sorted(right_listbox + getattr(window[left_key], 'Values')))
window[left_key].update([])
def move_listbox_all_left(window, left_key, right_key):
# Get the values in the right listbox, and move them all to the left listbox
if type(getattr(window[right_key], 'Values')) is not str:
right_listbox = getattr(window[right_key], 'Values')
else:
right_listbox = []
window[left_key].update(sorted(right_listbox + getattr(window[left_key], 'Values')))
window[right_key].update([])
def generate_checkbox(labels, width, tips=None):
checkboxes = []
single_quote = '\''
for i, label in enumerate(labels):
if tips == None:
checkboxes.append(sg.Checkbox(
enable_events=True,
key=f'checkbox-{label.lower().replace(" ", "-").replace("/", "-").replace(",","").replace(single_quote,"").replace("(","").replace(")","")}',
font=(font, 9),
pad=(0,0),
size=(width,0.6),
text=label))
else:
checkboxes.append(sg.Checkbox(
enable_events=True,
key=f'checkbox-{label.lower().replace(" ", "-").replace("/", "-").replace(",","").replace(single_quote,"").replace("(","").replace(")","")}',
font=(font, 9),
pad=(0,0),
size=(width,0.6),
text=label,
tooltip=tips[i]))
return checkboxes
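# Illustrative note (added for clarity, not part of the original script): the checkbox
# keys used in the event loop above are derived mechanically from the label text. For
# example, a label along the lines of 'Demos, kiosks, and samples' (hypothetical here)
# becomes 'checkbox-demos-kiosks-and-samples':
#
#     label = 'Demos, kiosks, and samples'
#     key = 'checkbox-' + label.lower().replace(' ', '-').replace('/', '-').replace(',', '')
#     # -> 'checkbox-demos-kiosks-and-samples'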
def gate(window, notification_title, notification_message, button_name='Got it', secondary_button_name=False):
if secondary_button_name == False:
dialog_layout = [
[sg.Text('', font=(f'{font}', 6, 'bold'))],
[sg.Text(notification_message)],
[sg.Text('', font=(f'{font}', 10, 'bold'))],
[sg.Button(button_name,
bind_return_key=True,
enable_events=True,
key='button-no-file-got-it',
size=(100,50))]]
else:
dialog_layout = [
[sg.Text('', font=(f'{font}', 6, 'bold'))],
[sg.Text(notification_message)],
[sg.Text('', font=(f'{font}', 10, 'bold'))],
[
sg.Button(button_name,
bind_return_key=True,
enable_events=True,
key='button-download',
size=(100,50)),
sg.Button(secondary_button_name,
bind_return_key=True,
enable_events=True,
key='button-no-file-got-it',
size=(100,50))]
]
no_file_window = sg.Window(
notification_title,
layout= dialog_layout,
background_color='#aaa',
icon='retool.ico',
keep_on_top=True,
no_titlebar=True,
resizable=False,
size=(200,100),
finalize=True)
window.Disable()
while True:
popup_event, popup_values = no_file_window.read()
if popup_event in (sg.WIN_CLOSED, 'Exit'):
window.Enable()
no_file_window.close()
break
if popup_event == 'button-no-file-got-it':
window.Enable()
no_file_window.close()
break
if popup_event == 'button-download':
window.Enable()
no_file_window.close()
updateclonelists.main()
break
if __name__ == '__main__':
main() |
hail/python/test/hailtop/hailctl/dataproc/test_start.py | tdeboer-ilmn/hail | 789 | 11140417 | <filename>hail/python/test/hailtop/hailctl/dataproc/test_start.py
import pytest
from hailtop.hailctl.dataproc import cli
def test_cluster_name_required(capsys, gcloud_run):
with pytest.raises(SystemExit):
cli.main(["start"])
assert "arguments are required: name" in capsys.readouterr().err
assert gcloud_run.call_count == 0
def test_dry_run(gcloud_run):
cli.main(["start", "test-cluster", "--dry-run"])
assert gcloud_run.call_count == 0
def test_cluster_project(gcloud_run):
cli.main(["start", "--project", "foo", "test-cluster"])
assert "--project=foo" in gcloud_run.call_args[0][0]
@pytest.mark.parametrize("location_arg", [
"--region=europe-north1",
"--zone=us-central1-b",
])
def test_cluster_location(gcloud_run, location_arg):
cli.main(["start", location_arg, "test-cluster"])
assert location_arg in gcloud_run.call_args[0][0]
def test_creator_label(gcloud_run, gcloud_config):
gcloud_config["account"] = "<EMAIL>"
cli.main(["start", "my-cluster"])
assert "--labels=creator=test-user_hail_is" in gcloud_run.call_args[0][0]
gcloud_config["account"] = None
cli.main(["start", "my-cluster"])
assert not any(arg.startswith("--labels=") and "creator=" in arg for arg in gcloud_run.call_args[0][0])
def test_workers_configuration(gcloud_run):
cli.main(["start", "--num-workers=4", "test-cluster"])
assert "--num-workers=4" in gcloud_run.call_args[0][0]
@pytest.mark.parametrize("workers_arg", [
"--num-secondary-workers=8",
"--num-preemptible-workers=8"
])
def test_secondary_workers_configuration(gcloud_run, workers_arg):
cli.main(["start", workers_arg, "test-cluster"])
assert "--num-secondary-workers=8" in gcloud_run.call_args[0][0]
@pytest.mark.parametrize("machine_arg", [
"--master-machine-type=n1-highmem-16",
"--worker-machine-type=n1-standard-32",
])
def test_machine_type_configuration(gcloud_run, machine_arg):
cli.main(["start", machine_arg, "test-cluster"])
assert machine_arg in gcloud_run.call_args[0][0]
@pytest.mark.parametrize("machine_arg", [
"--master-boot-disk-size=250",
"--worker-boot-disk-size=200",
"--secondary-worker-boot-disk-size=100"
])
def test_boot_disk_size_configuration(gcloud_run, machine_arg):
cli.main(["start", machine_arg, "test-cluster"])
assert f"{machine_arg}GB" in gcloud_run.call_args[0][0]
def test_vep_defaults_to_highmem_master_machine(gcloud_run):
cli.main(["start", "test-cluster", "--vep=GRCh37"])
assert "--master-machine-type=n1-highmem-8" in gcloud_run.call_args[0][0]
def test_vep_defaults_to_larger_worker_boot_disk(gcloud_run):
cli.main(["start", "test-cluster", "--vep=GRCh37"])
assert "--worker-boot-disk-size=200GB" in gcloud_run.call_args[0][0]
assert "--secondary-worker-boot-disk-size=200GB" in gcloud_run.call_args[0][0]
@pytest.mark.parametrize("requester_pays_arg", [
"--requester-pays-allow-all",
"--requester-pays-allow-buckets=example-bucket",
"--requester-pays-allow-annotation-db",
])
def test_requester_pays_project_configuration(gcloud_run, gcloud_config, requester_pays_arg):
gcloud_config["project"] = "foo-project"
cli.main(["start", "test-cluster", requester_pays_arg])
properties = next(arg for arg in gcloud_run.call_args[0][0] if arg.startswith("--properties="))
assert "spark:spark.hadoop.fs.gs.requester.pays.project.id=foo-project" in properties
cli.main(["start", "--project=bar-project", "test-cluster", requester_pays_arg])
properties = next(arg for arg in gcloud_run.call_args[0][0] if arg.startswith("--properties="))
assert "spark:spark.hadoop.fs.gs.requester.pays.project.id=bar-project" in properties
@pytest.mark.parametrize("requester_pays_arg,expected_mode", [
("--requester-pays-allow-all", "AUTO"),
("--requester-pays-allow-buckets=example-bucket", "CUSTOM"),
("--requester-pays-allow-annotation-db", "CUSTOM"),
])
def test_requester_pays_mode_configuration(gcloud_run, gcloud_config, requester_pays_arg, expected_mode):
cli.main(["start", "test-cluster", requester_pays_arg])
properties = next(arg for arg in gcloud_run.call_args[0][0] if arg.startswith("--properties="))
assert f"spark:spark.hadoop.fs.gs.requester.pays.mode={expected_mode}" in properties
def test_requester_pays_buckets_configuration(gcloud_run, gcloud_config):
cli.main(["start", "test-cluster", "--requester-pays-allow-buckets=foo,bar"])
properties = next(arg for arg in gcloud_run.call_args[0][0] if arg.startswith("--properties="))
assert f"spark:spark.hadoop.fs.gs.requester.pays.buckets=foo,bar" in properties
@pytest.mark.parametrize("scheduled_deletion_arg", [
"--max-idle=30m",
"--max-age=1h",
])
def test_scheduled_deletion_configuration(gcloud_run, scheduled_deletion_arg):
cli.main(["start", scheduled_deletion_arg, "test-cluster"])
assert scheduled_deletion_arg in gcloud_run.call_args[0][0]
def test_master_tags(gcloud_run):
cli.main(["start", "test-cluster", "--master-tags=foo"])
assert gcloud_run.call_count == 2
assert gcloud_run.call_args_list[0][0][0][:4] == ["dataproc", "clusters", "create", "test-cluster"]
assert gcloud_run.call_args_list[1][0][0] == ["compute", "instances", "add-tags", "test-cluster-m", "--tags", "foo"]
def test_master_tags_project(gcloud_run):
cli.main(["start", "test-cluster", "--master-tags=foo", "--project=some-project"])
assert gcloud_run.call_count == 2
assert "--project=some-project" in gcloud_run.call_args_list[1][0][0]
def test_master_tags_zone(gcloud_run):
cli.main(["start", "test-cluster", "--master-tags=foo", "--zone=us-east1-d"])
assert gcloud_run.call_count == 2
assert "--zone=us-east1-d" in gcloud_run.call_args_list[1][0][0]
def test_master_tags_dry_run(gcloud_run):
cli.main(["start", "test-cluster", "--master-tags=foo", "--dry-run"])
assert gcloud_run.call_count == 0
|
alembic/versions/2014090207_add_subworksheets_to__136275e06649.py | kl-chou/codalab-worksheets | 236 | 11140420 | <filename>alembic/versions/2014090207_add_subworksheets_to__136275e06649.py
"""Add subworksheets to worksheet item
Revision ID: 136275e06649
Revises: <KEY>
Create Date: 2014-09-02 07:44:47.959083
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '136275e06649'
down_revision = '<KEY>'
def upgrade():
print('Adding subworksheets...')
# commands auto generated by Alembic - please adjust! #
op.add_column('worksheet_item', sa.Column('subworksheet_uuid', sa.String(length=63), nullable=True))
op.create_index('worksheet_item_subworksheet_uuid_index', 'worksheet_item', ['subworksheet_uuid'], unique=False)
# end Alembic commands #
def downgrade():
print('Removing subworksheets...')
# commands auto generated by Alembic - please adjust! #
op.drop_index('worksheet_item_subworksheet_uuid_index', table_name='worksheet_item')
op.drop_column('worksheet_item', 'subworksheet_uuid')
# end Alembic commands #
|
pytest-server-fixtures/pytest_server_fixtures/rethink.py | RaiVaibhav/pytest-plugins | 167 | 11140433 | import socket
import uuid
import logging
import pytest
from pytest_server_fixtures import CONFIG
from pytest_fixture_config import requires_config
from .base2 import TestServerV2
log = logging.getLogger(__name__)
rethinkdb = None
def _rethink_server(request):
""" This does the actual work - there are several versions of this used
with different scopes.
"""
test_server = RethinkDBServer()
request.addfinalizer(lambda p=test_server: p.teardown())
test_server.start()
return test_server
@pytest.fixture(scope='function')
@requires_config(CONFIG, ['rethink_executable'])
def rethink_server(request):
""" Function-scoped RethinkDB server in a local thread.
Attributes
----------
conn: (``rethinkdb.Connection``) Connection to this server instance
.. also inherits all attributes from the `workspace` fixture
"""
return _rethink_server(request)
@pytest.fixture(scope='session')
@requires_config(CONFIG, ['rethink_executable'])
def rethink_server_sess(request):
""" Same as rethink_server fixture, scoped as session instead.
"""
return _rethink_server(request)
@pytest.yield_fixture(scope="function")
def rethink_unique_db(rethink_server_sess):
""" Starts up a session-scoped server, and returns a connection to
a unique database for the life of a single test, and drops it after
"""
dbid = uuid.uuid4().hex
conn = rethink_server_sess.conn
rethinkdb.db_create(dbid).run(conn)
conn.use(dbid)
yield conn
rethinkdb.db_drop(dbid).run(conn)
@pytest.yield_fixture(scope="module")
def rethink_module_db(rethink_server_sess):
""" Starts up a module-scoped server, and returns a connection to
a unique database for all the tests in one module.
Drops the database after module tests are complete.
"""
dbid = uuid.uuid4().hex
conn = rethink_server_sess.conn
log.info("Making database")
rethinkdb.db_create(dbid).run(conn)
conn.use(dbid)
yield conn
log.info("Dropping database")
rethinkdb.db_drop(dbid).run(conn)
@pytest.fixture(scope="module")
def rethink_make_tables(request, rethink_module_db):
""" Module-scoped fixture that creates all tables specified in the test
module attribute FIXTURE_TABLES.
"""
reqd_table_list = getattr(request.module, 'FIXTURE_TABLES')
log.debug("Do stuff before all module tests with {0}".format(reqd_table_list))
conn = rethink_module_db
for table_name, primary_key in reqd_table_list:
try:
rethinkdb.db(conn.db).table_create(table_name,
primary_key=primary_key,
).run(conn)
log.info('Made table "{0}" with key "{1}"'
.format(table_name, primary_key))
except rethinkdb.errors.RqlRuntimeError as err:
log.debug('Table "{0}" not made: {1}'.format(table_name, err.message))
@pytest.yield_fixture(scope="function")
def rethink_empty_db(request, rethink_module_db, rethink_make_tables):
""" Function-scoped fixture that will empty all the tables defined
for the `rethink_make_tables` fixture.
This is a useful approach, because of the long time taken to
create a new RethinkDB table, compared to the time to empty one.
"""
tables_to_emptied = (table[0] for table
in getattr(request.module, 'FIXTURE_TABLES'))
conn = rethink_module_db
for table_name in tables_to_emptied:
rethinkdb.db(conn.db).table(table_name).delete().run(conn)
log.debug('Emptied "{0}" before test'.format(table_name))
yield conn
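# Illustrative sketch (added for clarity, not part of the original module): a test module
# using these fixtures is expected to define a FIXTURE_TABLES attribute of
# (table_name, primary_key) pairs; `rethink_make_tables` creates them once per module and
# `rethink_empty_db` empties them before each test. The table names below are made up.
#
#     FIXTURE_TABLES = [
#         ('users', 'user_id'),
#         ('events', 'event_id'),
#     ]
#
#     def test_insert_user(rethink_empty_db):
#         conn = rethink_empty_db
#         rethinkdb.db(conn.db).table('users').insert({'user_id': 1}).run(conn)
#         assert rethinkdb.db(conn.db).table('users').count().run(conn) == 1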
class RethinkDBServer(TestServerV2):
random_hostname = False
def __init__(self, **kwargs):
# defer loading of rethinkdb
global rethinkdb
import rethinkdb
super(RethinkDBServer, self).__init__(**kwargs)
self._driver_port = self._get_port(28015)
self._cluster_port = self._get_port(29015)
self._http_port = self._get_port(8080)
self.db = None
@property
def cmd(self):
return "rethindb"
@property
def cmd_local(self):
return CONFIG.rethink_executable
def get_args(self, **kwargs):
cmd = [
'--bind', self._listen_hostname,
'--driver-port', str(self.port),
'--http-port', str(self.http_port),
'--cluster-port', str(self.cluster_port),
]
if 'workspace' in kwargs:
cmd += ['--directory', str(kwargs['workspace'] / 'db')]
return cmd
@property
def image(self):
return CONFIG.rethink_image
@property
def port(self):
return self._driver_port
@property
def cluster_port(self):
return self._cluster_port
@property
def http_port(self):
return self._http_port
def check_server_up(self):
"""Test connection to the server."""
log.info("Connecting to RethinkDB at {0}:{1}".format(
self.hostname, self.port))
if not self.hostname:
return False
try:
self.conn = rethinkdb.connect(host=self.hostname,
port=self.port, db='test')
return True
except rethinkdb.RqlDriverError as err:
log.warning(err)
return False
|
dicompylercore/config.py | gacou54/dicompyler-core | 110 | 11140435 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# config.py
"""Configuration for dicompyler-core."""
# Copyright (c) 2016-2018 <NAME>
# This file is part of dicompyler-core, released under a BSD license.
# See the file license.txt included with this distribution, also
# available at https://github.com/dicompyler/dicompyler-core/
from six import PY2
mpl_available = True
pil_available = True
shapely_available = True
skimage_available = True
scipy_available = True
if PY2:
import imp
try:
imp.find_module('matplotlib')
except ImportError:
mpl_available = False
try:
imp.find_module('PIL')
except ImportError:
pil_available = False
try:
imp.find_module('shapely')
except ImportError:
shapely_available = False
try:
imp.find_module('skimage')
except ImportError:
skimage_available = False
try:
imp.find_module('scipy')
except ImportError:
scipy_available = False
else:
import importlib
mpl_available = importlib.util.find_spec("matplotlib") is not None
pil_available = importlib.util.find_spec('PIL') is not None
shapely_available = importlib.util.find_spec('shapely') is not None
skimage_available = importlib.util.find_spec('skimage') is not None
scipy_available = importlib.util.find_spec('scipy') is not None
# DICOM UID prefix
dicompyler_uid_prefix = '1.2.826.0.1.3680043.8.1070.'
dicompyler_uid_prefix_image = dicompyler_uid_prefix + '1.'
dicompyler_uid_prefix_rtstruct = dicompyler_uid_prefix + '2.'
dicompyler_uid_prefix_rtplan = dicompyler_uid_prefix + '3.'
dicompyler_uid_prefix_rtdose = dicompyler_uid_prefix + '4.'
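# Illustrative sketch (added for clarity, not part of the original module): downstream
# code would typically guard optional features on the availability flags above instead
# of importing the optional packages unconditionally, for example:
#
#     from dicompylercore import config
#     if config.mpl_available:
#         import matplotlib.pyplot as plt   # safe: matplotlib was found
#     else:
#         plt = None                        # plotting features are disabled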
|
examples/cookbook/symsph.py | dualword/pymol-open-source | 636 | 11140469 | <gh_stars>100-1000
# symsph: create a symmetry-expanded sphere about a selection
# usage:
#
# symsph name [,selection [,cutoff ]]
from pymol import cmd as global_cmd
def symsph(name, selection="sele", cutoff=20.0, self_cmd=global_cmd):
cutoff = float(cutoff)
prefix = selection+"_symarea_"
tmp_obj = selection+"_tmp"
if selection not in self_cmd.get_names("selections"):
print(" error: '"+selection+"' is not defined.")
return self_cmd.DEFAULT_ERROR
if not self_cmd.count_atoms(selection):
print(" error: '"+selection+"' contains no atoms.")
return self_cmd.DEFAULT_ERROR
obj_list = self_cmd.get_object_list(selection)
if len(obj_list)!=1:
print(" error: '"+selection+"' must only span one object.")
return self_cmd.DEFAULT_ERROR
obj = obj_list[0]
self_cmd.center(selection)
self_cmd.pseudoatom(tmp_obj)
self_cmd.delete(prefix+"*")
self_cmd.symexp(prefix,obj,tmp_obj,cutoff,segi=1)
self_cmd.create(name,"("+obj+" or "+prefix+"*) within %1.9f of %s"%(cutoff,tmp_obj))
self_cmd.delete(tmp_obj)
self_cmd.delete(prefix+"*")
#symsph("sele",20)
global_cmd.extend("symsph", symsph)
|
libs/cherrypy/tutorial/tut08_generators_and_yield.py | scambra/HTPC-Manager | 674 | 11140482 | <reponame>scambra/HTPC-Manager
"""
Bonus Tutorial: Using generators to return result bodies
Instead of returning a complete result string, you can use the yield
statement to return one result part after another. This may be convenient
in situations where using a template package like CherryPy or Cheetah
would be overkill, and messy string concatenation too uncool. ;-)
"""
import cherrypy
class GeneratorDemo:
def header(self):
return "<html><body><h2>Generators rule!</h2>"
def footer(self):
return "</body></html>"
def index(self):
# Let's make up a list of users for presentation purposes
users = ['Remi', 'Carlos', 'Hendrik', '<NAME>']
# Every yield line adds one part to the total result body.
yield self.header()
yield "<h3>List of users:</h3>"
for user in users:
yield "%s<br/>" % user
yield self.footer()
index.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(GeneratorDemo(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(GeneratorDemo(), config=tutconf)
|
tests/unit/peapods/pods/test_networking.py | Karnak123/jina | 15,179 | 11140500 | from jina.helper import get_internal_ip
from jina import __default_host__, __docker_host__
from jina.peapods.networking import get_connect_host, is_remote_local_connection
from jina.parsers import set_pea_parser
import pytest
docker_uses = 'docker://abc'
@pytest.mark.parametrize(
'bind_host, connect_host, connect_uses, expected_connect_host',
[
(
__default_host__,
__default_host__,
None,
__default_host__,
), # local bind & local connect
(
__default_host__,
__default_host__,
docker_uses,
__docker_host__,
), # local bind & local connect, connect inside docker
(
'1.2.3.4',
__default_host__,
None,
'1.2.3.4',
), # remote bind & local connect
(
'1.2.3.4',
__default_host__,
docker_uses,
'1.2.3.4',
), # remote bind & local connect, connect inside docker
(
__default_host__,
'localhost:8000',
None,
__docker_host__,
), # local bind & "pseudo" remote connect (used in tests), should be dockerhost
(
__default_host__,
'localhost:8000',
docker_uses,
__docker_host__,
), # local bind & "pseudo" remote connect (used in tests), should be dockerhost
(
__default_host__,
'1.2.3.4',
None,
get_internal_ip(),
), # local bind, remote connect
(
__default_host__,
'1.2.3.4',
docker_uses,
get_internal_ip(),
), # local bind, remote connect, connect inside docker
(
'1.2.3.4',
'1.2.3.4',
None,
__docker_host__,
), # bind and connect same remote
(
'1.2.3.4',
'1.2.3.4',
None,
__docker_host__,
), # bind and connect same remote, connect inside docker
('2.3.4.5', '1.2.3.4', None, '2.3.4.5'), # bind and connect diff remotes
(
'2.3.4.5',
'1.2.3.4',
docker_uses,
'2.3.4.5',
), # bind and connect diff remotes, connect inside docker
],
)
def test_get_connect_host(connect_host, bind_host, connect_uses, expected_connect_host):
connect_args = set_pea_parser().parse_args(
['--host', connect_host, '--uses', connect_uses]
)
connect_host = get_connect_host(
bind_host=bind_host,
bind_expose_public=False,
connect_args=connect_args,
)
assert connect_host == expected_connect_host
def test_is_remote_local_connection():
assert not is_remote_local_connection('0.0.0.0', '0.0.0.0')
assert not is_remote_local_connection('localhost', 'localhost')
assert not is_remote_local_connection('localhost', '1.2.3.4')
assert not is_remote_local_connection('1.2.3.4', '2.3.4.5')
assert not is_remote_local_connection('127.0.0.1', 'localhost')
assert not is_remote_local_connection('192.168.0.1', 'localhost')
assert not is_remote_local_connection('192.168.0.1', 'globalhost')
assert is_remote_local_connection('1.2.3.4', 'localhost')
assert is_remote_local_connection('globalhost', 'localhost')
assert is_remote_local_connection('1.2.3.4', '192.168.0.1')
|
api/genie/tests/test_views_schedules.py | zhangkuantian/cuelake | 272 | 11140514 | <reponame>zhangkuantian/cuelake<gh_stars>100-1000
import pytest
from django.urls import reverse
from conftest import populate_seed_data
from genie.models import CustomSchedule as Schedule
@pytest.mark.django_db(transaction=True)
def test_schedules(client, populate_seed_data, mocker):
# Create schedule test
path = reverse('scheduleView')
data = {'name': 'Schedule at 3 AM ',
'crontab': '0 3 * * *',
'timezone':'Asia/Kolkata' }
response = client.post(path, data=data, content_type="application/json")
assert response.status_code == 200
assert response.data['data']
scheduleId = response.data["data"]
# Update schedule test
path = reverse('scheduleView')
data = {'id': scheduleId,
'name': 'Schedule at 4 AM ',
'crontab': '0 4 * * *',
'timezone': 'Asia/Kolkata'}
response = client.put(path, data=data, content_type="application/json")
assert response.status_code == 200
assert Schedule.objects.get(id=scheduleId).name == "Schedule at 4 AM "
# Get schedule test
path = reverse('scheduleView')
response = client.get(path)
assert response.status_code == 200
assert response.data["data"]
# Get single schedule test
path = reverse("getSingleSchedule", kwargs={"scheduleId": scheduleId})
response = client.get(path)
assert response.status_code == 200
assert response.data["data"]
assert response.data["data"][0]["name"] == "Schedule at 4 AM "
# Delete schedule test
path = reverse("getSingleSchedule", kwargs={"scheduleId": scheduleId})
response = client.delete(path)
assert response.status_code == 200
assert Schedule.objects.filter(id=scheduleId).count() == 0
|
app/game_master.py | Palmito94/posio | 521 | 11140518 | # -*- coding: utf-8 -*-
from app import socketio, app
from posio.game import Game
class GameMaster:
"""
Starts the game and manages the game lifecycle
"""
def __init__(self,
score_max_distance,
leaderboard_answer_count,
max_response_time,
time_between_turns):
"""
:param score_max_distance: The distance above which player scores will be null
:param leaderboard_answer_count: How many answers are used to compute user scores in the leaderboard
:param max_response_time: The time given to a player to answer a question
:param time_between_turns: The time between two turns
"""
self.game = Game(score_max_distance, leaderboard_answer_count)
self.max_response_time = max_response_time
self.time_between_turns = time_between_turns
def start_game(self):
# Start the game
socketio.start_background_task(target=self.run_game)
def run_game(self):
while True:
# Start a new turn
self.start_turn()
# Give the players some time to answer
socketio.sleep(self.max_response_time)
# End the turn
self.end_turn()
# Send the new leaderboard to players
self.update_leaderboard()
# Give the user some time between two turns
socketio.sleep(self.time_between_turns)
def start_turn(self):
app.logger.debug('Starting new turn')
# Start a new turn
self.game.start_new_turn()
# Get the city for this turn
city = self.game.get_current_city()
# Send the info on the new city to locate to every player in the game
socketio.emit(
'new_turn',
{
'city': city['name'],
'country': city['country'],
'country_code': city['country'],
},
)
def end_turn(self):
app.logger.debug('Ending turn')
# End current turn
self.game.end_current_turn()
# Rank answers
ranked_players = self.game.get_current_turn_ranks()
player_count = len(ranked_players)
# Send end of turn signal and correct/best answers to players
city = self.game.get_current_city()
turn_results = {
'correct_answer':
{
'name': city['name'],
'lat': city['latitude'],
'lng': city['longitude'],
}
}
if player_count > 0:
best_result = ranked_players[0].get_result(self.game.turn_number)
best_answer = ranked_players[0].get_answer(self.game.turn_number)
turn_results['best_answer'] = {
'distance': best_result.distance,
'lat': best_answer.latitude,
'lng': best_answer.longitude
}
socketio.emit('end_of_turn', turn_results)
# Then send individual player results
for rank, player in enumerate(ranked_players):
result = player.get_result(self.game.turn_number)
answer = player.get_answer(self.game.turn_number)
socketio.emit('player_results',
{
'rank': rank + 1,
'total': player_count,
'distance': result.distance,
'score': result.score,
'lat': answer.latitude,
'lng': answer.longitude,
},
room=player.sid)
def update_leaderboard(self):
app.logger.debug('Updating leaderboard')
# Get a sorted list of all the scores
scores = self.game.get_ranked_scores()
top_ten = [{'player_name': score['player'].name,
'score': score['score']} for score in scores[:10]]
# Number of players
total_player = len(scores)
# Send top ten + player score and rank to each player
for rank, score in enumerate(scores):
socketio.emit(
'leaderboard_update',
{
'top_ten': top_ten,
'total_player': total_player,
'player_rank': rank,
'player_score': score['score'],
},
room=score['player'].sid)
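# Illustrative sketch (added for clarity, not part of the original module): wiring the
# GameMaster into the application would look roughly like the snippet below. The timing
# and scoring values are made up for demonstration; the real ones come from the app
# configuration.
#
#     game_master = GameMaster(score_max_distance=500,
#                              leaderboard_answer_count=10,
#                              max_response_time=5,
#                              time_between_turns=5)
#     game_master.start_game()  # schedules run_game() as a socketio background task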
|
golem/gui/gui_utils.py | Sunil-Rathore/golem | 171 | 11140525 | <filename>golem/gui/gui_utils.py
"""Helper functions to deal with Golem GUI module application."""
import configparser
import copy
import inspect
import os
import subprocess
import sys
from functools import wraps
from urllib.parse import urlparse, urljoin
from flask import abort, render_template, request
from flask_login import current_user
import golem.actions
from golem.core import utils, session, errors, test_directory, settings_manager
from golem.gui.user_management import Permissions
DEFAULT_SECRET_KEY = '<KEY>'
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
def run_test(project, test_name, test_functions=None, browsers=None, environments=None, processes=1):
"""Run a test case. This is used when running tests from the GUI"""
script_name = sys.argv[0]
if script_name[-5:] != 'golem' and script_name[-9:] != 'golem.exe':
if sys.platform == 'win32':
script_name = 'golem'
else:
which_golem = subprocess.run(['which', 'golem'], stdout=subprocess.PIPE)
script_name = which_golem.stdout.decode('utf-8').strip()
timestamp = utils.get_timestamp()
param_list = [
script_name,
'--golem-dir',
session.testdir,
'run',
project,
test_name,
'--timestamp',
timestamp]
if browsers:
param_list.append('--browsers')
for browser in browsers:
param_list.append(browser)
if environments:
param_list.append('--environments')
for environment in environments:
param_list.append(environment)
if processes:
param_list.append('--processes')
param_list.append(str(processes))
if test_functions:
param_list.append('--test-functions')
for test_function in test_functions:
param_list.append(test_function)
subprocess.Popen(param_list)
return timestamp
def run_suite(project, suite_name):
"""Run a suite. This is used when running suites from the GUI"""
script_name = sys.argv[0]
if script_name[-5:] != 'golem' and script_name[-9:] != 'golem.exe':
if sys.platform == 'win32':
script_name = 'golem'
else:
which_golem = subprocess.run(['which', 'golem'], stdout=subprocess.PIPE)
script_name = which_golem.stdout.decode('utf-8').strip()
timestamp = utils.get_timestamp()
param_list = [
script_name,
'--golem-dir',
session.testdir,
'run',
project,
suite_name,
'--timestamp',
timestamp
]
subprocess.Popen(param_list)
return timestamp
class GolemActionParser:
"""Generates a list of golem actions by reading the functions docstrings
This class is a singleton. The list of action definitions
is cached so only the first time they are required will be
retrieved by parsing the golem.actions module
This class expects the docstrings of the actions to have this format:
def some_action(param1, param2, param3):
'''This is the description of the action function
parameters:
param1 : element
param2 : value
param3 (int, float) : value
'''
This would generate the following list:
actions = [
{
'name': 'some_action',
'description': 'This is the description of the action'
'parameters': [{'name': 'param1', 'type': 'element'},
{'name': 'param2', 'type': 'value'},
{'name': 'param3 (int, float)', 'type': 'value'}]
}
]
Note: the `type` distinction (element or value) is used by the GUI
test builder because it needs to know if it should use element
autocomplete (page object elements) or data autocomplete
(columns of the datatable)
"""
__instance = None
actions = None
explicit_actions = None
def __new__(cls):
if GolemActionParser.__instance is None:
GolemActionParser.__instance = object.__new__(cls)
return GolemActionParser.__instance
@staticmethod
def _is_module_function(mod, func):
return inspect.isfunction(func) and inspect.getmodule(func) == mod
@staticmethod
def _parse_docstring(docstring):
description = ''
parameters = []
split = docstring.split('Parameters:')
desc_lines = [x.strip() for x in split[0].splitlines() if len(x.strip())]
description = ' '.join(desc_lines)
if len(split) == 2:
param_lines = [x.strip() for x in split[1].splitlines() if len(x.strip())]
for param_line in param_lines:
param_parts = param_line.split(':')
param = {
'name': param_parts[0].strip(),
'type': param_parts[1].strip()
}
parameters.append(param)
return description, parameters
def _get_actions(self):
actions = []
actions_module = golem.actions
def is_valid_function(function, module):
"""A valid action function must be defined
in the actions module and must not start
with underscore
"""
if self._is_module_function(module, function):
if not function.__name__.startswith('_'):
return True
return False
action_func_list = [function for function in actions_module.__dict__.values()
if is_valid_function(function, actions_module)]
for action in action_func_list:
doc = action.__doc__
if doc is None:
print(f'Warning: action {action.__name__} does not have a docstring defined')
elif 'DEPRECATED' in doc:
pass
else:
description, parameters = self._parse_docstring(doc)
action_def = {
'name': action.__name__,
'description': description,
'parameters': parameters
}
actions.append(action_def)
explicit_actions = copy.deepcopy(actions)
for action in explicit_actions:
action['name'] = f'actions.{action["name"]}'
# add 'code_block' action
code_block_action = {
'description': 'Insert code block',
'parameters': [],
'name': 'code_block'
}
actions.append(code_block_action)
explicit_actions.append(code_block_action)
self.actions = actions
self.explicit_actions = explicit_actions
def get_actions(self, project_name=None):
if self.actions is None:
self._get_actions()
if project_name:
settings = settings_manager.get_project_settings(project_name)
else:
settings = settings_manager.get_global_settings()
if settings['implicit_actions_import']:
return self.actions
else:
return self.explicit_actions
def get_supported_browsers_suggestions():
"""Return a list of supported browsers by default."""
supported_browsers = [
'chrome',
'chrome-remote',
'chrome-headless',
'chrome-remote-headless',
'edge',
'edge-remote',
'firefox',
'firefox-headless',
'firefox-remote',
'firefox-remote-headless',
'ie',
'ie-remote',
'opera',
'opera-remote',
]
return supported_browsers
def project_exists(func):
"""A wrapper that checks if the requested project exists.
* The annotated function must have a `project` argument.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if not test_directory.project_exists(kwargs['project']):
abort(404, f'The project {kwargs["project"]} does not exist.')
return func(*args, **kwargs)
return wrapper
def permission_required(permission):
"""A wrapper that checks if the current user
has the required permissions for a page
* The annotated function must have a `project` argument for project pages.
* The current user must be available in `flask_login.current_user`
* The user object must have a `project_weight(project) method`
"""
def check_permissions(func):
@wraps(func)
def wrapper(*args, **kwargs):
if not current_user.is_superuser:
project = kwargs.get('project', None)
if project:
user_weight = current_user.project_weight(project)
else:
user_weight = 0
required_weight = Permissions.get_weight(permission)
if user_weight < required_weight:
return render_template('not_permission.html')
return func(*args, **kwargs)
return wrapper
return check_permissions
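# Illustrative sketch (added for clarity, not part of the original module): a Flask view
# protected by the decorators above. The route, template name, and permission constant
# are hypothetical; the snippet only shows the intended usage and stacking order.
#
#     @app.route('/project/<project>/suites/')
#     @project_exists
#     @permission_required(Permissions.READ_ONLY)  # assumed permission name
#     def suite_list(project):
#         return render_template('suite_list.html', project=project)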
def get_secret_key():
"""Try to get the secret key from the .golem file
located in the test directory.
A default secret key will be returned if the .golem file
does not have a secret key defined.
Example .golem file:
[gui]
secret_key = my_secret_key_string
"""
golem_file = os.path.join(session.testdir, '.golem')
if not os.path.isfile(golem_file):
sys.exit(errors.invalid_test_directory.format(session.testdir))
config = configparser.ConfigParser()
config.read(golem_file)
if 'gui' not in config:
print('Warning: gui config section not found in .golem file. Using default secret key')
secret_key = DEFAULT_SECRET_KEY
elif 'secret_key' not in config['gui']:
print('Warning: secret_key not found in .golem file. Using default secret key')
secret_key = DEFAULT_SECRET_KEY
else:
secret_key = config['gui']['secret_key']
return secret_key
class ProjectsCache:
"""A cache of projects.
The cache should be updated when projects are added or removed.
"""
_projects = None
@staticmethod
def get():
if ProjectsCache._projects is None:
ProjectsCache._projects = test_directory.get_projects()
return ProjectsCache._projects
@staticmethod
def get_user_projects():
return [p for p in ProjectsCache.get() if p in current_user.project_list]
@staticmethod
def add(project_name):
ProjectsCache.get()
ProjectsCache._projects.append(project_name)
@staticmethod
def remove(project_name):
ProjectsCache.get()
if project_name in ProjectsCache._projects:
ProjectsCache._projects.remove(project_name)
|
io_import_vmf/cube2equi.py | lasa01/io_import_vmf | 153 | 11140532 | # Originally by adamb70 from https://github.com/adamb70/Python-Spherical-Projection
# Modified to be used with Source Engine cubemaps.
# Converted to numpy to achieve reasonable performance.
import numpy
from numpy import ndarray
from enum import IntEnum
from typing import Tuple
def spherical_coordinates(i: ndarray, j: ndarray, w: float, h: float) -> Tuple[ndarray, ndarray]:
""" Returns spherical coordinates of the pixel from the output image. """
theta = 2*i/w-1
phi = 2*j/h-1
# phi = lat, theta = long
return phi*(numpy.pi/2), theta*numpy.pi
def vector_coordinates(phi: ndarray, theta: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
""" Returns 3D vector which points to the pixel location inside a sphere. """
phi_cos = numpy.cos(phi)
return (phi_cos * numpy.cos(theta), # X
numpy.sin(phi), # Y
phi_cos * numpy.sin(theta)) # Z
class CubemapFace(IntEnum):
LEFT = 0
RIGHT = 1
TOP = 2
BOTTOM = 3
FRONT = 4
BACK = 5
def get_face(x: ndarray, y: ndarray, z: ndarray) -> ndarray:
""" Uses 3D vector to find which cube face the pixel lies on. """
abs_x = numpy.abs(x)
abs_y = numpy.abs(y)
abs_z = numpy.abs(z)
largest_magnitude = numpy.maximum.reduce((abs_x, abs_y, abs_z))
x_selector: ndarray = largest_magnitude - abs_x < 1e-9
x_specifier: ndarray = x < 0
y_selector: ndarray = largest_magnitude - abs_y < 1e-9
y_specifier: ndarray = y < 0
z_selector: ndarray = largest_magnitude - abs_z < 1e-9
z_specifier: ndarray = z < 0
return numpy.select(
(
x_selector & x_specifier, x_selector & ~x_specifier,
y_selector & y_specifier, y_selector & ~y_specifier,
z_selector & z_specifier, z_selector & ~z_specifier,
),
(
CubemapFace.LEFT, CubemapFace.RIGHT,
CubemapFace.TOP, CubemapFace.BOTTOM,
CubemapFace.BACK, CubemapFace.FRONT,
),
)
def raw_face_coordinates(face: ndarray, x: ndarray, y: ndarray, z: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""
Return coordinates with necessary sign (- or +) depending on which face they lie on.
From Open-GL specification (chapter 3.8.10) https://www.opengl.org/registry/doc/glspec41.core.20100725.pdf
"""
front = face == CubemapFace.FRONT
back = face == CubemapFace.BACK
bottom = face == CubemapFace.BOTTOM
top = face == CubemapFace.TOP
left = face == CubemapFace.LEFT
right = face == CubemapFace.RIGHT
x_neg = -x
xc = numpy.select(
(
front, back, bottom, top, left, right,
),
(
x_neg, x, z, z, -z, z,
)
)
yc = numpy.select(
(
front, back, bottom, top, left, right,
),
(
y, y, x_neg, x, y, y,
)
)
ma = numpy.select(
(
front, back, bottom, top, left, right,
),
(
z, z, y, y, x, x,
)
)
return xc, yc, ma
def raw_coordinates(xc: ndarray, yc: ndarray, ma: ndarray) -> Tuple[ndarray, ndarray]:
"""
Return 2D coordinates on the specified face relative to the bottom-left corner of the face.
Also from Open-GL spec.
"""
return (xc/numpy.abs(ma) + 1) / 2, (yc/numpy.abs(ma) + 1) / 2
def normalized_coordinates(face: ndarray, x: ndarray, y: ndarray, n: int) -> Tuple[ndarray, ndarray]:
""" Return pixel coordinates in the input image where the specified pixel lies. """
return (x*n).clip(0, n-1), (y*n).clip(0, n-1)
def find_corresponding_pixels(width: int, height: int, out_dim: int) -> Tuple[ndarray, Tuple[ndarray, ndarray]]:
""" Returns face index, pixel coordinates for the input image that a specified pixel in the output image maps to."""
y, x = numpy.mgrid[0:height, 0:width]
y = y[::-1]
spherical = spherical_coordinates(x, y, width, height)
vector_coords = vector_coordinates(spherical[0], spherical[1])
face = get_face(vector_coords[0], vector_coords[1], vector_coords[2])
raw_face_coords = raw_face_coordinates(face, vector_coords[0], vector_coords[1], vector_coords[2])
cube_coords = raw_coordinates(raw_face_coords[0], raw_face_coords[1], raw_face_coords[2])
return face, normalized_coordinates(face, cube_coords[0], cube_coords[1], out_dim)
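# Illustrative usage sketch (added for clarity, not part of the original module). It
# assumes the six cube faces are stacked in a (6, n, n, 3) array ordered as in
# CubemapFace, and that the per-face coordinates returned above index (column, row)
# from the bottom-left corner; depending on how the faces were loaded, a vertical flip
# of the face data may also be required.
#
#     faces = numpy.zeros((6, 512, 512, 3), dtype=numpy.uint8)    # placeholder face data
#     out_w, out_h = 1024, 512
#     face_idx, (fx, fy) = find_corresponding_pixels(out_w, out_h, 512)
#     equirect = faces[face_idx, fy.astype(int), fx.astype(int)]  # shape (out_h, out_w, 3)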
|
nncf/torch/pruning/filter_pruning/functions.py | MaximProshin/nncf | 136 | 11140536 | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
def l1_filter_norm(weight_tensor, dim=0):
"""
Calculates L1 for weight_tensor for the selected dimension.
"""
weight_tensor = weight_tensor.transpose(0, dim).contiguous()
return torch.norm(weight_tensor.view(weight_tensor.shape[0], -1), p=1, dim=1)
def l2_filter_norm(weight_tensor, dim=0):
"""
Calculates L2 for weight_tensor for the selected dimension.
"""
weight_tensor = weight_tensor.transpose(0, dim).contiguous()
return torch.norm(weight_tensor.view(weight_tensor.shape[0], -1), p=2, dim=1)
def tensor_l2_normalizer(weight_tensor):
norm = torch.sqrt(torch.sum(torch.abs(weight_tensor) ** 2))
return weight_tensor / norm
def geometric_median_filter_norm(weight_tensor, dim=0):
"""
Compute geometric median norm for filters.
:param weight_tensor: tensor with weights
:param dim: dimension of output channel
:return: metric value for every weight from weights_tensor
"""
weight_tensor = weight_tensor.transpose(0, dim).contiguous()
filters_count = weight_tensor.size(0)
weight_vec = weight_tensor.view(filters_count, -1)
similarity_matrix = torch.cdist(weight_vec[None, :], weight_vec[None, :], p=2.0)
return similarity_matrix.squeeze().sum(axis=0).to(weight_tensor.device)
FILTER_IMPORTANCE_FUNCTIONS = {
'L2': l2_filter_norm,
'L1': l1_filter_norm,
'geometric_median': geometric_median_filter_norm
}
def calculate_binary_mask(importance, threshold):
return (importance >= threshold).float()
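# A minimal usage sketch (added for illustration, not part of the original module). It
# assumes only the functions defined above and standard PyTorch; the weight shape below
# is a made-up example (16 output filters of a 3x3 convolution over 3 input channels).
if __name__ == '__main__':
    weight = torch.randn(16, 3, 3, 3)
    importance = FILTER_IMPORTANCE_FUNCTIONS['L2'](weight, dim=0)
    # Keep roughly the more important half of the filters by thresholding at the median.
    mask = calculate_binary_mask(importance, torch.median(importance))
    print(mask)  # float tensor of shape (16,): 1.0 = filter kept, 0.0 = filter pruned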
|
cogdl/layers/base_layer.py | cenyk1230/cogdl | 1,072 | 11140538 | import torch
import torch.nn as nn
class BaseLayer(nn.Module):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
def forward(self, graph, x):
m = self.message(x[graph.edge_index[0]])
return self.aggregate(graph, m)
def message(self, x):
return x
def aggregate(self, graph, x):
result = torch.zeros(graph.num_nodes, x.shape[1], dtype=x.dtype).to(x.device)
result.scatter_add_(0, graph.edge_index[1].unsqueeze(1).expand(-1, x.shape[1]), x)
return result
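# A minimal usage sketch (added for illustration, not part of the original module). It
# assumes only what this layer actually reads from `graph`: an `edge_index` LongTensor
# of shape (2, E) holding source/target node ids, and `num_nodes`.
if __name__ == '__main__':
    from types import SimpleNamespace

    graph = SimpleNamespace(
        edge_index=torch.tensor([[0, 1, 2], [1, 2, 0]]),  # edges 0->1, 1->2, 2->0
        num_nodes=3,
    )
    x = torch.randn(3, 8)  # one 8-dimensional feature vector per node
    out = BaseLayer()(graph, x)  # row i sums the features of node i's in-neighbors
    print(out.shape)  # torch.Size([3, 8])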
|
examples/pybullet/gym/pybullet_envs/minitaur/robots/crowd_controller.py | felipeek/bullet3 | 9,136 | 11140564 | <filename>examples/pybullet/gym/pybullet_envs/minitaur/robots/crowd_controller.py
# Lint as: python3
"""Crowd objects/human controllers module."""
import abc
import collections
from typing import Any, Callable, Dict, Iterable, List, Optional, Union, Sequence, Text
from absl import logging
import dataclasses
import gin
import numpy as np
#import rvo2
from pybullet_envs.minitaur.envs_v2.sensors import base_position_sensor
from pybullet_envs.minitaur.envs_v2.sensors import sensor as generic_sensor
from pybullet_envs.minitaur.robots import autonomous_object
from pybullet_envs.minitaur.robots import object_controller
POSITION_SENSOR_POSTFIX = "_pos"
@dataclasses.dataclass
class MovingObjectRecord:
position_key: Text
agent_id: int
radius: float
last_position: Optional[np.ndarray] = None
@gin.configurable
def sample_start_target_position(scene,
start=None,
start_circles=None,
target_circles=None,
num_sampling_retries=1,
min_wall_distance=0.0,
min_goal_euclidean_distance=0.0,
max_goal_euclidean_distance=np.Inf,
min_path_clearance=None):
"""Sample valid start and target position reachable from start.
Args:
scene: a SceneBase instance implementing get_random_valid_position function.
start: a 2-tuple (x, y) of start position. If specified, no start is
sampled.
start_circles: a list of circle specification. Each circle is specified as
a tuple ((x, y), r) of a center (x, y) and radius r. If specified, start
position is sampled from within one of the start_circles.
target_circles: same as start_circle. If specified, target positions is
sampled from within one of the start_circles.
num_sampling_retries: a positive int, number of attempts to sample a
start, target pair.
min_wall_distance: a float, the minimum distance to a wall.
min_goal_euclidean_distance: a positive float, the minimum distance between
start and target.
max_goal_euclidean_distance: a positive float, the maximum distance between
start and target.
min_path_clearance: float, clearance of shortest path to walls.
Returns:
A 4 tuple (start, target, shortest_path, is_valid). start and target are
start and target positions, shortest_path is a list of 2-tuples specifying
the shortest path from start to target, is_valid is bool specifying whether
the start, target pair is valid. If min_path_clearance is not specified,
then shortest_path is None.
"""
if not hasattr(scene, "get_random_valid_position"):
raise ValueError(
"Incompatible scene {}. Expected to have `get_random_valid_position` "
"method.".format(scene))
def _print_counters(counters):
for name, value in counters.items():
logging.info(" %s: %d", name, value)
sampling_counters = collections.defaultdict(lambda: 0)
for _ in range(num_sampling_retries):
if start is None:
start_pos = scene.get_random_valid_position(
min_wall_distance, inclusion_circles=start_circles)
else:
if start_circles is not None:
raise ValueError("At most one of the arguments start and start_circles "
"can be not None.")
start_pos = start
target_pos = scene.get_random_valid_position(
min_wall_distance, inclusion_circles=target_circles)
sampling_counters["attempts"] += 1
euclidean_distance = np.linalg.norm(target_pos - start_pos)
if euclidean_distance < min_goal_euclidean_distance:
sampling_counters["min_euclidean"] += 1
continue
if euclidean_distance > max_goal_euclidean_distance:
sampling_counters["max_euclidean"] += 1
continue
# Skip the path computation is no path clearance is provided.
if min_path_clearance is None:
logging.info("Valid goal with no minimum path clearance checking.")
_print_counters(sampling_counters)
return start_pos, target_pos, None, True
# Check the goal clearance along the shortest path
if not hasattr(scene, "find_shortest_path"):
raise ValueError(
f"scene %s missing find_shortest_path method {scene}")
# This is a slow process.
shortest_path = scene.find_shortest_path(
start_pos[:2], target_pos[:2], min_path_clearance)
# No path exists between current robot position and goal satisfying the
# clearance.
if shortest_path is None:
sampling_counters["path_clearance"] += 1
continue
logging.info("Valid start/target with path clearance checking.")
_print_counters(sampling_counters)
return start_pos, target_pos, shortest_path, True
logging.info("No valid start/target found.")
_print_counters(sampling_counters)
return start_pos, target_pos, None, False
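# Illustrative sketch (added for clarity, not part of the original module): the scene
# object only needs to provide `get_random_valid_position` (plus `find_shortest_path`
# when a path clearance is requested). The stub below is hypothetical and just shows the
# expected call pattern.
#
#     class _StubScene:
#         def get_random_valid_position(self, min_wall_distance, inclusion_circles=None):
#             return np.random.uniform(-5.0, 5.0, size=2)
#
#     start, target, path, ok = sample_start_target_position(
#         _StubScene(), min_goal_euclidean_distance=1.0)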
class CrowdController(metaclass=abc.ABCMeta):
"""Crowd controller interface."""
def __init__(self, names: Iterable[Text],
position_key_formatter="%s" + POSITION_SENSOR_POSTFIX):
"""Constructor.
Args:
names: Name of instance (dynamic object or human).
position_key_formatter: Formatter to convert name to position sensor name.
"""
self._names = list(names)
self._position_key_formatter = position_key_formatter
self._num_instance = len(self._names)
self._current_time = 0
def _validate_instance_id(self, instance_id):
if not 0 <= instance_id < self._num_instance:
raise ValueError(
f"instance_id must be an integer in [0, {self.num_instance}), "
f"got {instance_id}.")
@property
def num_instance(self):
"""Returns the number of crowd instances."""
return self._num_instance
def instance_name(self, instance_id: int) -> Text:
"""Returns the name of instance."""
self._validate_instance_id(instance_id)
return self._names[instance_id]
def instance_controller(
self, instance_id: int) -> object_controller.ControllerBase:
"""Returns the individual controller of certain instance."""
self._validate_instance_id(instance_id)
return _IndividualController(self, instance_id)
def instance_get_action(
self, instance_id: int, time_sec: float,
observations: Dict[Text, Any]) -> object_controller.ControllerOutput:
"""Returns action of specific instance given observation.
This method is for _IndividualController.
Args:
instance_id: Identifier of an object in the crowd.
time_sec: Time since simulation reset in seconds. If time < 0, returns
initial values and ignores observations.
observations: A dict of all observations.
Returns:
Position, orientation and an extra info dict for robot joints, human
skeletal pose, etc.
"""
if time_sec < 0:
self._recalculate_actions(object_controller.INIT_TIME, {})
self._current_time = object_controller.INIT_TIME
elif time_sec > self._current_time:
self._current_time = time_sec
self._recalculate_actions(self._current_time, observations)
self._validate_instance_id(instance_id)
return self._get_action_of_instance(instance_id)
@abc.abstractmethod
def _recalculate_actions(
self, time_sec: float, observations: Dict[Text, Any]) -> None:
"""Calculates crowd command for all instances in crowd."""
raise NotImplementedError(
"_recalculate_actions() should be implemented by subclass.")
@abc.abstractmethod
def _get_action_of_instance(
self, instance_id: int) -> object_controller.ControllerOutput:
"""Returns calculated actions of specific instance."""
raise NotImplementedError(
"_get_action_of_instance() should be implemented by subclass.")
def set_scene(self, scene) -> None:
"""Sets the scene for crowd controller to obtain scene information."""
del scene
class _IndividualController(object_controller.ControllerBase):
"""A utility class that wraps crowd controller in ControllerBase interface."""
def __init__(self, crowd_controller: CrowdController, instance_id: int):
"""Constructor.
Args:
crowd_controller: The controller of crowd to which this instance belong.
instance_id: Identifier of a crowd instance.
"""
self._instance_id = instance_id
self._crowd_controller = crowd_controller
def get_action(
self, time_sec: float,
observations: Dict[Text, Any]) -> object_controller.ControllerOutput:
"""Returns position, orientation and pose based on time and observations.
Args:
time_sec: Time since simulation reset in seconds. If time < 0, returns
initial values and ignores observations.
observations: A dict of all observations.
Returns:
Position, orientation and an extra info dict for robot joints, human
skeletal pose, etc.
"""
return self._crowd_controller.instance_get_action(
self._instance_id, time_sec, observations)
@gin.configurable
class StationaryController(CrowdController):
"""A crowd controller that places crowd objects at fixed positions."""
def __init__(
self, positions: Sequence[Sequence[float]],
orientations: Optional[Sequence[Sequence[float]]] = None, **kwargs):
"""Constructor.
Args:
positions: Fixed positions (3D points) of crowd instances.
orientations: Fixed orientations in quaternion of crowd instances.
**kwargs: Keyword arguments to pass on to base class.
"""
super().__init__(**kwargs)
if orientations is None:
orientations = np.array(((0, 0, 0, 1),) * self.num_instance)
if not len(positions) == len(orientations) == self.num_instance:
raise ValueError(
f"positions and orientations should all have the same length "
f"{self.num_instance}. Got len(positions) = {len(positions)}, "
f"len(orientations) = {len(orientations)}.")
self._positions = positions
self._orientations = orientations
def _recalculate_actions(
self, time_sec: float, observations: Dict[Text, Any]) -> None:
"""Calculates crowd command for all instances in crowd."""
del time_sec
del observations
def _get_action_of_instance(
self, instance_id: int) -> object_controller.ControllerOutput:
"""Returns calculated actions of specific instance."""
self._validate_instance_id(instance_id)
return self._positions[instance_id], self._orientations[instance_id], {}
@gin.configurable
class OrcaController(CrowdController):
"""A crowd controller that controls crowd instances using ORCA algorithm.
Crowd instance will be initialized at a specified start position and move
towards specified target position in a linear path while avoid collision with
each other.
"""
_DEFAULT_NEIGHBOR_DISTANCE_M = 5
_DEFAULT_MAX_NEIGHBORS = 10
_DEFAULT_RADIUS_M = 0.5
_DEFAULT_MAX_SPEED_MPS = 2
_DEFAULT_TIME_HORIZON_SEC = 1.0
_DEFAULT_OBSTACLE_TIME_HORIZON_SEC = 0.3
def __init__(
self,
timestep: float,
start_positions: Optional[Sequence[Sequence[float]]] = None,
target_positions: Optional[Sequence[Sequence[float]]] = None,
use_position_generator: Optional[bool] = False,
group_sizes: Sequence[int] = None,
radius: float = _DEFAULT_RADIUS_M,
max_speed_mps: float = _DEFAULT_MAX_SPEED_MPS,
time_horizon_sec: float = _DEFAULT_TIME_HORIZON_SEC,
obstacle_time_horizon_sec: float = _DEFAULT_OBSTACLE_TIME_HORIZON_SEC,
neighbor_distance_m: float = _DEFAULT_NEIGHBOR_DISTANCE_M,
max_neighbors: int = _DEFAULT_MAX_NEIGHBORS,
workaround_erp_issue: bool = True,
moving_objects_pos_key: Sequence[Text] = (),
moving_objects_radius: Union[float, Sequence[float]] = _DEFAULT_RADIUS_M,
endless_trajectory: bool = True,
**kwargs):
"""Constructor.
Args:
timestep: Timestep of simulation.
start_positions: A list of positions (x, y, z) for crowd instances as
their starting positions.
target_positions: A list of positions (x, y, z) for crowd instances as
their target positions.
use_position_generator: a boolean; if True then the start and end
positions are sampled. start_positions and target_positions must be None.
group_sizes: If set, the crowd is split into groups randomly, whose sizes
are picked at random from this group_sizes list. In this way, the
crowd simulator simulates clusters of objects moving around.
radius: Radius of crowd instances.
max_speed_mps: Maximum crowd instance speed.
time_horizon_sec: Time horizon in seconds.
obstacle_time_horizon_sec: Time horizon for static obstacles in seconds.
neighbor_distance_m: Neighbor distance in meters. Instances closer than
this distance are considered neighbors.
max_neighbors: Max number of neighbors.
workaround_erp_issue: There is an issue with pybullet constraint that the
constraint is solved only 20% per timestep. Need to amplify position
delta by 5x to work around this issue.
moving_objects_pos_key: Position observation key of moving objects not
controlled by the ORCA controller.
moving_objects_radius: Radius of moving objects. Should be a float, which
applies to all moving objects, or a sequence of float, which should be
of the same length as moving_objects_pos_key.
endless_trajectory: Only valid if use_position_generator is True. Agent
returns to starting point after reaching goal to achieve endless motion.
**kwargs: Keyword arguments to pass on to base class.
"""
super().__init__(**kwargs)
assert ((start_positions is not None and target_positions is not None) or
use_position_generator)
if not use_position_generator:
if not len(start_positions) == len(target_positions) == self.num_instance:
raise ValueError(
f"start_positions and target_positions should both have length "
f"equals {self.num_instance}: "
f"len(start_positions) = {len(start_positions)}, "
f"len(target_positions) = {len(target_positions)}.")
self._timestep = timestep
self._radius = radius
self._max_speed_mps = max_speed_mps
self._time_horizon_sec = time_horizon_sec
self._obstacle_time_horizon_sec = obstacle_time_horizon_sec
self._neighbor_distance_m = neighbor_distance_m
self._max_neighbors = max_neighbors
self._use_position_generator = use_position_generator
self._endless_trajectory = endless_trajectory
self._scene = None
if isinstance(moving_objects_radius, float):
moving_objects_radius = [
moving_objects_radius] * len(moving_objects_pos_key)
if len(moving_objects_radius) != len(moving_objects_pos_key):
raise ValueError(
"moving_objects_radius should be either a float or a sequence of "
"float with the same length as moving_objects_pos_key.")
self._moving_objects = [
MovingObjectRecord(position_key=key, agent_id=-1, radius=radius)
for key, radius in zip(moving_objects_pos_key, moving_objects_radius)]
self._paths = None
self._path_indices = None
if self._use_position_generator:
self._start_positions = None
self._target_positions = None
else:
self._start_positions = np.array(start_positions, dtype=np.float64)
self._target_positions = np.array(target_positions, dtype=np.float64)
# A guard against multiple initializations. See recalculate_actions below.
self._already_initialized = False
self._group_sizes = [1] if group_sizes is None else group_sizes
# The following variables are initialized in _recalculate_actions()
self._current_positions = None
self._command_positions = None
self._command_orientations = None
    # ORCA (rvo2) crowd simulator used for collision-free velocity planning;
    # it must be constructed before the addAgent calls below.
    self._orca = rvo2.PyRVOSimulator(
        self._timestep,  # timestep
        self._neighbor_distance_m,  # neighborDist
        self._max_neighbors,  # maxNeighbors
        self._time_horizon_sec,  # timeHorizon
        self._obstacle_time_horizon_sec,  # timeHorizonObst
        self._radius,  # radius
        self._max_speed_mps)  # maxSpeed
for i in range(self.num_instance):
if self._use_position_generator:
start_position = (0, 0)
else:
start_position = self._start_positions[i, :2]
agent_id = self._orca.addAgent(
tuple(start_position),
self._neighbor_distance_m, # neighborDist
self._max_neighbors, # maxNeighbors
self._time_horizon_sec, # timeHorizon
self._obstacle_time_horizon_sec, # timeHorizonObst
self._radius, # radius
self._max_speed_mps, # maxSpeed
(0.0, 0.0)) # velocity
assert agent_id == i
for obj in self._moving_objects:
obj.agent_id = self._orca.addAgent(
(0.0, 0.0), # position (will adjust after simulation starts)
self._neighbor_distance_m, # neighborDist
self._max_neighbors, # maxNeighbors
self._timestep, # timeHorizon
self._timestep, # timeHorizonObst
obj.radius, # radius
self._max_speed_mps, # maxSpeed
(0.0, 0.0)) # velocity
self._workaround_erp_issue = workaround_erp_issue
def _subsample_path(self, path, subsample_step=1.0):
subsampled_path = [path[0]]
traveled_dist = 0.0
for i, (s, t) in enumerate(zip(path[:-1], path[1:])):
traveled_dist += np.sqrt(
np.square(s[0] - t[0]) + np.square(s[1] - t[1]))
if traveled_dist > subsample_step or i >= len(path) - 2:
subsampled_path.append(t)
traveled_dist = 0.0
return subsampled_path
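  # Illustrative note (not part of the original class): _subsample_path keeps
  # the first point and then emits a waypoint each time the accumulated travel
  # distance exceeds subsample_step meters, always keeping the final point.
  # With hypothetical values, a straight path with points every 0.25 m:
  #   path = [(0.0, 0.0), (0.25, 0.0), (0.5, 0.0), ..., (2.0, 0.0)]
  #   self._subsample_path(path, subsample_step=1.0)
  #   -> roughly [(0.0, 0.0), (1.25, 0.0), (2.0, 0.0)], since a point is only
  #      emitted once more than 1.0 m has accumulated, plus the last point.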
def _generate_start_target_positions(self):
"""Generates start and target positions using goal generartors."""
assert self._scene is not None
self._start_positions = np.zeros((self.num_instance, 3), dtype=np.float64)
self._target_positions = np.zeros((self.num_instance, 3), dtype=np.float64)
self._paths = []
self._path_indices = []
start_circles, target_circles = None, None
group_radius = 1.0
current_group_size = np.random.choice(self._group_sizes)
index_in_current_group = 0
for i in range(self._num_instance):
start_pos, target_pos, path, is_valid = sample_start_target_position(
self._scene,
start_circles=start_circles,
target_circles=target_circles)
if index_in_current_group == current_group_size - 1:
start_circles, target_circles = None, None
index_in_current_group = 0
current_group_size = np.random.choice(self._group_sizes)
else:
if start_circles is None:
start_circles = [(start_pos[:2], group_radius)]
target_circles = [(target_pos[:2], group_radius)]
else:
start_circles += [(start_pos[:2], group_radius)]
target_circles += [(target_pos[:2], group_radius)]
index_in_current_group += 1
if not is_valid:
raise ValueError("No valid start/target positions.")
self._start_positions[i, :] = start_pos
self._target_positions[i, :] = target_pos
subsampled_path = self._subsample_path(path)
self._paths.append(np.array(subsampled_path, dtype=np.float32))
self._path_indices.append(0)
def _recalculate_actions(
self, time_sec: float, observations: Dict[Text, Any]) -> None:
"""Calculates crowd command for all crowd instances."""
if self._use_position_generator:
if (time_sec == object_controller.INIT_TIME and
self._start_positions is None and
not self._already_initialized):
self._generate_start_target_positions()
# Initialize only once per initial time even if recalculate actions
# is called multiple times.
self._already_initialized = True
if time_sec == object_controller.INIT_TIME:
# Resets orca simulator.
for i in range(len(self._names)):
self._orca.setAgentPosition(i, tuple(self._start_positions[i, :2]))
self._command_positions = self._start_positions.copy()
self._current_positions = self._start_positions.copy()
self._command_orientations = np.repeat(
((0.0, 0.0, 0.0, 1.0),), len(self._names), axis=0)
self._last_target_recalculation_sec = time_sec
return
else:
# The moment we step beyond initial time, we can initialize again.
self._already_initialized = False
if self._use_position_generator:
for i in range(self._num_instance):
dist = np.linalg.norm(
self._current_positions[i, :] - self._target_positions[i, :])
if dist < 2.0:
_, target_pos, path, is_valid = sample_start_target_position(
self._scene, self._current_positions[i, :])
if is_valid:
self._target_positions[i, :] = target_pos
subsampled_path = self._subsample_path(path)
self._paths.append(np.array(subsampled_path, dtype=np.float32))
self._path_indices.append(0)
# Sets agent position and preferred velocity based on target.
for i, agent_name in enumerate(self._names):
position = observations[self._position_key_formatter % agent_name]
self._orca.setAgentPosition(
i, tuple(position[:2])) # ORCA uses 2D position.
self._current_positions[i, :2] = position[:2]
if self._paths is not None:
# Find closest point on the path from start to target, which (1) hasn't
# been covered already; (2) is at least max_coverage_distance away from
# current position.
distances = np.sqrt(np.sum(np.square(
self._paths[i] - position[:2]), axis=1))
max_coverage_distance = 1.0
index = self._path_indices[i]
while True:
if index >= len(self._paths[i]) - 1:
if self._endless_trajectory:
self._paths[i] = self._paths[i][::-1]
distances = distances[::-1]
index = 0
break
elif distances[index] > max_coverage_distance:
break
else:
index += 1
self._path_indices[i] = index
target_position = self._paths[i][index, :]
else:
target_position = self._target_positions[i][:2]
goal_vector = target_position - position[:2]
goal_vector_norm = np.linalg.norm(goal_vector) + np.finfo(np.float32).eps
goal_unit_vector = goal_vector / goal_vector_norm
kv = 1
velocity = min(kv * goal_vector_norm,
self._DEFAULT_MAX_SPEED_MPS) * goal_unit_vector
self._orca.setAgentPrefVelocity(i, tuple(velocity))
for obj in self._moving_objects:
position = observations[obj.position_key]
self._orca.setAgentPosition(obj.agent_id, tuple(position[:2]))
if obj.last_position is None:
self._orca.setAgentPrefVelocity(obj.agent_id, (0.0, 0.0))
else:
velocity = (position - obj.last_position) / self._timestep
self._orca.setAgentPrefVelocity(obj.agent_id, tuple(velocity[:2]))
obj.last_position = position.copy()
# Advances orca simulator.
self._orca.doStep()
# Retrieve agent position and save in buffer.
for i in range(len(self._names)):
x, y = self._orca.getAgentPosition(i)
self._command_positions[i, :2] = (x, y)
yaw = np.arctan2(y - self._current_positions[i, 1],
x - self._current_positions[i, 0])
self._command_orientations[i] = (0, 0, np.sin(yaw / 2), np.cos(yaw / 2))
def _get_action_of_instance(
self, instance_id) -> object_controller.ControllerOutput:
"""Returns calculated actions of specific instance."""
if self._command_positions is None:
raise RuntimeError(
"Attempted to get action of instance before _recalculate_actions().")
self._validate_instance_id(instance_id)
if self._workaround_erp_issue:
k_erp = 1 / 0.2
delta_position = (
self._command_positions[instance_id] -
self._current_positions[instance_id])
command_position = (
self._current_positions[instance_id] + k_erp * delta_position)
else:
command_position = self._command_positions[instance_id].copy()
return command_position, self._command_orientations[instance_id], {}
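  # Illustrative note (not part of the original class): with the ERP workaround
  # enabled, k_erp = 1 / 0.2 = 5, matching the observation that pybullet only
  # resolves about 20% of the constraint error per timestep. Hypothetical
  # numbers: if the current x position is 1.0 and ORCA commands 1.1, the
  # emitted command is 1.0 + 5 * (1.1 - 1.0) = 1.5, so the constraint solver
  # ends up moving the object by roughly the intended 0.1.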
def set_scene(self, scene) -> None:
"""Sets the scene for crowd controller to obtain scene information."""
try:
polygons = scene.vectorized_map
for polygon in polygons:
self._orca.addObstacle([tuple(point) for point in polygon])
self._orca.processObstacles()
self._scene = scene
except NotImplementedError:
logging.exception("Scene does not implement vectorized_map property. "
"Crowd agent cannot avoid static obstacles.")
@gin.configurable
def uniform_object_factory(
instance_id: int,
object_factory: Callable[..., autonomous_object.AutonomousObject],
*args, **kwargs) -> autonomous_object.AutonomousObject:
"""A wrapper that removes instance_id in default crowd object factory."""
del instance_id
return object_factory(*args, **kwargs)
@gin.configurable
def random_object_factory(
instance_id: int,
object_factories: Iterable[
Callable[..., autonomous_object.AutonomousObject]],
*args, **kwargs) -> autonomous_object.AutonomousObject:
"""A wrapper that removes instance_id in default crowd object factory."""
del instance_id
object_factory = np.random.choice(object_factories)
return object_factory(*args, **kwargs)
@gin.configurable
def sensor_factory(instance_id: int, sensor: Callable[...,
generic_sensor.Sensor],
*args, **kwargs) -> generic_sensor.Sensor:
del instance_id
return sensor(*args, **kwargs)
@gin.configurable
class CrowdBuilder(object):
"""A helper class to construct a crowd."""
def __init__(
self,
num_instance: int,
crowd_controller_factory: Callable[..., CrowdController],
object_factory: Callable[..., autonomous_object.AutonomousObject],
sensor_factories: Iterable[Callable[..., generic_sensor.Sensor]] = None):
"""Constructor.
Args:
num_instance: Number of autonomous objects in the crowd.
crowd_controller_factory: A callable that returns a crowd controller
object.
object_factory: Callable that returns an autonomous object.
sensor_factories: list of sensor callables.
"""
self._objects = []
crowd_id_prefix = "crowd"
names = [crowd_id_prefix + "_%d" % i for i in range(num_instance)]
self._controller = crowd_controller_factory(names=names)
for i in range(num_instance):
position_sensor = base_position_sensor.BasePositionSensor(
name=names[i] + POSITION_SENSOR_POSTFIX)
# Add additional per agent sensors (e.g. camera, occupancy, etc.).
add_sensors = []
if sensor_factories:
for s in sensor_factories:
add_sensors.append(
sensor_factory(
instance_id=i, sensor=s, name=names[i] + "_" + s.__name__))
an_object = object_factory(
instance_id=i,
sensors=(position_sensor,) + tuple(add_sensors),
controller=self._controller.instance_controller(i))
self._objects.append(an_object)
@property
def crowd_objects(self) -> List[autonomous_object.AutonomousObject]:
"""Returns list of AutonomousObjects in the crowd."""
return self._objects
@property
def crowd_controller(self) -> CrowdController:
"""Returns the crowd controller."""
return self._controller
|
open-codegen/opengen/definitions.py | elsuizo/optimization-engine | 253 | 11140602 | import pkg_resources
def templates_dir():
"""Directory where the templates are found (for internal use, mainly)"""
return pkg_resources.resource_filename('opengen', 'templates/')
def templates_subdir(subdir=None):
"""
Directory where the templates are found and subfolder relative
to that path(for internal use, mainly)
"""
if subdir is None:
return templates_dir()
return pkg_resources.resource_filename('opengen', 'templates/%s/' % subdir)
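# A minimal usage sketch (illustration only, not part of the original module;
# the subfolder name below is hypothetical). Both helpers resolve paths inside
# the installed `opengen` package via pkg_resources, so they only make sense
# after the package has been installed.
def _example_show_template_paths():
    # e.g. .../site-packages/opengen/templates/
    print(templates_dir())
    # e.g. .../site-packages/opengen/templates/some_subdir/
    print(templates_subdir('some_subdir'))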
def original_icasadi_dir():
"""Directory where the original icasadi files are found (for internal use)"""
return pkg_resources.resource_filename('opengen', 'icasadi/')
|
setup.py | jmaupetit/md2pdf | 114 | 11140639 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
md2pdf - setup file
"""
import md2pdf
from setuptools import setup, find_packages
def parse_requirements(requirements, ignore=('setuptools',)):
"""
Read dependencies from requirements file (with version numbers if any)
Notes:
- this implementation does not support requirements files with extra
requirements
- this implementation has been taken from TailorDev/Watson's setup file
"""
with open(requirements) as f:
packages = set()
for line in f:
line = line.strip()
if line.startswith(('#', '-r', '--')):
continue
if '#egg=' in line:
line = line.split('#egg=')[1]
pkg = line.strip()
if pkg not in ignore:
packages.add(pkg)
return list(packages)
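# Illustrative example (not part of the original setup script): given a
# hypothetical requirements.txt containing
#
#   Markdown==2.6.7
#   WeasyPrint>=0.34
#   # a comment line
#   setuptools
#
# parse_requirements('requirements.txt') would return something like
# ['Markdown==2.6.7', 'WeasyPrint>=0.34']: comment lines, '-r'/'--' option
# lines and ignored packages are skipped, and order is not guaranteed because
# a set is used internally.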
setup(
name='md2pdf',
version=md2pdf.__version__,
packages=find_packages(),
install_requires=parse_requirements('requirements.txt'),
setup_requires=['pytest-runner', ],
tests_require=parse_requirements('requirements-dev.txt'),
author='<NAME>',
author_email='<EMAIL>',
description='md2pdf, a Markdown to PDF conversion tool',
license='MIT',
keywords='markdown converter css pdf',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Customer Service',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Office/Business',
'Topic :: Utilities',
],
)
|
mysqlbinlog_back.py | bbotte/mysqlbinlog_flashback | 195 | 11140647 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Main program that generates reverse (flashback) SQL from the MySQL binlog.
v0.1.0 2016/10/20 yilai created
"""
import traceback
import os,sys
import logging
from func import init_logger,print_stack
from flashback import Parameter,deal_all_event,generate_create_table,convert_datetime_to_timestamp
import codecs
from datetime import datetime,timedelta
from time import mktime
from constant import Constant
from optparse import OptionParser
from mysql_table import MysqlTable
logger = logging.getLogger(__name__)
def get_check_option():
"""
    Gets and validates the user-supplied options; returns the options object.
"""
logger.debug(sys.argv)
usage = 'usage: python %prog [options]' \
'\nsample1:python %prog --host="127.0.0.1" --username="root" --port=43306 --password="" --schema=test --table="test5"' \
'\n' \
'sample2:python %prog --host="127.0.0.1" --username="root" --port=43306 --password="" --schema=test --table="test5,test6" ' \
'--binlog_end_time="2016-11-05 11:27:13" --binlog_start_file_name="mysql-bin.000024" --binlog_start_file_position=4 ' \
'--binlog_start_time="2016-11-04 11:27:13" --skip_delete --skip_insert --add_schema_name' \
'\nsample3:python %prog --host="127.0.0.1" --username="root" --port=43306 --password="" --schema=test --table="test5,test6" --binlog_start_file_name="mysql-bin.000022"'
parser = OptionParser(usage)
parser.add_option("-H","--host", type='string', help="mandatory,mysql hostname" )
parser.add_option("-P","--port", type='int',default=3306,help="mysql port,default 3306" )
parser.add_option("-u","--username", type='string', help="mandatory,username" )
parser.add_option("-p","--password", type='string',default="",help="password" )
    # TODO: only a single string for now; multiple schemas are not officially supported yet? They actually work, but the output file name then contains commas.
parser.add_option("-s","--schema", type='string',help="mandatory,mysql schema")
parser.add_option("-t","--tables", type='string',
help="mandatory,mysql tables,suport multiple tables,use comma as separator")
parser.add_option("-N","--binlog_end_time", type='string',
help="binlog end time,format yyyy-mm-dd hh24:mi:ss,default is current time ")
parser.add_option("-S","--binlog_start_file_name", type='string',
help="binlog start file name,default is current logfile of db")
    # TODO: should the start position really be specified here?
parser.add_option("-L","--binlog_start_file_position", type='int',default=4,
help="binlog start file name")
parser.add_option("-E","--binlog_start_time", type='string',
help="binlog start time,format yyyy-mm-dd hh24:mi:ss")
parser.add_option("-l","--output_file_path", type='string',default="./log",
help="file path that sql generated,,default ./log")
parser.add_option("-I","--skip_insert", action="store_true",default=False,
help="skip insert(WriteRowsEvent) event")
parser.add_option("-U","--skip_update", action="store_true",default=False,
help="skip update(UpdateRowsEvent) event")
parser.add_option("-D","--skip_delete", action="store_true",default=False,
help="skip delete(DeleteRowsEvent) event")
parser.add_option("-a","--add_schema_name", action="store_true",default=False,
help="add schema name for flashback sql")
parser.add_option("-v","--version",action="store_true",default=False,
help="version info")
(options, args) = parser.parse_args()
if options.version is True:
logger.info("version is {0}".format(Constant.VERSION))
exit(0)
if options.host is None:
raise ValueError("parameter error:host is mandatory input")
if options.username is None:
raise ValueError("parameter error:username is mandatory input")
if options.schema is None:
raise ValueError("parameter error:schema is mandatory input")
if options.tables is None:
raise ValueError("parameter error:tables is mandatory input")
if not os.path.exists(options.output_file_path) :
raise ValueError("parameter error:output {0} dir is not exists".format(options.output_file_path))
if options.skip_insert and options.skip_delete and options.skip_update:
raise ValueError("conld choose at least one event")
if not options.binlog_end_time is None:
try:
end_to_timestamp=convert_datetime_to_timestamp(options.binlog_end_time, '%Y-%m-%d %H:%M:%S')
except Exception as err:
raise ValueError("binlog_end_time {0} format error,detail error={1}".format(options.binlog_end_time,err.__str__()))
if not options.binlog_start_time is None:
try:
start_to_timestamp=convert_datetime_to_timestamp(options.binlog_start_time, '%Y-%m-%d %H:%M:%S')
except Exception as err:
raise ValueError("binlog_start_time {0} format error,detail error={1}".format(options.binlog_start_time,err.__str__()))
if not options.binlog_end_time is None:
if start_to_timestamp>=end_to_timestamp:
raise ValueError("binlog_start_time is above binlog_end_time,start_time={0},end_time={1}".
format(options.binlog_start_time,options.binlog_end_time))
return options
def parse_option():
"""
    Parses the user-supplied options and returns a Parameter object.
"""
opt=get_check_option()
dict={}
dict["host"]=opt.host
dict["username"]=opt.username
dict["port"]=opt.port
dict["password"]=opt.password
dict["start_binlog_file"]=opt.binlog_start_file_name
dict["start_position"]=opt.binlog_start_file_position
dict["output_file_path"]=opt.output_file_path
dict["schema"]=opt.schema
dict["tablename"]=opt.tables
dict["keep_data"]=True
input_end_to_datetime=opt.binlog_end_time
if not input_end_to_datetime is None:
end_to_timestamp=convert_datetime_to_timestamp(input_end_to_datetime, '%Y-%m-%d %H:%M:%S')
dict["end_to_timestamp"]=int(end_to_timestamp)
input_start_to_datetime=opt.binlog_start_time
if not input_start_to_datetime is None:
start_to_timestamp=convert_datetime_to_timestamp(input_start_to_datetime, '%Y-%m-%d %H:%M:%S')
dict["start_to_timestamp"]=int(start_to_timestamp)
dict["skip_insert"]=opt.skip_insert
dict["skip_update"]=opt.skip_update
dict["skip_delete"]=opt.skip_delete
dict["add_schema_name"]=opt.add_schema_name
parameter=Parameter(**dict)
parameter.check_tables_exist()
parameter.set_defaut()
return parameter
def new_files(parameter):
"""
    Creates the flashback sql file, the file preserving the current row data,
    and the file with the create-table statements for that preserved data.
    :parameter: the Parameter instance built from the user input options
    :return: file handles, stored in the parameter instance
"""
flash_filename=parameter.get_file_name("flashback")
flashback=codecs.open(flash_filename, "w", encoding=Constant.FILE_ENCODING)
parameter.file["flashback"]=flashback
logger.debug("flashback sql fileno={0}".format(parameter.file["flashback"]))
if parameter.keep_data:
data_filename=parameter.get_file_name("save_data_dml")
data=codecs.open(data_filename, "w", encoding=Constant.FILE_ENCODING)
parameter.file["data"]=data
logger.debug("data sql fileno={0}".format(parameter.file["data"]))
data_create_filename=parameter.get_file_name("save_data_create_table")
data_create=codecs.open(data_create_filename, "w", encoding=Constant.FILE_ENCODING)
parameter.file["data_create"]=data_create
logger.debug("data create sql fileno={0}".format(parameter.file["data_create"]))
def close_files(parameter):
if parameter.file["data"] is not None:
parameter.file["data"].close()
if parameter.file["data_create"] is not None:
parameter.file["data_create"].close()
if parameter.file["flashback"] is not None:
parameter.file["flashback"].close()
def print_stat(parameter):
logger.info("===statistics===")
logger.info("scan {0} events ".format(parameter.stream.event_count))
logger.info(parameter.stat)
def main():
logfilename="{0}/{1}".format(Constant.LOGFILE_PATH,Constant.LOGFILE_NAME)
init_logger(logfilename,logging.INFO)
#init_logger(logfilename)
    # TODO: there is no warning when the table map event is missing, which can lead to lost data
    # TODO: if the table definition has been altered, there is basically no way to recover
try:
parameter=parse_option()
except Exception as err:
logger.error(err.__str__())
print_stack()
exit(1)
try:
parameter=parse_option()
logger.info(u"parameter={0}".format(parameter.__dict__))
new_files(parameter)
deal_all_event(parameter)
generate_create_table(parameter)
print_stat(parameter)
except Exception as err:
logger.error("error:"+err.__str__())
print_stack()
finally:
parameter.stream.close()
close_files(parameter)
if __name__=='__main__':
main() |
tensorflow/compiler/tests/add_n_test.py | abhaikollara/tensorflow | 848 | 11140662 | <filename>tensorflow/compiler/tests/add_n_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AddN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class XlaAddNTest(xla_test.XLATestCase):
def testAddTensorLists(self):
with self.session(), self.test_scope():
l1 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=3)
l2 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=3)
l1 = list_ops.tensor_list_set_item(l1, 0, 5.)
l2 = list_ops.tensor_list_set_item(l2, 2, 10.)
l = math_ops.add_n([l1, l2])
self.assertAllEqual(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),
[5.0, 0.0, 10.0])
def testAddTensorListsFailsIfLeadingDimsMismatch(self):
with self.session(), self.test_scope():
l1 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=2)
l2 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=3)
l = math_ops.add_n([l1, l2])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"TensorList arguments to AddN must all have the same shape"):
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32).eval()
def testAddTensorListsFailsIfElementShapesMismatch(self):
with self.session() as session, self.test_scope():
# Use placeholders instead of constant values for shapes to prevent TF's
# shape inference from catching this early.
l1_element_shape = array_ops.placeholder(dtype=dtypes.int32)
l2_element_shape = array_ops.placeholder(dtype=dtypes.int32)
l1 = list_ops.tensor_list_reserve(
element_shape=l1_element_shape,
element_dtype=dtypes.float32,
num_elements=3)
l2 = list_ops.tensor_list_reserve(
element_shape=l2_element_shape,
element_dtype=dtypes.float32,
num_elements=3)
l = math_ops.add_n([l1, l2])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"TensorList arguments to AddN must all have the same shape"):
session.run(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32), {
l1_element_shape: [],
l2_element_shape: [2]
})
if __name__ == "__main__":
test.main()
|
tests/test_utils_get_percent.py | ZSD-tim/dayu_widgets | 157 | 11140720 | """
Test get_percent.
"""
import pytest
from dayu_widgets import utils
@pytest.mark.parametrize('value, mini, maxi, result', (
(0, 0, 100, 0),
(100, 0, 100, 100),
(1, 0, 100, 1),
(99, 0, 100, 99),
(-1, 0, 100, 0),
(101, 0, 100, 100),
(101, 10, 110, 91),
(10, 100, 100, 100),
))
def test_get_percent(value, mini, maxi, result):
"""Test get_percent with normal arg."""
assert utils.get_percent(value, mini, maxi) == result
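# A minimal reference sketch of the behaviour these cases exercise (an
# assumption for illustration, not the actual dayu_widgets implementation):
# the value is clamped to [mini, maxi] and rescaled to a 0-100 percentage,
# with the degenerate mini == maxi case returning 100.
def _reference_get_percent(value, mini, maxi):
    if maxi == mini:
        return 100
    value = max(mini, min(value, maxi))
    return (value - mini) * 100 / (maxi - mini)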
|
xled/cli.py | magicus/xled | 121 | 11140736 | # -*- coding: utf-8 -*-
"""Console script for xled."""
from __future__ import absolute_import
import logging
import time
import click
import click_log
import xled.auth
import xled.control
import xled.discover
import xled.exceptions
import xled.security
log = logging.getLogger(__name__)
LOGGERS = (log, xled.discover.log, xled.auth.log, xled.control.log)
def common_preamble(name=None, host_address=None):
if name:
click.echo("Looking for a device with name: {}...".format(name))
elif host_address:
click.echo("Looking for device with address: {}...".format(host_address))
else:
click.echo("Looking for any device...")
hw_address, device_name, ip_address = xled.discover.discover(
find_id=name, destination_host=host_address
)
if name:
click.echo("Working on requested device.")
else:
click.echo("Working on device: {}".format(device_name))
log.debug("HW address = %s", hw_address)
log.debug("IP address = %s", ip_address)
return xled.control.HighControlInterface(ip_address, hw_address)
def validate_time(ctx, param, value):
try:
struct_time = time.strptime(value, "%H:%M")
return (struct_time.tm_hour, struct_time.tm_min)
except ValueError:
raise click.BadParameter("Time needs to be in format HH:MM.")
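# Illustrative example (not part of the original CLI): the callback turns an
# "HH:MM" string into an (hour, minute) tuple, e.g.
#   validate_time(None, None, "06:30") -> (6, 30)
# and raises click.BadParameter for anything time.strptime cannot parse with
# the "%H:%M" format (ctx and param are unused, so None is fine here).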
@click.group()
@click.version_option()
@click.pass_context
@click.option(
"--name",
metavar="DEVICE_NAME",
help="Name of the device to operate on. Mutually exclusive with --hostname.",
)
@click.option(
"--hostname",
metavar="ADDRESS",
help="Address of the device to operate on. Mutually exclusive with --name.",
)
@click_log.simple_verbosity_option(
log,
"--verbosity-cli",
help="Sets verbosity of main CLI. Either CRITICAL, ERROR, WARNING, INFO or DEBUG",
)
@click_log.simple_verbosity_option(
xled.discover.log,
"--verbosity-discover",
help="Sets verbosity of discover module. Either CRITICAL, ERROR, WARNING, INFO or DEBUG",
)
@click_log.simple_verbosity_option(
xled.control.log,
"--verbosity-control",
help="Sets verbosity of control module. Either CRITICAL, ERROR, WARNING, INFO or DEBUG",
)
@click_log.simple_verbosity_option(
xled.auth.log,
"--verbosity-auth",
help="Sets verbosity of auth module. Either CRITICAL, ERROR, WARNING, INFO or DEBUG",
)
def main(ctx, name, hostname):
for logger in LOGGERS:
click_log.basic_config(logger)
if name and hostname:
raise click.BadParameter("Either name or hostname can be set not both.")
ctx.obj = {"name": name, "hostname": hostname}
@main.command(name="get-mode", help="Gets current device mode.")
@click.pass_context
def get_mode(ctx):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
mode = control_interface.get_mode()
click.echo("Device in mode {}.".format(mode["mode"]))
@main.command(name="on", help="Turns device on and starts last used movie.")
@click.pass_context
def turn_on(ctx):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
log.debug("Turning on...")
control_interface.turn_on()
click.echo("Turned on.")
@main.command(name="off", help="Turns device off.")
@click.pass_context
def turn_off(ctx):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
log.debug("Turning off...")
control_interface.turn_off()
click.echo("Turned off.")
@main.command(name="get-timer", help="Gets current timer settings.")
@click.pass_context
def get_timer(ctx):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
log.debug("Getting timer...")
timer = control_interface.get_formatted_timer()
click.echo("Time now: {}.".format(timer.now))
if timer.on is False:
click.echo("Time to turn on not set.")
else:
click.echo("Turn on {}.".format(timer.on))
if timer.off is False:
click.echo("Time to turn off not set.")
else:
click.echo("Turn off {}.".format(timer.off))
@main.command(name="set-timer", help="Sets timer.")
@click.argument("time-on", callback=validate_time)
@click.argument("time-off", callback=validate_time)
@click.pass_context
def set_timer(ctx, time_on, time_off):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
seconds_on = xled.util.seconds_after_midnight_from_time(*time_on)
seconds_off = xled.util.seconds_after_midnight_from_time(*time_off)
log.debug("Setting timer...")
control_interface.set_timer(seconds_on, seconds_off)
click.echo("Timer set.")
@main.command(name="disable-timer", help="Disables timer.")
@click.pass_context
def disable_timer(ctx):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
log.debug("Disabling timer...")
control_interface.disable_timer()
click.echo("Timer disabled.")
@main.command(name="get-device-name", help="Gets current device name.")
@click.pass_context
def get_device_name(ctx):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
log.debug("Getting device name...")
name = control_interface.get_device_name()
click.echo("Device name: {}".format(name["name"]))
@main.command(name="set-device-name", help="Sets device name.")
@click.argument("name")
@click.pass_context
def set_device_name(ctx, name):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
log.debug("Setting device name...")
control_interface.set_device_name(name)
click.echo("Set new name to {}".format(name))
@main.command(name="upload-movie", help="Uploads movie.")
@click.argument("movie", type=click.File("rb"))
@click.pass_context
def upload_movie(ctx, movie):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
log.debug("Uploading movie...")
response = control_interface.set_led_movie_full(movie)
click.echo("Uploaded {} frames.".format(response["frames_number"]))
@main.command(name="set-color", help="Sets static color.")
@click.argument("red", type=click.IntRange(0, 256))
@click.argument("green", type=click.IntRange(0, 256))
@click.argument("blue", type=click.IntRange(0, 256))
@click.pass_context
def set_color(ctx, red, green, blue):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
log.debug("Setting color")
control_interface.set_static_color(red, green, blue)
click.echo("Color set")
@main.command(name="update-firmware", help="Updates firmware.")
@click.argument("stage0", type=click.File("rb"))
@click.argument("stage1", type=click.File("rb"))
@click.pass_context
def update_firmware(ctx, stage0, stage1):
control_interface = common_preamble(ctx.obj.get("name"), ctx.obj.get("hostname"))
try:
control_interface.update_firmware(stage0, stage1)
except xled.exceptions.HighInterfaceError as hci_err:
click.echo(hci_err, err=True)
else:
click.echo("Firmware update successful.")
|
keras_fcn/metrics.py | li657835991/FCN | 235 | 11140742 | import keras.backend as K
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
def Mean_IoU(classes):
def mean_iou(y_true, y_pred):
mean_iou, op = tf.metrics.mean_iou(y_true, y_pred, classes)
return mean_iou
_initialize_variables()
return mean_iou
def _initialize_variables():
"""Utility to initialize uninitialized variables on the fly.
"""
variables = tf.local_variables()
uninitialized_variables = []
for v in variables:
if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
uninitialized_variables.append(v)
v._keras_initialized = True
if uninitialized_variables:
sess = K.get_session()
sess.run(tf.variables_initializer(uninitialized_variables))
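# A minimal usage sketch (assumption: a Keras segmentation model compiled on a
# TensorFlow session; illustration only, not part of this module):
#
#   model.compile(optimizer='adam',
#                 loss='categorical_crossentropy',
#                 metrics=[Mean_IoU(classes=21)])
#
# Mean_IoU(classes) returns a metric function whose running mean IoU is backed
# by tf.metrics.mean_iou local variables, which _initialize_variables() above
# initializes on the current Keras session.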
|
examples/ionq_half_adder.py | ssc1729/ProjectQ | 795 | 11140744 | # -*- coding: utf-8 -*-
# Copyright 2021 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Example of a basic 'half-adder' circuit using an IonQBackend."""
import getpass
import random
import matplotlib.pyplot as plt
import projectq.setups.default
import projectq.setups.ionq
from projectq import MainEngine
from projectq.backends import IonQBackend
from projectq.libs.hist import histogram
from projectq.ops import CNOT, All, Barrier, Measure, Toffoli, X
def run_half_adder(eng):
"""Run the half-adder circuit."""
# allocate the quantum register to entangle
circuit = eng.allocate_qureg(4)
qubit1, qubit2, qubit3, qubit4 = circuit
result_qubits = [qubit3, qubit4]
# X gates on the first two qubits
All(X) | [qubit1, qubit2]
# Barrier
Barrier | circuit
# Cx gates
CNOT | (qubit1, qubit3)
CNOT | (qubit2, qubit3)
# CCNOT
Toffoli | (qubit1, qubit2, qubit4)
# Barrier
Barrier | circuit
# Measure result qubits
All(Measure) | result_qubits
# Flush the circuit (this submits a job to the IonQ API)
eng.flush()
# Show the histogram
histogram(eng.backend, result_qubits)
plt.show()
# return a random answer from our results
probabilities = eng.backend.get_probabilities(result_qubits)
random_answer = random.choice(list(probabilities.keys()))
return [int(s) for s in random_answer]
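# Illustrative note (not part of the original example): with both inputs set
# to |1> by the X gates above, the two CNOTs compute the sum bit (XOR) on
# qubit3 and the Toffoli computes the carry bit (AND) on qubit4, i.e. the
# classical half-adder table:
#   a b | sum carry
#   0 0 |  0    0
#   0 1 |  1    0
#   1 0 |  1    0
#   1 1 |  0    1
# so the dominant measurement outcome here should be sum=0, carry=1.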
if __name__ == '__main__':
token = None
device = None
if token is None:
token = getpass.getpass(prompt='IonQ apiKey > ')
if device is None:
device = input('IonQ device > ')
backend = IonQBackend(
use_hardware=True,
token=token,
num_runs=200,
verbose=True,
device=device,
)
engine_list = projectq.setups.ionq.get_engine_list(
token=token,
device=device,
)
engine = MainEngine(backend, engine_list)
# run the circuit and print the result
print(run_half_adder(engine))
|
autoimpute/visuals/imputations.py | gjdv/autoimpute | 191 | 11140754 | """Visualizations to explore imputations of an incomplete dataset."""
import matplotlib.pylab as plt
import seaborn as sns
from autoimpute.utils import check_data_structure
from autoimpute.imputations import SingleImputer
from .helpers import _validate_data, _validate_kwgs, _get_observed, _melt_df
from .helpers import _default_plot_args, _plot_imp_dists_helper
#pylint:disable=unused-variable
#pylint:disable=too-many-arguments
#plyint:disable=too-many-locals
@check_data_structure
def plot_imp_scatter(d, x, y, strategy, color=None,
title="Jointplot after Imputation",
h=8.27, imp_kwgs=None, a=0.5, marginals=None,
obs_color="navy", imp_color="red", **plot_kwgs):
"""Plot the joint scatter and density plot after single imputation.
Use this method to visualize a scatterplot between two features, x and y,
where y is imputed and x is a predictor used to impute y. This method
performs single imputation and is useful to determine how an imputation
method looks under the hood.
Args:
d (pd.DataFrame): DataFrame with data to impute and plot.
x (str): column to plot on x axis.
y (str): column to plot on y axis and set color for imputation.
strategy (str): imputation method for SingleImputer.
        color (str, Optional): which variable to color with imputations.
            Default is None, which means y is colored. The other option is to
            color "x". Color should be the same as "x" or "y".
        title (str, Optional): title of plot.
            Default is "Jointplot after Imputation".
        h (float, Optional): height of the jointplot. Default is 8.27.
imp_kwgs (dict, Optional): imp kwgs for SingleImputer procedure.
Default is None.
a (float, Optional): alpha for plot color. Default is 0.5
marginals (dict, Optional): dictionary of marginal plot args.
Default is None, configured in code below.
obs_color (str, Optional): color of observed. Default is navy.
imp_color (str, Optional): color of imputations. Default is red.
**plot_kwgs: keyword arguments used by sns.set.
Raises:
ValueError: x and y must be names of columns in data
"""
# plot setup and arg validation
_default_plot_args(**plot_kwgs)
_validate_kwgs(marginals)
_validate_kwgs(imp_kwgs)
if marginals is None:
marginals = dict(rug=True, kde=True)
# validate x and y selection
if not x in d.columns or not y in d.columns:
err = "x and y must be names of columns in data"
raise ValueError(err)
# create imputer with strategy and optional imp kwgs
if imp_kwgs is None:
imp = SingleImputer(strategy=strategy)
else:
imp = SingleImputer(strategy=strategy, imp_kwgs=imp_kwgs)
# handling the color configuration
if color is None:
color = y
else:
if color == y:
color = y
elif color == x:
color = x
else:
err = "color must be the same as `y` or `x`"
raise ValueError(err)
# configure and apply the imputer
impute = imp.fit_transform(d)
impute["colors"] = obs_color
impute.loc[imp.imputed_[color], "colors"] = imp_color
joints_color = impute["colors"]
# create the joint plot
joint_kws = dict(facecolor=joints_color, edgecolor=joints_color)
g = sns.jointplot(x=x, y=y, data=impute, alpha=a, height=h,
joint_kws=joint_kws, marginal_kws=marginals)
# final plot config and title
plt.subplots_adjust(top=0.925)
g.fig.suptitle(title)
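# A minimal usage sketch (hypothetical DataFrame and column names, and "pmm"
# is only an example of a SingleImputer strategy; illustration only, not part
# of this module):
#
#   plot_imp_scatter(df_with_missing, x="age", y="income",
#                    strategy="pmm", a=0.4)
#
# Additional imputer keyword arguments can be forwarded through imp_kwgs.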
def plot_imp_dists(d, mi, imp_col, title="Distributions after Imputation",
include_observed=True, separate_observed=True,
side_by_side=False, hist_observed=False,
hist_imputed=False, gw=(.5, .5), gh=(.5, .5), **plot_kwgs):
"""Plot the density between imputations for a given column.
Use this method to plot the density of a given column after multiple
imputation. The function allows the user to also plot the observed data
from the column prior to imputation taking place. Further, the user can
specify whether the observed should be separated into its own plot or not.
Args:
d (list): dataset returned from multiple imputation.
mi (MultipleImputer): multiple imputer used to generate d.
imp_col (str): column to plot. Should be a column with imputations.
title (str, Optional): title of plot. Default is
"Distributions after Imputation".
include_observed (bool, Optional): whether or not to include observed
data in the plot. Default is True. If False, observed data for
imp_col will not be included as a distribution for density.
separate_observed (bool, Optional): whether or not to separate the
observed data when plotting against imputed. Default is True. If
False, observed data distribution will be plotted on same plot
as the imputed data distribution. Note, this attribute matters if
and only if `include_observed=True`.
side_by_side (bool, Optional): whether columns should be plotted next
to each other or stacked vertically. Default is False. If True,
plots will be plotted side-by-side. Note, this attribute matters
if and only if `include_observed=True`.
hist_observed (bool, Optional): whether histogram should be plotted
along with the density for observed values. Default is False.
Note, this attribute matters if and only if
`include_observed=True`.
hist_imputed (bool, Optional): whether histogram should be plotted
along with the density for imputed values. Default is False. Note,
this attribute matters if and only if `include_observed=True`.
gw (tuple, Optional): if side-by-side plot, the width ratios for each
plot. Default is (.5, .5), so each plot will be same width.
Matters if and only if `include_observed=True` and
`side_by_side=True`.
gh (tuple, Optional): if stacked plot, the height ratios for each plot.
Default is (.5, .5), so each plot will be the same height.
Matters if and only if `include_observed=True` and
`side_by_side=False`.
**plot_kwgs: keyword arguments used by sns.set.
Returns:
sns.distplot: densityplot for observed and/or imputed data
Raises:
ValueError: see _validate_data method
"""
# start by setting plot kwgs
_default_plot_args(**plot_kwgs)
# define the functionality if observed should be included
if include_observed:
obs = _get_observed(d, mi, imp_col)
obs = d[0][1].loc[obs, imp_col]
# define the functionality if separate observed
if separate_observed:
g = {}
g["w"] = {"width_ratios": gw}
g["h"] = {"height_ratios": gh}
# define the functionality if side by side or not
if side_by_side:
f, ax = plt.subplots(1, 2, gridspec_kw=g["w"])
else:
f, ax = plt.subplots(2, 1, gridspec_kw=g["h"])
sns.distplot(obs, hist=hist_observed, ax=ax[0], label="Observed")
_plot_imp_dists_helper(d, hist_imputed, imp_col, ax[1])
# handle case where not separated
else:
sns.distplot(obs, hist=hist_observed, label="Observed")
_plot_imp_dists_helper(d, hist_imputed, imp_col)
# handle case where not observed
else:
_validate_data(d, mi, imp_col)
_plot_imp_dists_helper(d, hist_imputed, imp_col)
# plot title and legend
plt.suptitle(title)
plt.legend()
def plot_imp_boxplots(d, mi, imp_col, side_by_side=False,
title="Observed vs. Imputed Boxplots",
obs_kwgs=None, imp_kwgs=None, **plot_kwgs):
"""Plot the boxplots between observed and imputations for a given column.
Use this method to plot the boxplots of a given column after multiple
imputation. The function also plots the boxplot of the observed data from
the column prior to imputation taking place. Further, the user can specify
additional arguments to tailor the design of the plots themselves.
Args:
d (list): dataset returned from multiple imputation.
mi (MultipleImputer): multiple imputer used to generate d.
imp_col (str): column to plot. Should be a column with imputations.
side_by_side (bool, Optional): whether columns should be plotted next
to each other or stacked vertically. Default is False. If True,
plots will be plotted side-by-side.
title (str, Optional): title of boxplots. Default is
"Observed vs. Imputed Boxplots."
obs_kwgs (dict, Optional): dictionary of arguments to unpack for
observed boxplot. Default is None, so no additional tailoring.
imp_kwgs (dict, Optional): dictionary of arguments to unpack for
imputed boxplots. Default is None, so no additional tailoring.
**plot_kwgs: keyword arguments used by sns.set.
Returns:
sns.distplot: boxplots for observed and imputed data
Raises:
ValueError: see _validate_data method.
"""
# set plot type and define names necessary
_default_plot_args(**plot_kwgs)
obs = _get_observed(d, mi, imp_col)
obs_ = d[0][1].loc[obs, imp_col].copy().to_frame()
obs_["obs"] = "obs"
n = len(d)
ratio = 1/(n+1)
g = (ratio, 1-ratio)
datasets_merged = _melt_df(d, mi, imp_col)
# validate obs_kwgs, imp_kwgs
_validate_kwgs(obs_kwgs)
_validate_kwgs(imp_kwgs)
# deal with plotting side by side
if side_by_side:
xo = "obs"
yo = imp_col
yi = imp_col
xi = "imp_num"
f, ax = plt.subplots(
1, 2, gridspec_kw={"width_ratios": (ratio, 1-ratio)}
)
else:
xo = imp_col
yo = "obs"
yi = "imp_num"
xi = imp_col
f, ax = plt.subplots(
2, 1, gridspec_kw={"height_ratios": (ratio, 1-ratio)}
)
# dealing with plotting with or without kwgs
if not obs_kwgs is None:
sns.boxplot(
x=xo, y=yo, data=obs_, ax=ax[0], **obs_kwgs
).set(xlabel="", ylabel="")
else:
sns.boxplot(
x=xo, y=yo, data=obs_, ax=ax[0]
).set(xlabel="", ylabel="")
if not imp_kwgs is None:
sns.boxplot(
x=xi, y=yi, data=datasets_merged, ax=ax[1], **imp_kwgs
).set(xlabel="", ylabel="")
else:
sns.boxplot(
x=xi, y=yi, data=datasets_merged, ax=ax[1]
).set(xlabel="", ylabel="")
# plot title
plt.suptitle(title)
def plot_imp_swarm(d, mi, imp_col, palette=None,
title="Imputation Swarm", **plot_kwgs):
"""Create the swarm plot for multiply imputed data.
Args:
d (list): dataset returned from multiple imputation.
mi (MultipleImputer): multiple imputer used to generate d.
imp_col (str): column to plot. Should be a column with imputations.
title (str, Optional): title of plot. Default is "Imputation Swarm".
palette (list, tuple, Optional): colors for the imps and observed.
Default is None. if None, colors default to ["r","c"].
**plot_kwgs: keyword arguments used by sns.set.
Returns:
sns.distplot: swarmplot for imputed data
Raises:
ValueError: see _validate_data method.
"""
# set plot type, validate, and define names necessary
_default_plot_args(**plot_kwgs)
_validate_data(d, mi, imp_col)
datasets_merged = _melt_df(d, mi, imp_col)
if palette is None:
palette = ["r", "c"]
# swarmplot example
sns.swarmplot(
x="imp_num", y=imp_col, hue="imputed", palette=palette,
data=datasets_merged, hue_order=["yes", "no"]
).set(xlabel="Imputation Number", title=title)
def plot_imp_strip(d, mi, imp_col, palette=None,
title="Imputation Strip", **plot_kwgs):
"""Create the strip plot for multiply imputed data.
Args:
d (list): dataset returned from multiple imputation.
mi (MultipleImputer): multiple imputer used to generate d.
imp_col (str): column to plot. Should be a column with imputations.
title (str, Optional): title of plot. Default is "Imputation Strip".
palette (list, tuple, Optional): colors for the imps and observed.
Default is None. if None, colors default to ["r","c"].
**plot_kwgs: keyword arguments used by sns.set.
Returns:
sns.distplot: stripplot for imputed data
Raises:
ValueError: see _validate_data method.
"""
# set plot type, validate, and define names necessary
_default_plot_args(**plot_kwgs)
_validate_data(d, mi, imp_col)
datasets_merged = _melt_df(d, mi, imp_col)
if palette is None:
palette = ["r", "c"]
# stripplot example
sns.stripplot(
x="imp_num", y=imp_col, hue="imputed", palette=palette,
data=datasets_merged, jitter=True, hue_order=["yes", "no"], dodge=True
).set(xlabel="Imputation Number", title=title)
|
tools/demo.py | ChisenZhang/Curve-Text-Detector | 627 | 11140769 | import numpy as np
import cv2
import os, glob
import _init_paths
import caffe
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv, info_syn_transform_inv_h, info_syn_transform_inv_w
from fast_rcnn.nms_wrapper import nms, pnms
from utils.blob import im_list_to_blob
from shapely.geometry import *
caffe.set_mode_gpu()
caffe.set_device(0)
net_prototxt = "../models/ctd/test_ctd_tloc.prototxt"
model = "../output/ctd_tloc.caffemodel"
cofig_file = "../experiments/cfgs/rfcn_ctd.yml"
images = glob.glob("../images/demo/*.jpg")
def _get_image_blob(im):
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def im_detect(net, im, boxes=None):
blobs, im_scales = _get_blobs(im, boxes)
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
rois = net.blobs['rois'].data.copy()
boxes = rois[:, 1:5] / im_scales[0]
scores = blobs_out['cls_prob']
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
############################################### curve
info_deltas_h = blobs_out['info_pred_h']
pred_infos_h = info_syn_transform_inv_h(boxes, info_deltas_h)
info_deltas_w = blobs_out['info_pred_w']
pred_infos_w = info_syn_transform_inv_w(boxes, info_deltas_w)
assert len(boxes) == len(pred_infos_h) == len(pred_infos_w)
###############################################
return scores, pred_boxes, pred_infos_h, pred_infos_w
def vis(im, dets, thresh=0.3):
for i in xrange(np.minimum(100, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, 4]
info_bbox = dets[i, 5:33] # syn
pts = [info_bbox[i] for i in xrange(28)]
assert(len(pts) == 28), 'wrong length.'
if score > thresh:
for p in xrange(0,28,2):
cv2.line(im,(int(bbox[0]) + int(pts[p%28]), int(bbox[1]) + int(pts[(p+1)%28])),
(int(bbox[0]) + int(pts[(p+2)%28]), int(bbox[1]) + int(pts[(p+3)%28])),(0,0,255),2)
im = cv2.resize(im, (1280, 720)) # visualization
cv2.imshow('Dectecting results syn.', im)
cv2.waitKey(0)
def nps(dets, cdets):
delete_inds = []
for i in xrange(cdets.shape[0]):
bbox = cdets[i, :4]
score = cdets[i, 4]
info_bbox = cdets[i, 5:33]
pts = [(int(bbox[0]) + info_bbox[j], int(bbox[1]) + info_bbox[j+1]) for j in xrange(0,28,2)]
ploygon_test = Polygon(pts)
if not ploygon_test.is_valid:
            print('non-polygon detected')
delete_inds.append(i)
if int(ploygon_test.area) < 10:
            print('neg-polygon')
delete_inds.append(i)
dets = np.delete(dets, delete_inds, 0)
cdets = np.delete(cdets, delete_inds, 0)
return dets, cdets
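# Illustrative note (not part of the original demo): nps() drops detections
# whose 14 predicted boundary points do not form a valid polygon or whose
# polygon area is tiny. A hypothetical standalone check with shapely:
#
#   from shapely.geometry import Polygon
#   pts = [(0, 0), (2, 0), (2, 1), (0, 1)]
#   poly = Polygon(pts)
#   keep = poly.is_valid and int(poly.area) >= 10  # dropped here (area == 2)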
if __name__ == "__main__":
cfg_from_file(cofig_file)
net = caffe.Net(net_prototxt, model, caffe.TEST)
for image in images:
im = cv2.imread(image)
scores, boxes, infos_h, infos_w = im_detect(net, im, None)
assert(scores.shape[0] == infos_h.shape[0] == infos_w.shape[0]) , 'length mismatch'
inds = np.where(scores[:, 1] > 0.5)[0]
cls_scores = scores[inds, 1]
cls_boxes = boxes[inds, 4:8]
## curve
cls_infos_h = infos_h[inds, :14]
cls_infos_w = infos_w[inds, :14]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
# stack h and w pred.
cls_infos = np.zeros((cls_infos_h.shape[0], 28))
wh_stack_temp = np.dstack((cls_infos_w, cls_infos_h))
assert(wh_stack_temp.shape[0] == cls_infos.shape[0]), 'wh stack length mismatch.'
for ixstack, row_cls_infos in enumerate(cls_infos):
cls_infos[ixstack] = wh_stack_temp[ixstack].ravel()
cls_dets_withInfo = np.hstack((cls_boxes, cls_scores[:, np.newaxis], cls_infos)) \
.astype(np.float32, copy=False)
cls_dets, cls_dets_withInfo = nps(cls_dets, cls_dets_withInfo)
if cfg.TEST.USE_PNMS:
keep = pnms(cls_dets_withInfo, cfg.TEST.PNMS)
else:
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
cls_dets_withInfo = cls_dets_withInfo[keep, :]
vis(im, cls_dets_withInfo, 0.1)
|
photoshop/api/save_options/tif.py | MrTeferi/photoshop-python-api | 270 | 11140812 | <gh_stars>100-1000
# Import local modules
from photoshop.api._core import Photoshop
class TiffSaveOptions(Photoshop):
object_name = "TiffSaveOptions"
    def __init__(self):
super().__init__()
@property
def alphaChannels(self):
"""If true, the alpha channels are saved."""
return self.app.alphaChannels
@alphaChannels.setter
def alphaChannels(self, value):
self.app.alphaChannels = value
@property
def annotations(self):
"""If true, the annotations are saved."""
return self.app.annotations
@annotations.setter
def annotations(self, value):
self.app.annotations = value
@property
def byteOrder(self):
"""The order in which the bytes will be read.
Default:
Mac OS when running in Mac OS, and IBM PC when running in Windows.
"""
return self.app.byteOrder
@byteOrder.setter
def byteOrder(self, value):
self.app.byteOrder = value
@property
def embedColorProfile(self):
"""If true, the color profile is embedded in the document."""
return self.app.embedColorProfile
@embedColorProfile.setter
def embedColorProfile(self, value):
self.app.embedColorProfile = value
@property
def imageCompression(self):
"""The compression type."""
return self.app.imageCompression
@imageCompression.setter
def imageCompression(self, value):
self.app.imageCompression = value
@property
def interleaveChannels(self):
"""If true, the channels in the image are interleaved."""
return self.app.interleaveChannels
@interleaveChannels.setter
def interleaveChannels(self, value):
self.app.interleaveChannels = value
@property
def jpegQuality(self):
"""The quality of the produced image, which is inversely proportionate
to the amount of JPEG compression.
Valid only for JPEG compressed TIFF documents. Range: 0 to 12.
"""
return self.app.jpegQuality
@jpegQuality.setter
def jpegQuality(self, value):
self.app.jpegQuality = value
@property
def layerCompression(self):
return self.app.layerCompression
@layerCompression.setter
def layerCompression(self, value):
"""The method of compression to use when saving layers
(as opposed to saving composite data).
Valid only when `layers` = true.
"""
self.app.layerCompression = value
@property
def layers(self):
"""If true, the layers are saved."""
return self.app.layers
@layers.setter
def layers(self, value):
self.app.layers = value
@property
def saveImagePyramid(self):
"""If true, preserves multi-resolution information."""
return self.app.saveImagePyramid
@saveImagePyramid.setter
def saveImagePyramid(self, value):
self.app.saveImagePyramid = value
@property
def spotColors(self):
"""If true, spot colors are saved."""
return self.app.spotColors
@spotColors.setter
def spotColors(self, value):
self.app.spotColors = value
@property
def transparency(self):
return self.app.transparency
@transparency.setter
def transparency(self, value):
"""If true, saves the transparency as an additional alpha channel when
the file is opened in another application."""
self.app.transparency = value
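# A minimal usage sketch (an assumption about how this class is typically used
# with photoshop-python-api's Session helper; surrounding names are
# illustrative and may differ from the installed version):
#
#   from photoshop import Session
#   with Session() as ps:
#       options = ps.TiffSaveOptions()
#       options.layers = False
#       options.transparency = True
#       ps.active_document.saveAs("out.tif", options, asCopy=True)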
|
wuapis.py | nsacyber/BAM | 125 | 11140831 | <filename>wuapis.py
import sqlite3
import logging, logging.handlers
import globs
import BamLogger
from db.bam_analysis_db import prodvgtebyname
from support.utils import verifyhex
_wulogger = logging.getLogger("BAM.wuapis")
def db_logconfig(queue):
global _wulogger
qh = logging.handlers.QueueHandler(queue)
_wulogger.addHandler(qh)
_wulogger.setLevel(logging.DEBUG)
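# Illustrative note (not part of the original module): db_logconfig expects a
# multiprocessing-style queue that some listener drains; the real wiring lives
# elsewhere in BAM. A minimal sketch of that wiring (an assumption, standard
# library only):
#
#   import multiprocessing, logging, logging.handlers
#   queue = multiprocessing.Queue(-1)
#   listener = logging.handlers.QueueListener(queue, logging.StreamHandler())
#   listener.start()
#   db_logconfig(queue)   # this module now logs through the queue
#   ...
#   listener.stop()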
def getsupersededfromfiledigest(filedigest):
'''
Lists all superseded updates
'''
global _wulogger
result = []
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: getsupersededfromfile")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ('SET NOCOUNT ON;SELECT * FROM SUSDB.dbo.tbFileForRevision as ffr '
'JOIN SUSDB.dbo.tbBundleDependency tbd ON tbd.BundledRevisionID = ffr.RevisionID '
'JOIN SUSDB.dbo.tbRevisionSupersedesUpdate rsu ON rsu.RevisionID = tbd.RevisionID '
'JOIN SUSDB.dbo.tbUpdate u ON u.UpdateID = rsu.SupersededUpdateID '
'WHERE ffr.FileDigest = {}').format(hexfiledigest)
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from getsupersededfromfile")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from getsupersededfromfile (" + str(filedigest) + ")")
wsuscursor.close()
return result
def getsupersededfromfiledigest_custom(filedigest):
'''
    Maps a file digest to the updates it supersedes; determines whether the digest's update is superseding (lists all superseded updates for the file, if any).
'''
global _wulogger
result = []
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: getsupersededfromfile")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ('SET NOCOUNT ON;'
'DECLARE @supersededupdates table '
'(FileDigest varbinary(max), RevisionID INT, FileName varchar(max), '
'LegacyName varchar(max), SupersededUpdateID uniqueidentifier);'
'INSERT INTO @supersededupdates (FileDigest, RevisionID, FileName, LegacyName, SupersededUpdateID) '
'SELECT ffr.FileDigest, ffr.RevisionID, f.FileName, u.LegacyName, rsu.SupersededUpdateID '
'FROM SUSDB.dbo.tbFileForRevision as ffr '
'JOIN SUSDB.dbo.tbBundleDependency tbd ON tbd.BundledRevisionID = ffr.RevisionID '
'JOIN SUSDB.dbo.tbRevisionSupersedesUpdate rsu ON rsu.RevisionID = tbd.RevisionID '
'JOIN SUSDB.dbo.tbUpdate u ON u.UpdateID = rsu.SupersededUpdateID '
'JOIN SUSDB.dbo.tbFile f ON f.FileDigest = ffr.FileDigest '
'WHERE ffr.FileDigest = {};'
'SELECT * FROM @supersededupdates').format(hexfiledigest)
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from getsupersededfromfile")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from getsupersededfromfile (" + str(filedigest) + ")")
wsuscursor.close()
return result
def getsupersedingfromfile(filedigest):
global _wulogger
result = []
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: getsupersedingfromfile")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = (''
'SET NOCOUNT ON;DECLARE @supersedingupdates table (FileDigest varbinary(max), RevisionID INT, '
'LegacyName varchar(max), SuperRevisionID int);'
'INSERT INTO @supersedingupdates (FileDigest, RevisionID, LegacyName, SuperRevisionID) '
' SELECT ffr.FileDigest, ffr.RevisionID, u.LegacyName, rsu.RevisionID FROM SUSDB.dbo.tbFileForRevision as ffr'
' JOIN SUSDB.dbo.tbBundleDependency tbd ON tbd.BundledRevisionID = ffr.RevisionID'
' JOIN SUSDB.dbo.tbRevision r ON r.RevisionID = tbd.RevisionID'
' JOIN SUSDB.dbo.tbUpdate u ON u.LocalUpdateID = r.LocalUpdateID'
' JOIN SUSDB.dbo.tbRevisionSupersedesUpdate rsu ON rsu.SupersededUpdateID = u.UpdateID'
' WHERE ffr.FileDigest = {};'
'SELECT * FROM tbUpdate WHERE LocalUpdateID IN (SELECT LocalUpdateID FROM tbRevision'
' WHERE RevisionID IN (SELECT SuperRevisionID FROM @supersedingupdates));').format(hexfiledigest)
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from getsupersedingfromfile")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from getsupersedingfromfile (" + str(filedigest) + ")")
wsuscursor.close()
return result
def getfiledigestbattributeswodu(filedigest):
'''
Digest (cab/exe) to KB (file to KB) with other WSUS information (Bundled)
without DefinitionUpdates
'''
global _wulogger
result = []
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: getfiledigestbattributeswodu")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ('SET NOCOUNT ON;SELECT * FROM SUSDB.dbo.tbFileForRevision AS ffr '
'JOIN SUSDB.dbo.tbBundleDependency tbd ON tbd.BundledRevisionID = ffr.RevisionID '
'JOIN SUSDB.dbo.tbRevision r ON r.RevisionID = tbd.RevisionID '
'JOIN SUSDB.dbo.tbUpdate u ON u.LocalUpdateID = r.LocalUpdateID '
'JOIN SUSDB.PUBLIC_VIEWS.vUpdate vu ON vu.UpdateId = u.UpdateID '
"WHERE ffr.FileDigest = {} AND ClassificationId != 'E0789628-CE08-4437-BE74-2495B842F43B'").format(hexfiledigest)
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from getfiledigestbattributeswodu")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from getfiledigestbattributeswodu (" + str(filedigest) + ")")
wsuscursor.close()
return result
def getfiledigestbattributes(filedigest):
'''
Digest (cab/exe) to KB (file to KB) with other WSUS information (Bundled)
'''
global _wulogger
result = []
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: getfiledigestbattributes")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ('SET NOCOUNT ON;SELECT * FROM SUSDB.dbo.tbFileForRevision AS ffr '
'JOIN SUSDB.dbo.tbBundleDependency tbd ON tbd.BundledRevisionID = ffr.RevisionID '
'JOIN SUSDB.dbo.tbRevision r ON r.RevisionID = tbd.RevisionID '
'JOIN SUSDB.dbo.tbUpdate u ON u.LocalUpdateID = r.LocalUpdateID '
'JOIN SUSDB.PUBLIC_VIEWS.vUpdate vu ON vu.UpdateId = u.UpdateID '
' WHERE ffr.FileDigest = {}').format(hexfiledigest)
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from getfiledigestbattributes")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from getfiledigestbattributes (" + str(filedigest) + ")")
wsuscursor.close()
return result
def getfiledigestattributeswodu(filedigest):
'''
Digest (cab/exe) to KB (file to KB) with other WSUS information
without DefinitionUpdates.
May return multiple results.
'''
global _wulogger
result = []
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: getfiledigestattributeswodu")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ('SET NOCOUNT ON;SELECT * FROM SUSDB.dbo.tbFileForRevision AS ffr'
' JOIN SUSDB.dbo.tbRevision r ON r.RevisionID = ffr.RevisionID'
' JOIN SUSDB.dbo.tbUpdate u ON u.LocalUpdateID = r.LocalUpdateID'
' JOIN SUSDB.PUBLIC_VIEWS.vUpdate vu ON vu.UpdateId = u.UpdateID'
" WHERE ffr.FileDigest = {} AND ClassificationId != 'E0789628-CE08-4437-BE74-2495B842F43B'").format(hexfiledigest)
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from getfiledigestattributeswodu")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from getfiledigestattributeswodu (" + str(filedigest) + ")")
wsuscursor.close()
return result
def getfiledigestattributes(filedigest):
'''
Digest (cab/exe) to KB (file to KB) with other WSUS information.
May return multiple results.
'''
global _wulogger
result = []
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: getfiledigestattributes")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ('SET NOCOUNT ON;SELECT * FROM SUSDB.dbo.tbFileForRevision AS ffr'
' JOIN SUSDB.dbo.tbRevision r ON r.RevisionID = ffr.RevisionID'
' JOIN SUSDB.dbo.tbUpdate u ON u.LocalUpdateID = r.LocalUpdateID'
' JOIN SUSDB.PUBLIC_VIEWS.vUpdate vu ON vu.UpdateId = u.UpdateID'
' WHERE ffr.FileDigest = {}').format(hexfiledigest)
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from getfiledigestattributes")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from getfiledigestattributes (" + str(filedigest) + ")")
wsuscursor.close()
return result
def getfileattrbyfnprodv(filename, prodversion):
bamcursor = globs.DBCONN.cursor()
wsuscursor = globs.DBWSUSCONN.cursor()
filelist = prodvgtebyname(bamcursor, filename, prodversion)
hashlist = []
for row in filelist:
for column in row.keys():
if column == 'UpdateId':
result = findupdate(row[column])
if len(result) == 0:
continue
hexfiledigest = verifyhex("0x" + row[column])
if hexfiledigest is None:
_wulogger.log(logging.DEBUG,
"[WUAPIS] {} not valid hex: getfileattrbyfnprodv".format(row[column]))
continue
hashlist.append(hexfiledigest)
fileattrlist = []
for hash in hashlist:
r = getfiledigestbattributes(hash)
if len(r) == 0:
continue
fileattrlist.append(r)
bamcursor.close()
wsuscursor.close()
return fileattrlist
def findupdate(updateid):
global _wulogger
result = []
if not isinstance(updateid, str):
return result
wsuscursor = globs.DBWSUSCONN.cursor()
bamcursor = globs.DBCONN.cursor()
utbname = globs.UPDATEFILESDBNAME
check = bamcursor.execute("SELECT FileName FROM {} WHERE FileName = '{}'".format(utbname, updateid))
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from findupdate")
bamcursor.close()
wsuscursor.close()
return result
result = bamcursor.fetchall()
bamcursor.close()
wsuscursor.close()
return result
def getKBoffiledigest(filedigest):
'''
Digest (cab/exe) to KB (file to KB)
'''
global _wulogger
result = []
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: getKBoffiledigest")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ('SET NOCOUNT ON;SELECT * FROM SUSDB.dbo.tbFileForRevision as ffr '
'JOIN SUSDB.dbo.tbBundleDependency tbd ON ffr.RevisionID = tbd.BundledRevisionID '
'JOIN SUSDB.dbo.tbKBArticleForRevision kbfr ON kbfr.RevisionID = tbd.RevisionID '
'WHERE ffr.FileDigest = {}').format(hexfiledigest)
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from getKBoffiledigest")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from getKBoffiledigest (" + str(filedigest) + ")")
wsuscursor.close()
return result
def getKBtofiledigest(kbarticle):
'''
KB to file(s) without matching platform
'''
global _wulogger
result = []
try:
kbarticleint = int(kbarticle)
except ValueError:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid int: getKBtofiledigest")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ('SET NOCOUNT ON;SELECT f.FileDigest, f.FileName, kbafr.KBArticleID '
'FROM SUSDB.dbo.tbKBArticleForRevision kbafr '
'JOIN SUSDB.dbo.tbBundleDependency bd ON kbafr.RevisionID = bd.RevisionID '
'JOIN SUSDB.dbo.tbFileForRevision ffr ON ffr.RevisionID = bd.BundledRevisionID '
'JOIN SUSDB.dbo.tbFile f ON f.FileDigest = ffr.FileDigest '
'WHERE kbafr.KBArticleID = {} ORDER BY FileDigest').format(str(kbarticle))
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from getKBtofiledigest")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from getKBtofiledigest (" + str(kbarticle) + ")")
wsuscursor.close()
return result
def getKBtoufiledigest(kbarticle, filedigest):
'''
KB to filedigest with matching platform
'''
global _wulogger
result = []
try:
kbarticleint = int(kbarticle)
except ValueError:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid int: getKBtofiledigest")
return result
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: getKBoffiledigest")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ('SET NOCOUNT ON;SELECT f.FileDigest, f.FileName, kbafr.KBArticleID '
'FROM SUSDB.dbo.tbKBArticleForRevision kbafr '
'JOIN SUSDB.dbo.tbBundleDependency bd ON kbafr.RevisionID = bd.RevisionID '
'JOIN SUSDB.dbo.tbFileForRevision ffr ON ffr.RevisionID = bd.BundledRevisionID '
'JOIN SUSDB.dbo.tbFile f ON f.FileDigest = ffr.FileDigest '
'WHERE kbafr.KBArticleID = {} AND ffr.FileDigest = {} '
'').format(str(kbarticle), hexfiledigest)
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from getKBtofiledigest")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from getKBtofiledigest (" + str(kbarticle) + ")")
wsuscursor.close()
return result
def findfileswithkb(kbarticle):
'''
find files that have a filename with KB number in it. May not guarantee to capture all related files.
'''
global _wulogger
result = []
try:
kbarticleint = int(kbarticle)
except ValueError:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid int: findfileswithkb")
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ("SET NOCOUNT ON;SELECT FileName, FileDigest FROM SUSDB.dbo.tbFile "
"WHERE FileName collate SQL_Latin1_General_CP1_CI_AS LIKE '%{}%'").format(str(kbarticle))
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from findfileswithkb")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from findfileswithkb (" + str(kbarticle) + ")")
wsuscursor.close()
return result
def findupdateinfo(updateid):
global _wulogger
result = []
if not isinstance(updateid, str):
return result
wsuscursor = globs.DBWSUSCONN.cursor()
tsql = ("SET NOCOUNT ON;SELECT * FROM SUSDB.PUBLIC_VIEWS.vUpdate "
"WHERE UpdateId = CAST('{}' as uniqueidentifier)").format(updateid)
check = wsuscursor.execute(tsql)
if check is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] Did not find entries from findupdateinfo")
wsuscursor.close()
return result
result = wsuscursor.fetchall()
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from findupdateinfo" + "(" + str(updateid) +")")
wsuscursor.close()
return result
def kbtosupersedingkb(kbarticle, filedigest):
global _wulogger
result = []
try:
kbarticleint = int(kbarticle)
except ValueError:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid int: kbtosupersedingkb" + "(" + str(kbarticle) + ")")
return result
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: kbtosupersedingkb")
return result
updateinfo = []
fdlist = getKBtoufiledigest(kbarticle, hexfiledigest)
if len(fdlist) == 0:
return result
for filed in fdlist:
superfiles = getsupersedingfromfile(filed[0])
if len(superfiles) == 0:
continue
for superfile in superfiles:
uinfo = findupdateinfo(superfile[1])
if len(uinfo) == 0:
continue
if uinfo[0][13] is not None:
updateinfo.append(uinfo[0][13])
kbsorted = []
if len(updateinfo) != 0:
kbsorted = list(sorted(set(updateinfo)))
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from kbtosupersedingkb (" + str(kbarticle) + ")")
return kbsorted
def kbtosupersededkb(kbarticle, filedigest):
global _wulogger
result = []
try:
kbarticleint = int(kbarticle)
except ValueError:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid int: kbtosupersededkb")
return result
hexfiledigest = verifyhex(filedigest)
if hexfiledigest is None:
_wulogger.log(logging.DEBUG, "[WUAPIS] argument not valid hex: kbtosupersededkb")
return result
updateinfo = []
fdlist = getKBtoufiledigest(kbarticle, hexfiledigest)
if len(fdlist) == 0:
return result
for filed in fdlist:
superfiles = getsupersededfromfiledigest(filed[0])
if len(superfiles) == 0:
continue
for superfile in superfiles:
uinfo = findupdateinfo(superfile[6])
if len(uinfo) == 0:
continue
if uinfo[0][13] is not None:
updateinfo.append(uinfo[0][13])
kbsorted = []
if len(updateinfo) != 0:
kbsorted = list(sorted(set(updateinfo)))
_wulogger.log(logging.DEBUG, "[WUAPIS] Found entries from kbtosupersededkb (" + str(kbarticle) + ")")
return kbsorted
def updatewuentrysecedenceinfo():
bamcursor = globs.DBCONN.cursor()
bamcursor.execute("SELECT SHA1 FROM " + globs.UPDATEFILESDBNAME + " WHERE Seceding = '' OR SecededBy = ''")
result = bamcursor.fetchall()
count = 0
bamcursor.execute("BEGIN TRANSACTION")
for row in result:
for column in row.keys():
if str(row[column]) != 'None':
fattrs = getfiledigestbattributeswodu(row[column])
kbarticle = None
if len(fattrs) == 0: # Update is not part of a bundle
fattrs = getfiledigestattributeswodu(row[column])
if len(fattrs) == 0:
_wulogger.log(logging.DEBUG, "[WUAPIS] Possibly a DefinitionUpdate. Skipping...")
continue
kbarticle = fattrs[0][42]
else:
kbarticle = fattrs[0][44]
# check if file has an associated KB number
if kbarticle is not None:
superseding = kbtosupersedingkb(kbarticle, row[column])
superseded = kbtosupersededkb(kbarticle, row[column])
secededlist = ""
secedinglist = ""
if len(superseding) == 0 and len(superseded) == 0:
pass
elif len(superseding) == 0:
secededlist = ','.join(superseded)
bamcursor.execute(("UPDATE UpdateFiles"
" SET Seceding = "
"'{}' WHERE SHA1 = '{}'").format(secededlist, row[column]))
elif len(superseded) == 0:
secedinglist = ",".join(superseding)
bamcursor.execute(("UPDATE UpdateFiles"
" SET SecededBy = "
"'{}' WHERE SHA1 = '{}'").format(secedinglist, row[column]))
else:
# both lists are non-empty: join them before updating both columns
secededlist = ','.join(superseded)
secedinglist = ",".join(superseding)
bamcursor.execute(("UPDATE UpdateFiles"
" SET Seceding = "
"'{}' WHERE SHA1 = '{}'").format(secededlist, row[column]))
bamcursor.execute(("UPDATE UpdateFiles"
" SET SecededBy = "
"'{}' WHERE SHA1 = '{}'").format(secedinglist, row[column]))
count = count + 1
if count % 5000 == 0:
bamcursor.execute("END TRANSACTION")
bamcursor.execute("BEGIN TRANSACTION")
else:
_wulogger.log(logging.DEBUG, "[WUAPIS] Skipping no KB...")
bamcursor.execute("END TRANSACTION")
bamcursor.close()
return result
|
bin/update_readme_changelog.py | bbayles/cibuildwheel | 702 | 11140850 | #!/usr/bin/env python3
import re
import sys
from pathlib import Path
PROJECT_ROOT = Path(__file__).parent / ".."
CHANGELOG_FILE = PROJECT_ROOT / "docs" / "changelog.md"
README_FILE = PROJECT_ROOT / "README.md"
# https://regexr.com/622ds
FIRST_5_CHANGELOG_ENTRIES_REGEX = re.compile(r"""(^###.*?(?=###)){5}""", re.DOTALL | re.MULTILINE)
# https://regexr.com/622e5
README_CHANGELOG_SECTION = re.compile(
r"""(?<=<!-- START bin\/update_readme_changelog.py -->\n).*(?=<!-- END bin\/update_readme_changelog.py -->)""",
re.DOTALL,
)
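# Descriptive note: FIRST_5_CHANGELOG_ENTRIES_REGEX captures the first five "###" entries of
# docs/changelog.md, and README_CHANGELOG_SECTION matches the text between the START/END
# marker comments in README.md so main() can swap that section in place.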
def main():
changelog_text = CHANGELOG_FILE.read_text()
readme_text = README_FILE.read_text()
mini_changelog_match = FIRST_5_CHANGELOG_ENTRIES_REGEX.search(changelog_text)
assert mini_changelog_match, "Failed to find the first few changelog entries"
mini_changelog = "\n".join(
[
"",
"<!-- this section was generated by bin/update_readme_changelog.py -- do not edit manually -->",
"",
mini_changelog_match.group(0).strip(),
"",
"",
]
)
if not re.search(README_CHANGELOG_SECTION, readme_text):
sys.exit("Changelog section not found in README")
readme_text = re.sub(README_CHANGELOG_SECTION, mini_changelog, readme_text)
README_FILE.write_text(readme_text)
if __name__ == "__main__":
main()
|
recipes/Python/578199_Rookies_Backup_Program/recipe-578199.py | tdiprima/code | 2,023 | 11140859 | import os
import sys
def main():
try:
source, destination = sys.argv[1:]
assert os.path.isdir(source), '<source_directory> is not a directory'
if os.path.exists(destination):
assert os.path.isdir(destination), \
'<destination_directory> is not a directory'
else:
os.makedirs(destination)
copy(source, destination)
except Exception, error:
program = 'USAGE: %s <source_directory> <destination_directory>' % \
os.path.basename(sys.argv[0])
problem = 'ERROR: %s' % error
divider = '=' * max(len(program), len(problem))
sys.stdout.write('\n%s\n%s\n%s\n' % (program, divider, problem))
def copy(source, destination):
for name in os.listdir(source):
source_name = os.path.join(source, name)
destination_name = os.path.join(destination, name)
try:
if os.path.isdir(source_name):
os.mkdir(destination_name)
copy(source_name, destination_name)
elif os.path.isfile(source_name):
file(destination_name, 'wb').write(
file(source_name, 'rb').read())
except:
sys.stderr.write('\n%s\n%s\n' % (source_name, destination_name))
if __name__ == '__main__':
main()
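# Example invocation (paths are hypothetical); this recipe targets Python 2:
#     python recipe-578199.py C:\projects C:\backup\projects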
|
src/Functions/Web/Javbus.py | yepcn/javsdt | 1,064 | 11140872 | # -*- coding:utf-8 -*-
import re
import requests
from Class.MyEnum import ScrapeStatusEnum
# from traceback import format_exc
from Class.MyError import SpecifiedUrlError
# Search javbus, or request the javbus page for a given jav title; returns the HTML
def get_bus_html(url, proxy):
for retry in range(10):
try:
if proxy: # the existmag=all cookie returns every film, not only the default ones with magnet links
rqs = requests.get(url, proxies=proxy, timeout=(6, 7), headers={'Cookie': 'existmag=all'})
else:
rqs = requests.get(url, timeout=(6, 7), headers={'Cookie': 'existmag=all'})
except requests.exceptions.ProxyError:
# print(format_exc())
print(' >通过局部代理失败,重新尝试...')
continue
except:
# print(format_exc())
print(f' >打开网页失败,重新尝试...{url}')
continue
rqs.encoding = 'utf-8'
rqs_content = rqs.text
if re.search(r'JavBus', rqs_content):
return rqs_content
else:
print(' >打开网页失败,空返回...重新尝试...')
continue
input(f'>>请检查你的网络环境是否可以打开: {url}')
# Look up the series name on javbus, plus the cover image link hosted on javbus
# Returns: series name, image link, status code
def scrape_from_bus(jav_file, jav_model, url_bus, proxy):
status = ScrapeStatusEnum.bus_not_found
# The user specified a URL, so take the jav's page address directly from it
if '公交车' in jav_file.Name:
url_appointg = re.search(r'公交车(.+?)\.', jav_file.Name)
if url_appointg:
url_jav_bus = f'{url_bus}/{url_appointg.group(1)}'
html_jav_bus = get_bus_html(url_jav_bus, proxy)
if re.search(r'404 Page', html_jav_bus):
raise SpecifiedUrlError(f'你指定的javbus网址找不到jav: {url_jav_bus},')
jav_model.Javbus = url_appointg.group(1)
status = ScrapeStatusEnum.success
else:
# The specified javbus URL is malformed
raise SpecifiedUrlError(f'你指定的javbus网址有错误: ')
# The user did not specify a URL, so search for the film instead
else:
html_jav_bus = ''
# The jav's URL on javbus is usually just the javbus address followed by the car id
url_jav_bus = f'{url_bus}/{jav_file.Car_id}'
print(' >前往javbus: ', url_jav_bus)
# Fetch the film's page on javbus
html_temp = get_bus_html(url_jav_bus, proxy)
if not re.search(r'404 Page', html_temp):
html_jav_bus = html_temp
jav_model.Javbus = jav_file.Car_id
status = ScrapeStatusEnum.success
# This jav's javbus URL is not the simple form
else:
# Fall back to a real search
url_search_bus = f'{url_bus}/search/{jav_file.Car_id.replace("-", "")}&type=1&parent=ce'
print(' >搜索javbus: ', url_search_bus)
html_search_bus = get_bus_html(url_search_bus, proxy)
# The search results page usually holds a single result, but may contain several
# Try to find the movie-box entries
list_search_results = re.findall(r'movie-box" href="(.+?)">', html_search_bus) # match the result "title" links
if list_search_results:
pref = jav_file.Car_id.split('-')[0] # prefix letters of the car id
suf = jav_file.Car_id.split('-')[-1].lstrip('0') # numeric suffix of the current car id, leading zeros removed
list_fit_results = [] # holds the results whose car id matches
for i in list_search_results:
url_end = i.split('/')[-1].upper()
url_suf = re.search(r'[-_](\d+)', url_end).group(1).lstrip('0') # numeric suffix of the car id in the movie-box URL, leading zeros removed
if suf == url_suf: # the numbers match
url_pref = re.search(r'([A-Z]+2?8?)', url_end).group(1).upper() # letters preceding the number in the URL's car id
if pref == url_pref: # numbers and letters both match, so the car ids are likely the same
list_fit_results.append(i)
# There are matching results
if list_fit_results:
# Multiple results: report a status code to warn the user
if len(list_fit_results) > 1:
status = ScrapeStatusEnum.bus_multiple_search_results
else:
status = ScrapeStatusEnum.success
# Use the first search result by default
url_first_result = list_fit_results[0]
jav_model.Javbus = url_first_result
print(' >获取系列: ', url_first_result)
html_jav_bus = get_bus_html(url_first_result, proxy)
genres = []
if html_jav_bus:
# DVD cover image
coverg = re.search(r'bigImage" href="/pics/cover/(.+?)"', html_jav_bus)
if coverg:
jav_model.CoverBus = coverg.group(1)
# 系列:</span> <a href="https://www.cdnbus.work/series/kpl">悪質シロウトナンパ</a>
seriesg = re.search(r'系列:</span> <a href=".+?">(.+?)</a>', html_jav_bus)
if seriesg and not jav_model.Series:
jav_model.Series = seriesg.group(1)
# Genre/feature tags
genres = re.findall(r'gr_sel" value="\d+"><a href=".+">(.+?)</a>', html_jav_bus)
return status, genres
|
keras/keras/layers/core.py | molingbo/crcn | 167 | 11140880 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import theano
import theano.tensor as T
from .. import activations, initializations
from ..utils.theano_utils import shared_zeros, floatX
from ..utils.generic_utils import make_tuple
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from six.moves import zip
srng = RandomStreams()
class Layer(object):
def __init__(self):
self.params = []
def connect(self, previous_layer):
self.previous_layer = previous_layer
def output(self, train):
raise NotImplementedError
def get_input(self, train):
if hasattr(self, 'previous_layer'):
return self.previous_layer.output(train=train)
else:
return self.input
def set_weights(self, weights):
for p, w in zip(self.params, weights):
p.set_value(floatX(w))
def get_weights(self):
weights = []
for p in self.params:
weights.append(p.get_value())
return weights
def get_config(self):
return {"name":self.__class__.__name__}
class Dropout(Layer):
'''
Hinton's dropout.
'''
def __init__(self, p):
super(Dropout,self).__init__()
self.p = p
def output(self, train):
X = self.get_input(train)
if self.p > 0.:
retain_prob = 1. - self.p
if train:
X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
else:
X *= retain_prob
return X
def get_config(self):
return {"name":self.__class__.__name__,
"p":self.p}
class Activation(Layer):
'''
Apply an activation function to an output.
'''
def __init__(self, activation, target=0, beta=0.1):
super(Activation,self).__init__()
self.activation = activations.get(activation)
self.target = target
self.beta = beta
def output(self, train):
X = self.get_input(train)
return self.activation(X)
def get_config(self):
return {"name":self.__class__.__name__,
"activation":self.activation.__name__,
"target":self.target,
"beta":self.beta}
class Reshape(Layer):
'''
Reshape an output to a certain shape.
Can't be used as first layer in a model (no fixed input!)
First dimension is assumed to be nb_samples.
'''
def __init__(self, *dims):
super(Reshape,self).__init__()
self.dims = dims
def output(self, train):
X = self.get_input(train)
nshape = make_tuple(X.shape[0], *self.dims)
return theano.tensor.reshape(X, nshape)
def get_config(self):
return {"name":self.__class__.__name__,
"dims":self.dims}
class Flatten(Layer):
'''
Reshape input to flat shape.
First dimension is assumed to be nb_samples.
'''
def __init__(self):
super(Flatten,self).__init__()
def output(self, train):
X = self.get_input(train)
size = theano.tensor.prod(X.shape) // X.shape[0]
nshape = (X.shape[0], size)
return theano.tensor.reshape(X, nshape)
class RepeatVector(Layer):
'''
Repeat input n times.
Dimensions of input are assumed to be (nb_samples, dim).
Return tensor of shape (nb_samples, n, dim).
'''
def __init__(self, n):
super(RepeatVector,self).__init__()
self.n = n
def output(self, train):
X = self.get_input(train)
tensors = [X]*self.n
stacked = theano.tensor.stack(*tensors)
return stacked.dimshuffle((1,0,2))
def get_config(self):
return {"name":self.__class__.__name__,
"n":self.n}
class Dense(Layer):
'''
Just your regular fully connected NN layer.
'''
def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None,
W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None):
super(Dense,self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.input_dim = input_dim
self.output_dim = output_dim
self.input = T.matrix()
self.W = self.init((self.input_dim, self.output_dim))
self.b = shared_zeros((self.output_dim))
self.params = [self.W, self.b]
self.regularizers = [W_regularizer, b_regularizer]
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
def output(self, train):
X = self.get_input(train)
output = self.activation(T.dot(X, self.W) + self.b)
return output
def get_config(self):
return {"name":self.__class__.__name__,
"input_dim":self.input_dim,
"output_dim":self.output_dim,
"init":self.init.__name__,
"activation":self.activation.__name__}
class TimeDistributedDense(Layer):
'''
Apply the same Dense layer to each slice along dimension 1 (shared_dimension) of the input
Especially useful after a recurrent network with 'return_sequences=True'
Tensor input dimensions: (nb_sample, shared_dimension, input_dim)
Tensor output dimensions: (nb_sample, shared_dimension, output_dim)
'''
def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None,
W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None):
super(TimeDistributedDense,self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.input_dim = input_dim
self.output_dim = output_dim
self.input = T.tensor3()
self.W = self.init((self.input_dim, self.output_dim))
self.b = shared_zeros((self.output_dim))
self.params = [self.W, self.b]
self.regularizers = [W_regularizer, b_regularizer]
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
def output(self, train):
X = self.get_input(train)
def act_func(X):
return self.activation(T.dot(X, self.W) + self.b)
output, _ = theano.scan(fn = act_func,
sequences = X.dimshuffle(1,0,2),
outputs_info=None)
return output.dimshuffle(1,0,2)
def get_config(self):
return {"name":self.__class__.__name__,
"input_dim":self.input_dim,
"output_dim":self.output_dim,
"init":self.init.__name__,
"activation":self.activation.__name__}
|
hata/ext/commands_v2/utils.py | Multiface24111/hata | 173 | 11140901 | __all__ = ()
def raw_name_to_display(raw_name):
"""
Converts the given raw command or it's parameter's name to it's display name.
Parameters
----------
raw_name : `str`
The name to convert.
Returns
-------
display_name : `str`
The converted name.
"""
return '-'.join([w for w in raw_name.strip('_ ').lower().replace(' ', '-').replace('_', '-').split('-') if w])
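# Illustrative conversions (inputs are hypothetical):
#     raw_name_to_display('  Ban_User ')  -> 'ban-user'
#     raw_name_to_display('show__stats')  -> 'show-stats'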
def normalize_description(description):
"""
Normalizes a docstring.
Parameters
----------
description : `str` or `Any`
The docstring to clear.
Returns
-------
cleared : `str` or `Any`
The cleared docstring. If `description` was given as `None` or is detected as empty, will return `None`.
"""
if (description is None) or (not isinstance(description, str)):
return description
lines = description.splitlines()
for index in reversed(range(len(lines))):
line = lines[index]
line = line.strip()
if line:
lines[index] = line
else:
del lines[index]
if not lines:
return None
return ' '.join(lines)
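# Illustrative call (docstring content is hypothetical):
#     normalize_description("  Adds two numbers.\n\n  Returns the sum.  ")
#     -> 'Adds two numbers. Returns the sum.'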
|
models/datasets/hd5.py | povezava/pytorch_GAN_zoo | 1,545 | 11140928 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import torch
import h5py
import copy
from .utils.db_stats import buildKeyOrder
class H5Dataset(torch.utils.data.Dataset):
def __init__(self,
file_path,
partition_path=None,
partition_value=None,
transform=None,
specificAttrib=None,
stats_file=None,
pathDBMask=None):
super(H5Dataset, self).__init__()
self.path = file_path
self.partition_path = partition_path
self.partition_value = partition_value
if self.partition_value is None:
self.partition_path = None
print("No partition value found, ignoring the partition file")
self.h5_file = None
self.partition_file = None
self.transform = transform
self.attribKeys = copy.deepcopy(specificAttrib)
self.statsData = None
self.totAttribSize = 0
if stats_file is not None:
with open(stats_file, 'rb') as file:
self.statsData = json.load(file)
if self.partition_value is None and "GLOBAL" in self.statsData:
self.statsData = self.statsData["GLOBAL"]
elif self.partition_value in self.statsData:
self.statsData = self.statsData[self.partition_value]
self.buildAttribShift()
self.pathDBMask = pathDBMask
self.maskFile = None
def __getitem__(self, index):
if self.h5_file is None:
self.h5_file = h5py.File(self.path, 'r')
if self.partition_path is not None:
self.partition_file = h5py.File(self.partition_path, 'r')
if self.partition_file is not None:
index = self.partition_file[self.partition_value][index]
img = self.h5_file['input_image'][index]
if self.transform is not None:
img = self.transform(img)
if self.statsData is not None:
attr = [None for x in range(self.totAttribSize)]
for key in self.attribKeys:
label = str(self.h5_file[key][index][0])
shift = self.attribShift[key]
attr[shift] = self.attribShiftVal[key][label]
else:
attr = [0]
if self.pathDBMask is not None:
if self.maskFile is None:
self.maskFile = h5py.File(self.pathDBMask, 'r')
mask = self.maskFile["mask"][index]
mask = self.transform(mask)
img = img * (mask + 1.0) * 0.5 + (1 - mask) * 0.5
return img, torch.tensor(attr), mask
return img, torch.tensor(attr)
def __len__(self):
if self.partition_path is None:
with h5py.File(self.path, 'r') as db:
lens = len(db['input_image'])
else:
with h5py.File(self.partition_path, 'r') as db:
lens = len(db[self.partition_value])
return lens
def getName(self, index):
if self.partition_path is not None:
if self.partition_file is None:
self.partition_file = h5py.File(self.partition_path, 'r')
return self.partition_file[self.partition_value][index]
return index
def buildAttribShift(self):
self.attribShift = None
self.attribShiftVal = None
if self.statsData is None:
return
if self.attribKeys is None:
self.attribKeys = [x for x in self.statsData.keys() if
x != "totalSize"]
self.attribShift = {}
self.attribShiftVal = {}
self.totAttribSize = 0
for key in self.attribKeys:
self.attribShift[key] = self.totAttribSize
self.attribShiftVal[key] = {
name: c
for c, name in enumerate(list(self.statsData[key].keys()))}
self.totAttribSize += 1
def getKeyOrders(self, equlizationWeights=False):
if equlizationWeights:
raise ValueError("Equalization weight not implemented yet")
return buildKeyOrder(self.attribShift, self.attribShiftVal)
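# Illustrative construction (file names and transform are hypothetical):
#     dataset = H5Dataset("images.h5", stats_file="stats.json", transform=my_transform)
#     img, attrib = dataset[0]   # (img, attrib, mask) when pathDBMask is given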
|
tools/accuracy_checker/accuracy_checker/adapters/pose_estimation_openpose.py | APrigarina/open_model_zoo | 1,031 | 11140932 | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
try:
from numpy.core.umath import clip
except ImportError:
from numpy import clip
from ..adapters import Adapter
from ..config import ConfigValidator, StringField, ConfigError, NumberField
from ..representation import PoseEstimationPrediction
from ..utils import contains_any, UnsupportedPackage
try:
from skimage.measure import block_reduce
except ImportError as import_error:
block_reduce = UnsupportedPackage('skimage.measure', import_error.msg)
class OpenPoseAdapter(Adapter):
__provider__ = 'human_pose_estimation_openpose'
prediction_types = (PoseEstimationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'part_affinity_fields_out': StringField(
description="Name of output layer with keypoints pairwise relations (part affinity fields).",
optional=True
),
'keypoints_heatmap_out': StringField(
description="Name of output layer with keypoints heatmaps.", optional=True
),
'upscale_factor': NumberField(
description="Upscaling factor for output feature maps before postprocessing.",
value_type=float, min_value=1, default=1, optional=True
),
})
return parameters
@classmethod
def validate_config(cls, config, fetch_only=False, **kwargs):
return super().validate_config(
config, fetch_only=fetch_only, on_extra_argument=ConfigValidator.WARN_ON_EXTRA_ARGUMENT
)
def configure(self):
self.upscale_factor = self.get_value_from_config('upscale_factor')
self.part_affinity_fields = self.get_value_from_config('part_affinity_fields_out')
self.keypoints_heatmap = self.get_value_from_config('keypoints_heatmap_out')
self.concat_out = self.part_affinity_fields is None and self.keypoints_heatmap is None
if not self.concat_out:
contains_both = self.part_affinity_fields is not None and self.keypoints_heatmap is not None
if not contains_both:
raise ConfigError(
'human_pose_estimation adapter should contain both: keypoints_heatmap_out '
'and part_affinity_fields_out or not contain them at all (in single output model case)'
)
self._keypoints_heatmap_bias = self.keypoints_heatmap + '/add_'
self._part_affinity_fields_bias = self.part_affinity_fields + '/add_'
self.decoder = OpenPoseDecoder(num_joints=18, delta=0.5 if self.upscale_factor == 1 else 0.0)
if isinstance(block_reduce, UnsupportedPackage):
block_reduce.raise_error(self.__provider__)
self.nms = HeatmapNMS(kernel=2 * int(np.round(6 / 7 * self.upscale_factor)) + 1)
def process(self, raw, identifiers, frame_meta):
result = []
raw_outputs = self._extract_predictions(raw, frame_meta)
if not self.concat_out:
if not contains_any(raw_outputs, [self.part_affinity_fields, self._part_affinity_fields_bias]):
raise ConfigError('part affinity fields output not found')
if not contains_any(raw_outputs, [self.keypoints_heatmap, self._keypoints_heatmap_bias]):
raise ConfigError('keypoints heatmap output not found')
keypoints_heatmap = raw_outputs[
self.keypoints_heatmap if self.keypoints_heatmap in raw_outputs else self._keypoints_heatmap_bias
]
pafs = raw_outputs[
self.part_affinity_fields if self.part_affinity_fields in raw_outputs
else self._part_affinity_fields_bias
]
raw_output = zip(identifiers, keypoints_heatmap, pafs, frame_meta)
else:
concat_out = raw_outputs[self.output_blob]
keypoints_num = concat_out.shape[1] // 3
keypoints_heat_map = concat_out[:, :keypoints_num, :]
pafs = concat_out[:, keypoints_num:, :]
raw_output = zip(identifiers, keypoints_heat_map, pafs, frame_meta)
for identifier, heatmap, paf, meta in raw_output:
output_h, output_w = heatmap.shape[-2:]
if self.upscale_factor > 1:
self.decoder.delta = 0
heatmap = np.transpose(heatmap, (1, 2, 0))
heatmap = cv2.resize(heatmap, (0, 0), fx=self.upscale_factor, fy=self.upscale_factor,
interpolation=cv2.INTER_CUBIC)
heatmap = np.transpose(heatmap, (2, 0, 1))
paf = np.transpose(np.squeeze(paf), (1, 2, 0))
paf = cv2.resize(paf, (0, 0), fx=self.upscale_factor, fy=self.upscale_factor,
interpolation=cv2.INTER_CUBIC)
paf = np.transpose(paf, (2, 0, 1))
hmap = heatmap[None]
nms_hmap = self.nms(hmap)
poses, scores = self.decoder(hmap, nms_hmap, paf[None])
if len(scores) == 0:
result.append(PoseEstimationPrediction(
identifier,
np.empty((0, 17), dtype=float),
np.empty((0, 17), dtype=float),
np.empty((0, 17), dtype=float),
np.empty((0, ), dtype=float)
))
continue
poses = poses.astype(float)
scores = np.asarray(scores).astype(float)
scale_x = meta['scale_x']
scale_y = meta['scale_y']
input_h, input_w = next(iter(meta['input_shape'].values()))[-2:]
output_scale_x = input_w / output_w
output_scale_y = input_h / output_h
poses[:, :, 0] *= output_scale_x / self.upscale_factor / scale_x
poses[:, :, 1] *= output_scale_y / self.upscale_factor / scale_y
point_scores = poses[:, :, 2]
result.append(PoseEstimationPrediction(
identifier,
poses[:, :, 0],
poses[:, :, 1],
point_scores,
scores))
return result
class HeatmapNMS:
def __init__(self, kernel):
self.kernel = kernel
self.pad = (kernel - 1) // 2
def max_pool(self, x):
# Max pooling kernel x kernel with stride 1 x 1.
k = self.kernel
p = self.pad
pooled = np.zeros_like(x)
hmap = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)))
h, w = hmap.shape[-2:]
for i in range(k):
n = (h - i) // k * k
for j in range(k):
m = (w - j) // k * k
hmap_slice = hmap[..., i:i + n, j:j + m]
pooled[..., i::k, j::k] = block_reduce(hmap_slice, (1, 1, k, k), np.max)
return pooled
def __call__(self, heatmaps):
pooled = self.max_pool(heatmaps)
return heatmaps * (pooled == heatmaps).astype(heatmaps.dtype)
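# Usage sketch (shapes are hypothetical): for heatmaps of shape (N, K, H, W),
#     nms = HeatmapNMS(kernel=3)
#     peaks = nms(heatmaps)   # same shape; cells that are not local maxima become zero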
class OpenPoseDecoder:
BODY_PARTS_KPT_IDS = ((1, 2), (1, 5), (2, 3), (3, 4), (5, 6), (6, 7), (1, 8), (8, 9), (9, 10), (1, 11),
(11, 12), (12, 13), (1, 0), (0, 14), (14, 16), (0, 15), (15, 17), (2, 16), (5, 17))
BODY_PARTS_PAF_IDS = (12, 20, 14, 16, 22, 24, 0, 2, 4, 6, 8, 10, 28, 30, 34, 32, 36, 18, 26)
def __init__(self, num_joints=18, skeleton=BODY_PARTS_KPT_IDS, paf_indices=BODY_PARTS_PAF_IDS,
max_points=100, score_threshold=0.1, min_paf_alignment_score=0.05, delta=0.5):
self.num_joints = num_joints
self.skeleton = skeleton
self.paf_indices = paf_indices
self.max_points = max_points
self.score_threshold = score_threshold
self.min_paf_alignment_score = min_paf_alignment_score
self.delta = delta
self.points_per_limb = 10
self.grid = np.arange(self.points_per_limb, dtype=np.float32).reshape(1, -1, 1)
def __call__(self, heatmaps, nms_heatmaps, pafs):
batch_size, _, h, w = heatmaps.shape
assert batch_size == 1, 'Batch size of 1 only supported'
keypoints = self.extract_points(heatmaps, nms_heatmaps)
pafs = np.transpose(pafs, (0, 2, 3, 1))
if self.delta > 0:
for kpts in keypoints:
kpts[:, :2] += self.delta
clip(kpts[:, 0], 0, w - 1, out=kpts[:, 0])
clip(kpts[:, 1], 0, h - 1, out=kpts[:, 1])
pose_entries, keypoints = self.group_keypoints(keypoints, pafs, pose_entry_size=self.num_joints + 2)
poses, scores = self.convert_to_coco_format(pose_entries, keypoints)
if len(poses) > 0:
poses = np.asarray(poses, dtype=np.float32)
poses = poses.reshape((poses.shape[0], -1, 3))
else:
poses = np.empty((0, 17, 3), dtype=np.float32)
scores = np.empty(0, dtype=np.float32)
return poses, scores
def extract_points(self, heatmaps, nms_heatmaps):
batch_size, channels_num, h, w = heatmaps.shape
assert batch_size == 1, 'Batch size of 1 only supported'
assert channels_num >= self.num_joints
xs, ys, scores = self.top_k(nms_heatmaps)
masks = scores > self.score_threshold
all_keypoints = []
keypoint_id = 0
for k in range(self.num_joints):
# Filter low-score points.
mask = masks[0, k]
x = xs[0, k][mask].ravel()
y = ys[0, k][mask].ravel()
score = scores[0, k][mask].ravel()
n = len(x)
if n == 0:
all_keypoints.append(np.empty((0, 4), dtype=np.float32))
continue
# Apply quarter offset to improve localization accuracy.
x, y = self.refine(heatmaps[0, k], x, y)
clip(x, 0, w - 1, out=x)
clip(y, 0, h - 1, out=y)
# Pack resulting points.
keypoints = np.empty((n, 4), dtype=np.float32)
keypoints[:, 0] = x
keypoints[:, 1] = y
keypoints[:, 2] = score
keypoints[:, 3] = np.arange(keypoint_id, keypoint_id + n)
keypoint_id += n
all_keypoints.append(keypoints)
return all_keypoints
def top_k(self, heatmaps):
N, K, _, W = heatmaps.shape
heatmaps = heatmaps.reshape(N, K, -1)
# Get positions with top scores.
ind = heatmaps.argpartition(-self.max_points, axis=2)[:, :, -self.max_points:]
scores = np.take_along_axis(heatmaps, ind, axis=2)
# Keep top scores sorted.
subind = np.argsort(-scores, axis=2)
ind = np.take_along_axis(ind, subind, axis=2)
scores = np.take_along_axis(scores, subind, axis=2)
y, x = np.divmod(ind, W)
return x, y, scores
@staticmethod
def refine(heatmap, x, y):
h, w = heatmap.shape[-2:]
valid = np.logical_and(np.logical_and(x > 0, x < w - 1), np.logical_and(y > 0, y < h - 1))
xx = x[valid]
yy = y[valid]
dx = np.sign(heatmap[yy, xx + 1] - heatmap[yy, xx - 1], dtype=np.float32) * 0.25
dy = np.sign(heatmap[yy + 1, xx] - heatmap[yy - 1, xx], dtype=np.float32) * 0.25
x = x.astype(np.float32)
y = y.astype(np.float32)
x[valid] += dx
y[valid] += dy
return x, y
@staticmethod
def is_disjoint(pose_a, pose_b):
pose_a = pose_a[:-2]
pose_b = pose_b[:-2]
return np.all(np.logical_or.reduce((pose_a == pose_b, pose_a < 0, pose_b < 0)))
def update_poses(self, kpt_a_id, kpt_b_id, all_keypoints, connections, pose_entries, pose_entry_size):
for connection in connections:
pose_a_idx = -1
pose_b_idx = -1
for j, pose in enumerate(pose_entries):
if pose[kpt_a_id] == connection[0]:
pose_a_idx = j
if pose[kpt_b_id] == connection[1]:
pose_b_idx = j
if pose_a_idx < 0 and pose_b_idx < 0:
# Create new pose entry.
pose_entry = np.full(pose_entry_size, -1, dtype=np.float32)
pose_entry[kpt_a_id] = connection[0]
pose_entry[kpt_b_id] = connection[1]
pose_entry[-1] = 2
pose_entry[-2] = np.sum(all_keypoints[connection[0:2], 2]) + connection[2]
pose_entries.append(pose_entry)
elif pose_a_idx >= 0 and pose_b_idx >= 0 and pose_a_idx != pose_b_idx:
# If the two poses are disjoint, merge them; otherwise ignore the connection.
pose_a = pose_entries[pose_a_idx]
pose_b = pose_entries[pose_b_idx]
if self.is_disjoint(pose_a, pose_b):
pose_a += pose_b
pose_a[:-2] += 1
pose_a[-2] += connection[2]
del pose_entries[pose_b_idx]
elif pose_a_idx >= 0 and pose_b_idx >= 0:
# Adjust score of a pose.
pose_entries[pose_a_idx][-2] += connection[2]
elif pose_a_idx >= 0:
# Add a new limb into pose.
pose = pose_entries[pose_a_idx]
if pose[kpt_b_id] < 0:
pose[-2] += all_keypoints[connection[1], 2]
pose[kpt_b_id] = connection[1]
pose[-2] += connection[2]
pose[-1] += 1
elif pose_b_idx >= 0:
# Add a new limb into pose.
pose = pose_entries[pose_b_idx]
if pose[kpt_a_id] < 0:
pose[-2] += all_keypoints[connection[0], 2]
pose[kpt_a_id] = connection[0]
pose[-2] += connection[2]
pose[-1] += 1
return pose_entries
@staticmethod
def connections_nms(a_idx, b_idx, affinity_scores):
# From all retrieved connections that share starting/ending keypoints leave only the top-scoring ones.
order = affinity_scores.argsort()[::-1]
affinity_scores = affinity_scores[order]
a_idx = a_idx[order]
b_idx = b_idx[order]
idx = []
has_kpt_a = set()
has_kpt_b = set()
for t, (i, j) in enumerate(zip(a_idx, b_idx)):
if i not in has_kpt_a and j not in has_kpt_b:
idx.append(t)
has_kpt_a.add(i)
has_kpt_b.add(j)
idx = np.asarray(idx, dtype=np.int32)
return a_idx[idx], b_idx[idx], affinity_scores[idx]
def group_keypoints(self, all_keypoints_by_type, pafs, pose_entry_size=20):
all_keypoints = np.concatenate(all_keypoints_by_type, axis=0)
pose_entries = []
# For every limb.
for part_id, paf_channel in enumerate(self.paf_indices):
kpt_a_id, kpt_b_id = self.skeleton[part_id]
kpts_a = all_keypoints_by_type[kpt_a_id]
kpts_b = all_keypoints_by_type[kpt_b_id]
n = len(kpts_a)
m = len(kpts_b)
if n == 0 or m == 0:
continue
# Get vectors between all pairs of keypoints, i.e. candidate limb vectors.
a = kpts_a[:, :2]
a = np.broadcast_to(a[None], (m, n, 2))
b = kpts_b[:, :2]
vec_raw = (b[:, None, :] - a).reshape(-1, 1, 2)
# Sample points along every candidate limb vector.
steps = (1 / (self.points_per_limb - 1) * vec_raw)
points = steps * self.grid + a.reshape(-1, 1, 2)
points = points.round().astype(dtype=np.int32)
x = points[..., 0].ravel()
y = points[..., 1].ravel()
# Compute affinity score between candidate limb vectors and part affinity field.
part_pafs = pafs[0, :, :, paf_channel:paf_channel + 2]
field = part_pafs[y, x].reshape(-1, self.points_per_limb, 2)
vec_norm = np.linalg.norm(vec_raw, ord=2, axis=-1, keepdims=True)
vec = vec_raw / (vec_norm + 1e-6)
affinity_scores = (field * vec).sum(-1).reshape(-1, self.points_per_limb)
valid_affinity_scores = affinity_scores > self.min_paf_alignment_score
valid_num = valid_affinity_scores.sum(1)
affinity_scores = (affinity_scores * valid_affinity_scores).sum(1) / (valid_num + 1e-6)
success_ratio = valid_num / self.points_per_limb
# Get a list of limbs according to the obtained affinity score.
valid_limbs = np.where(np.logical_and(affinity_scores > 0, success_ratio > 0.8))[0]
if len(valid_limbs) == 0:
continue
b_idx, a_idx = np.divmod(valid_limbs, n)
affinity_scores = affinity_scores[valid_limbs]
# Suppress incompatible connections.
a_idx, b_idx, affinity_scores = self.connections_nms(a_idx, b_idx, affinity_scores)
connections = list(zip(kpts_a[a_idx, 3].astype(np.int32),
kpts_b[b_idx, 3].astype(np.int32),
affinity_scores))
if len(connections) == 0:
continue
# Update poses with new connections.
pose_entries = self.update_poses(kpt_a_id, kpt_b_id, all_keypoints,
connections, pose_entries, pose_entry_size)
# Remove poses with not enough points.
pose_entries = np.asarray(pose_entries, dtype=np.float32).reshape(-1, pose_entry_size)
pose_entries = pose_entries[pose_entries[:, -1] >= 3]
return pose_entries, all_keypoints
@staticmethod
def convert_to_coco_format(pose_entries, all_keypoints):
num_joints = 17
coco_keypoints = []
scores = []
for pose in pose_entries:
if len(pose) == 0:
continue
keypoints = np.zeros(num_joints * 3)
reorder_map = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
person_score = pose[-2]
for keypoint_id, target_id in zip(pose[:-2], reorder_map):
if target_id < 0:
continue
cx, cy, score = 0, 0, 0 # keypoint not found
if keypoint_id != -1:
cx, cy, score = all_keypoints[int(keypoint_id), 0:3]
keypoints[target_id * 3 + 0] = cx
keypoints[target_id * 3 + 1] = cy
keypoints[target_id * 3 + 2] = score
coco_keypoints.append(keypoints)
scores.append(person_score * max(0, (pose[-1] - 1))) # -1 for 'neck'
return np.asarray(coco_keypoints), np.asarray(scores)
|
tests/test_gail.py | HighExecutor/stable-baselines | 222 | 11140938 | import os
import shutil
import gym
import numpy as np
import pytest
from stable_baselines import (A2C, ACER, ACKTR, GAIL, DDPG, DQN, PPO1, PPO2,
TD3, TRPO, SAC)
from stable_baselines.common.cmd_util import make_atari_env
from stable_baselines.common.vec_env import VecFrameStack, DummyVecEnv
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.callbacks import CheckpointCallback
from stable_baselines.gail import ExpertDataset, generate_expert_traj
EXPERT_PATH_PENDULUM = "stable_baselines/gail/dataset/expert_pendulum.npz"
EXPERT_PATH_DISCRETE = "stable_baselines/gail/dataset/expert_cartpole.npz"
@pytest.mark.parametrize("expert_env", [('Pendulum-v0', EXPERT_PATH_PENDULUM, True),
('CartPole-v1', EXPERT_PATH_DISCRETE, False)])
def test_gail(tmp_path, expert_env):
env_id, expert_path, load_from_memory = expert_env
env = gym.make(env_id)
traj_data = None
if load_from_memory:
traj_data = np.load(expert_path)
expert_path = None
dataset = ExpertDataset(traj_data=traj_data, expert_path=expert_path, traj_limitation=10,
sequential_preprocessing=True)
# Note: train for 1M steps to have a working policy
model = GAIL('MlpPolicy', env, adversary_entcoeff=0.0, lam=0.92, max_kl=0.001,
expert_dataset=dataset, hidden_size_adversary=64, verbose=0)
model.learn(300)
model.save(str(tmp_path / "GAIL-{}".format(env_id)))
model = model.load(str(tmp_path / "GAIL-{}".format(env_id)), env=env)
model.learn(300)
evaluate_policy(model, env, n_eval_episodes=5)
del dataset, model
@pytest.mark.parametrize("generate_env", [
(SAC, 'MlpPolicy', 'Pendulum-v0', 1, 10),
(DQN, 'MlpPolicy', 'CartPole-v1', 1, 10),
(A2C, 'MlpLstmPolicy', 'Pendulum-v0', 1, 10),
(A2C, 'MlpLstmPolicy', 'CartPole-v1', 1, 10),
(A2C, 'CnnPolicy', 'BreakoutNoFrameskip-v4', 8, 1),
])
def test_generate(tmp_path, generate_env):
model, policy, env_name, n_env, n_episodes = generate_env
if n_env > 1:
env = make_atari_env(env_name, num_env=n_env, seed=0)
model = model(policy, env, verbose=0)
else:
model = model(policy, env_name, verbose=0)
dataset = generate_expert_traj(model, str(tmp_path / 'expert'), n_timesteps=300, n_episodes=n_episodes,
image_folder=str(tmp_path / 'test_recorded_images'))
assert set(dataset.keys()).issuperset(['actions', 'obs', 'rewards', 'episode_returns', 'episode_starts'])
assert sum(dataset['episode_starts']) == n_episodes
assert len(dataset['episode_returns']) == n_episodes
n_timesteps = len(dataset['episode_starts'])
for key, val in dataset.items():
if key != 'episode_returns':
assert val.shape[0] == n_timesteps, "inconsistent number of timesteps at '{}'".format(key)
dataset_loaded = np.load(str(tmp_path / 'expert.npz'), allow_pickle=True)
assert dataset.keys() == dataset_loaded.keys()
for key in dataset.keys():
assert (dataset[key] == dataset_loaded[key]).all(), "different data at '{}'".format(key)
# Cleanup folder
if os.path.isdir(str(tmp_path / 'test_recorded_images')):
shutil.rmtree(str(tmp_path / 'test_recorded_images'))
def test_generate_callable(tmp_path):
"""
Test generating expert trajectories with a callable.
"""
env = gym.make("CartPole-v1")
# Here the expert is a random agent
def dummy_expert(_obs):
return env.action_space.sample()
generate_expert_traj(dummy_expert, tmp_path / 'dummy_expert_cartpole', env, n_timesteps=0, n_episodes=10)
@pytest.mark.xfail(reason="Not Enough Memory", strict=False)
def test_pretrain_images(tmp_path):
env = make_atari_env("PongNoFrameskip-v4", num_env=1, seed=0)
env = VecFrameStack(env, n_stack=3)
model = PPO2('CnnPolicy', env)
generate_expert_traj(model, str(tmp_path / 'expert_pong'), n_timesteps=0, n_episodes=1,
image_folder=str(tmp_path / 'pretrain_recorded_images'))
expert_path = str(tmp_path / 'expert_pong.npz')
dataset = ExpertDataset(expert_path=expert_path, traj_limitation=1, batch_size=32,
sequential_preprocessing=True)
model.pretrain(dataset, n_epochs=2)
shutil.rmtree(str(tmp_path / 'pretrain_recorded_images'))
env.close()
del dataset, model, env
def test_gail_callback(tmp_path):
dataset = ExpertDataset(expert_path=EXPERT_PATH_PENDULUM, traj_limitation=10,
sequential_preprocessing=True, verbose=0)
model = GAIL("MlpPolicy", "Pendulum-v0", dataset)
checkpoint_callback = CheckpointCallback(save_freq=150, save_path=str(tmp_path / 'logs/gail/'), name_prefix='gail')
model.learn(total_timesteps=301, callback=checkpoint_callback)
shutil.rmtree(str(tmp_path / 'logs/gail/'))
del dataset, model
@pytest.mark.parametrize("model_class", [A2C, ACKTR, GAIL, DDPG, PPO1, PPO2, SAC, TD3, TRPO])
def test_behavior_cloning_box(tmp_path, model_class):
"""
Behavior cloning with continuous actions.
"""
dataset = ExpertDataset(expert_path=EXPERT_PATH_PENDULUM, traj_limitation=10,
sequential_preprocessing=True, verbose=0)
model = model_class("MlpPolicy", "Pendulum-v0")
model.pretrain(dataset, n_epochs=5)
model.save(str(tmp_path / "test-pretrain"))
del dataset, model
@pytest.mark.parametrize("model_class", [A2C, ACER, ACKTR, DQN, GAIL, PPO1, PPO2, TRPO])
def test_behavior_cloning_discrete(tmp_path, model_class):
dataset = ExpertDataset(expert_path=EXPERT_PATH_DISCRETE, traj_limitation=10,
sequential_preprocessing=True, verbose=0)
model = model_class("MlpPolicy", "CartPole-v1")
model.pretrain(dataset, n_epochs=5)
model.save(str(tmp_path / "test-pretrain"))
del dataset, model
def test_dataset_param_validation():
with pytest.raises(ValueError):
ExpertDataset()
traj_data = np.load(EXPERT_PATH_PENDULUM)
with pytest.raises(ValueError):
ExpertDataset(traj_data=traj_data, expert_path=EXPERT_PATH_PENDULUM)
def test_generate_vec_env_non_image_observation():
env = DummyVecEnv([lambda: gym.make('CartPole-v1')] * 2)
model = PPO2('MlpPolicy', env)
model.learn(total_timesteps=300)
generate_expert_traj(model, save_path='.', n_timesteps=0, n_episodes=5)
|
containers/compilers/rump/python3/python-wrapper/python-wrapper-udp.py | urjitbhatia/unik | 1,237 | 11140941 | from io import StringIO
import sys
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
###From https://github.com/bmc/grizzled-python/blob/bf9998bd0f6497d1e368610f439f9085d019bf76/grizzled/io/__init__.py
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
import zipfile
class MultiWriter(object):
"""
Wraps multiple file-like objects so that they all may be written at once.
For example, the following code arranges to have anything written to
``sys.stdout`` go to ``sys.stdout`` and to a temporary file:
.. python::
import sys
from grizzled.io import MultiWriter
sys.stdout = MultiWriter(sys.__stdout__, open('/tmp/log', 'w'))
"""
def __init__(self, *args):
"""
Create a new ``MultiWriter`` object to wrap one or more file-like
objects.
:Parameters:
args : iterable
One or more file-like objects to wrap
"""
self.__files = args
def write(self, buf):
"""
Write the specified buffer to the wrapped files.
:Parameters:
buf : str or bytes
buffer to write
"""
for f in self.__files:
f.write(buf)
def flush(self):
"""
Force a flush.
"""
for f in self.__files:
f.flush()
def close(self):
"""
Close all contained files.
"""
for f in self.__files:
f.close()
logsbuf = StringIO()
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
self._stderr = sys.stderr
sys.stdout = self._stringioout = MultiWriter(logsbuf, self._stdout)
sys.stderr = self._stringioerr = MultiWriter(logsbuf, self._stderr)
return self
def __exit__(self, *args):
# a single extend is enough: stdout and stderr share the same logsbuf
self.extend(logsbuf.getvalue().splitlines())
sys.stdout = self._stdout
sys.stderr = self._stderr
CONST_PORT=9967
# HTTPRequestHandler class
class LogServer(BaseHTTPRequestHandler):
# GET
def do_GET(self):
# Send response status code
self.send_response(200)
# Send headers
self.send_header('Content-type','text/html')
self.end_headers()
# Send message back to client
message = logsbuf.getvalue()
# Write content as utf-8 data
self.wfile.write(bytes(message, "utf8"))
return
def run():
print('starting server on port', CONST_PORT)
server_address = ('0.0.0.0', CONST_PORT)
httpd = HTTPServer(server_address, LogServer)
print('running server...')
httpd.serve_forever()
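# The captured stdout/stderr can then be fetched over HTTP (host below is hypothetical):
#     curl http://<instance-ip>:9967/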
with Capturing():
os.chdir("/bootpart")
t = threading.Thread(target = run)
t.daemon = True
t.start()
import main ##NAME OF SCRIPT GOES HERE (module name of the wrapped script, without the .py extension)
|
tools/telemetry/telemetry/page/page_test_results.py | iplo/Chain | 231 | 11140948 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import logging
import sys
import traceback
import unittest
class PageTestResults(unittest.TestResult):
def __init__(self, output_stream=None):
super(PageTestResults, self).__init__()
self._output_stream = output_stream
self.pages_that_had_errors = set()
self.pages_that_had_failures = set()
self.successes = []
self.skipped = []
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
if isinstance(v, collections.Container):
v = copy.copy(v)
setattr(result, k, v)
return result
@property
def pages_that_had_errors_or_failures(self):
return self.pages_that_had_errors.union(
self.pages_that_had_failures)
def _exc_info_to_string(self, err, test):
if isinstance(test, unittest.TestCase):
return super(PageTestResults, self)._exc_info_to_string(err, test)
else:
return ''.join(traceback.format_exception(*err))
def addSuccess(self, test):
self.successes.append(test)
def addSkip(self, test, reason): # Python 2.7 has this in unittest.TestResult
self.skipped.append((test, reason))
def StartTest(self, page):
self.startTest(page.display_name)
def StopTest(self, page):
self.stopTest(page.display_name)
def AddError(self, page, err):
self.pages_that_had_errors.add(page)
self.addError(page.display_name, err)
def AddFailure(self, page, err):
self.pages_that_had_failures.add(page)
self.addFailure(page.display_name, err)
def AddSuccess(self, page):
self.addSuccess(page.display_name)
def AddSkip(self, page, reason):
self.addSkip(page.display_name, reason)
def AddFailureMessage(self, page, message):
try:
raise Exception(message)
except Exception:
self.AddFailure(page, sys.exc_info())
def AddErrorMessage(self, page, message):
try:
raise Exception(message)
except Exception:
self.AddError(page, sys.exc_info())
def PrintSummary(self):
if self.failures:
logging.error('Failed pages:\n%s', '\n'.join(zip(*self.failures)[0]))
if self.errors:
logging.error('Errored pages:\n%s', '\n'.join(zip(*self.errors)[0]))
if self.skipped:
logging.warning('Skipped pages:\n%s', '\n'.join(zip(*self.skipped)[0]))
|
tridet/utils/tensor2d.py | flipson/dd3d | 227 | 11140974 | <reponame>flipson/dd3d
# Copyright 2021 Toyota Research Institute. All rights reserved.
import torch
import torch.nn.functional as F
def compute_features_locations(h, w, stride, dtype=torch.float32, device='cpu', offset="none"):
"""Adapted from AdelaiDet:
https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py
    Key difference: offset is configurable.
"""
shifts_x = torch.arange(0, w * stride, step=stride, dtype=dtype, device=device)
shifts_y = torch.arange(0, h * stride, step=stride, dtype=dtype, device=device)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
# (dennis.park)
# locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2
locations = torch.stack((shift_x, shift_y), dim=1)
if offset == "half":
locations += stride // 2
else:
assert offset == "none"
return locations
def aligned_bilinear(tensor, factor, offset="none"):
"""Adapted from AdelaiDet:
https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py
"""
assert tensor.dim() == 4
assert factor >= 1
assert int(factor) == factor
if factor == 1:
return tensor
h, w = tensor.size()[2:]
tensor = F.pad(tensor, pad=(0, 1, 0, 1), mode="replicate")
oh = factor * h + 1
ow = factor * w + 1
tensor = F.interpolate(tensor, size=(oh, ow), mode='bilinear', align_corners=True)
if offset == "half":
tensor = F.pad(tensor, pad=(factor // 2, 0, factor // 2, 0), mode="replicate")
return tensor[:, :, :oh - 1, :ow - 1]
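

if __name__ == "__main__":
    # Minimal usage sketch (an added example, not part of the original module); the shapes
    # below are illustrative assumptions, chosen only to show how the two helpers fit together.
    feat = torch.randn(2, 256, 25, 38)  # [N, C, H, W] feature map
    # One (x, y) location per feature cell; offset="half" shifts them to cell centres.
    locations = compute_features_locations(25, 38, 8, offset="half")
    print(locations.shape)  # torch.Size([950, 2])
    # Upsample by 2x while keeping the upsampled grid aligned with the locations above.
    upsampled = aligned_bilinear(feat, factor=2, offset="half")
    print(upsampled.shape)  # torch.Size([2, 256, 50, 76])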
|
ratelimit/models.py | sobolevn/django-ratelimit | 712 | 11140977 | # This module intentionally left blank.
|
core/simulators/srunner/scenarios/opposite_direction.py | timothijoe/DI-drive | 219 | 11140994 | <reponame>timothijoe/DI-drive
import math
import py_trees
import carla
from six.moves.queue import Queue # pylint: disable=relative-import
from core.simulators.carla_data_provider import CarlaDataProvider
from core.simulators.srunner.scenariomanager.scenarioatomics.atomic_behaviors import (
ActorTransformSetter, ActorDestroy, ActorSource, ActorSink, WaypointFollower
)
from core.simulators.srunner.scenariomanager.scenarioatomics.atomic_criteria import CollisionTest
from core.simulators.srunner.scenariomanager.scenarioatomics.atomic_trigger_conditions import DriveDistance
from core.simulators.srunner.scenarios.basic_scenario import BasicScenario
from core.simulators.srunner.tools.scenario_helper import get_waypoint_in_distance
class OppositeDirection(BasicScenario):
"""
"Vehicle Maneuvering In Opposite Direction" (Traffic Scenario 05)
This is a single ego vehicle scenario
"""
def __init__(
self,
world,
ego_vehicles,
config,
randomize=False,
debug_mode=False,
criteria_enable=True,
obstacle_type='barrier',
timeout=120
):
"""
Setup all relevant parameters and create scenario
obstacle_type -> flag to select type of leading obstacle. Values: vehicle, barrier
"""
self._world = world
self._map = CarlaDataProvider.get_map()
self._ego_vehicle_drive_distance = 100
self._opposite_speed = 5.56 # m/s
self._source_gap = 10 # m
self._source_transform = None
self._sink_location = None
self._blackboard_queue_name = 'ManeuverOppositeDirection/actor_flow_queue'
self._queue = py_trees.blackboard.Blackboard().set(self._blackboard_queue_name, Queue())
self._obstacle_type = obstacle_type
self._other_actor_transform = None
# Timeout of scenario in seconds
self.timeout = timeout
super(OppositeDirection, self).__init__(
"OppositeDirection", ego_vehicles, config, world, debug_mode, criteria_enable=criteria_enable
)
def _initialize_actors(self, config):
"""
Custom initialization
"""
other_actor_transform = config.other_actors[0].transform
other_actor_waypoint = self._map.get_waypoint(other_actor_transform.location)
first_vehicle_transform = carla.Transform(
carla.Location(
config.other_actors[0].transform.location.x, config.other_actors[0].transform.location.y,
config.other_actors[0].transform.location.z
), config.other_actors[0].transform.rotation
)
other_actor = CarlaDataProvider.request_new_actor(config.other_actors[0].model, other_actor_transform)
other_actor.set_transform(first_vehicle_transform)
other_actor.set_simulate_physics(enabled=False)
self.other_actors.append(other_actor)
self._source_transform = other_actor_transform
sink_waypoint = other_actor_waypoint.next(1)[0]
while not sink_waypoint.is_intersection:
sink_waypoint = sink_waypoint.next(1)[0]
while sink_waypoint.is_intersection:
sink_waypoint = sink_waypoint.next(1)[0]
while not sink_waypoint.is_intersection:
sink_waypoint = sink_waypoint.next(1)[0]
self._sink_location = sink_waypoint.transform.location
self._other_actor_transform = other_actor_transform
def _create_behavior(self):
"""
The behavior tree returned by this method is as follows:
The ego vehicle is trying to pass a leading vehicle in the same lane
by moving onto the oncoming lane while another vehicle is moving in the
opposite direction in the oncoming lane.
"""
# Leaf nodes
actor_source = ActorSource(
['vehicle.audi.tt', 'vehicle.tesla.model3', 'vehicle.nissan.micra'], self._source_transform,
self._source_gap, self._blackboard_queue_name
)
actor_sink = ActorSink(self._sink_location, 10)
ego_drive_distance = DriveDistance(self.ego_vehicles[0], self._ego_vehicle_drive_distance)
waypoint_follower = WaypointFollower(
self.other_actors[0],
self._opposite_speed,
blackboard_queue_name=self._blackboard_queue_name,
avoid_collision=True
)
# Non-leaf nodes
parallel_root = py_trees.composites.Parallel(policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
# Building tree
parallel_root.add_child(ego_drive_distance)
parallel_root.add_child(actor_source)
parallel_root.add_child(actor_sink)
parallel_root.add_child(waypoint_follower)
scenario_sequence = py_trees.composites.Sequence()
scenario_sequence.add_child(ActorTransformSetter(self.other_actors[0], self._other_actor_transform))
scenario_sequence.add_child(parallel_root)
scenario_sequence.add_child(ActorDestroy(self.other_actors[0]))
return scenario_sequence
def _create_test_criteria(self):
"""
A list of all test criteria will be created that is later used
in parallel behavior tree.
"""
criteria = []
collision_criterion = CollisionTest(self.ego_vehicles[0])
criteria.append(collision_criterion)
return criteria
def __del__(self):
"""
Remove all actors upon deletion
"""
self.remove_all_actors()
|
codigo_das_aulas/aula_17/exemplo_04.py | VeirichR/curso-python-selenium | 234 | 11141006 | <reponame>VeirichR/curso-python-selenium<gh_stars>100-1000
from selene.support.shared import browser
base_url = 'http://google.com'
browser.config.base_url = base_url
browser.open('')
# browser.element('input[name="q"]').type('Live de python')
browser.element(
'//*[@name="q"]'
).type('Live de python').press_enter()
|
Src/StdLib/Lib/site-packages/win32com/test/policySemantics.py | cwensley/ironpython2 | 1,078 | 11141027 | <reponame>cwensley/ironpython2
import win32com.server.util
import win32com.client
import pythoncom
import winerror
import win32com.test.util
import unittest
class Error(Exception):
pass
# An object representing a list of numbers
class PythonSemanticClass:
_public_methods_ = ["In"] # DISPIDs are allocated.
_dispid_to_func_ = { 10: 'Add', 11:'Remove'} # DISPIDs specified by the object.
def __init__(self):
self.list = []
def _NewEnum(self):
return win32com.server.util.NewEnum(self.list)
def _value_(self):
# should return an array.
return self.list
def _Evaluate(self):
# return the sum
return sum(self.list)
def In(self, value):
return value in self.list
def Add(self, value):
self.list.append(value)
def Remove(self, value):
self.list.remove(value)
def DispExTest(ob):
if not __debug__: print "WARNING: Tests dressed up as assertions are being skipped!"
assert ob.GetDispID("Add", 0)==10, "Policy did not honour the dispid"
# Not impl
# assert ob.GetMemberName(10, 0)=="add", "Policy did not give me the correct function for the dispid"
assert ob.GetDispID("Remove", 0)==11, "Policy did not honour the dispid"
assert ob.GetDispID("In", 0)==1000, "Allocated dispid unexpected value"
assert ob.GetDispID("_NewEnum", 0)==pythoncom.DISPID_NEWENUM, "_NewEnum() got unexpected DISPID"
dispids = []
dispid = -1
while 1:
try:
dispid = ob.GetNextDispID(0, dispid)
dispids.append(dispid)
except pythoncom.com_error, (hr, desc, exc, arg):
assert hr==winerror.S_FALSE, "Bad result at end of enum"
break
dispids.sort()
if dispids != [pythoncom.DISPID_EVALUATE, pythoncom.DISPID_NEWENUM, 10, 11, 1000]:
raise Error("Got back the wrong dispids: %s" % dispids)
def SemanticTest(ob):
# First just check our object "generally" as expected.
ob.Add(1)
ob.Add(2)
ob.Add(3)
# invoke _value_
if ob() != (1,2,3):
raise Error("Bad result - got %s" % (repr(ob())))
dispob = ob._oleobj_
rc = dispob.Invoke(pythoncom.DISPID_EVALUATE, 0, pythoncom.DISPATCH_METHOD|pythoncom.DISPATCH_PROPERTYGET, 1)
if rc != 6:
raise Error("Evaluate returned %d" % rc)
class Tester(win32com.test.util.TestCase):
def setUp(self):
debug=0
import win32com.server.dispatcher
if debug:
dispatcher=win32com.server.dispatcher.DefaultDebugDispatcher
else:
dispatcher=None
disp = win32com.server.util.wrap(PythonSemanticClass(), useDispatcher=dispatcher)
self.ob = win32com.client.Dispatch(disp)
def tearDown(self):
self.ob = None
def testSemantics(self):
SemanticTest(self.ob)
def testIDispatchEx(self):
dispexob = self.ob._oleobj_.QueryInterface(pythoncom.IID_IDispatchEx)
DispExTest(dispexob)
if __name__=='__main__':
unittest.main()
|
ML/Pytorch/object_detection/YOLO/dataset.py | xuyannus/Machine-Learning-Collection | 3,094 | 11141034 | <filename>ML/Pytorch/object_detection/YOLO/dataset.py<gh_stars>1000+
"""
Creates a Pytorch dataset to load the Pascal VOC dataset
"""
import torch
import os
import pandas as pd
from PIL import Image
class VOCDataset(torch.utils.data.Dataset):
def __init__(
self, csv_file, img_dir, label_dir, S=7, B=2, C=20, transform=None,
):
self.annotations = pd.read_csv(csv_file)
self.img_dir = img_dir
self.label_dir = label_dir
self.transform = transform
self.S = S
self.B = B
self.C = C
def __len__(self):
return len(self.annotations)
def __getitem__(self, index):
label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
boxes = []
with open(label_path) as f:
for label in f.readlines():
class_label, x, y, width, height = [
float(x) if float(x) != int(float(x)) else int(x)
for x in label.replace("\n", "").split()
]
boxes.append([class_label, x, y, width, height])
img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
image = Image.open(img_path)
boxes = torch.tensor(boxes)
if self.transform:
# image = self.transform(image)
image, boxes = self.transform(image, boxes)
# Convert To Cells
label_matrix = torch.zeros((self.S, self.S, self.C + 5 * self.B))
for box in boxes:
class_label, x, y, width, height = box.tolist()
class_label = int(class_label)
# i,j represents the cell row and cell column
i, j = int(self.S * y), int(self.S * x)
x_cell, y_cell = self.S * x - j, self.S * y - i
"""
            The width and height of the bounding box relative to the cell
            are computed as follows, using width as the example:
            width_pixels = width * self.image_width
            cell_pixels = self.image_width / self.S
            The width relative to the cell is then width_pixels / cell_pixels,
            which simplifies to the formulas below.
"""
width_cell, height_cell = (
width * self.S,
height * self.S,
)
# If no object already found for specific cell i,j
# Note: This means we restrict to ONE object
# per cell!
if label_matrix[i, j, 20] == 0:
# Set that there exists an object
label_matrix[i, j, 20] = 1
# Box coordinates
box_coordinates = torch.tensor(
[x_cell, y_cell, width_cell, height_cell]
)
label_matrix[i, j, 21:25] = box_coordinates
# Set one hot encoding for class_label
label_matrix[i, j, class_label] = 1
return image, label_matrix
|
src/python3/request/item_create_link.py | meson800/onedrive-sdk-python | 912 | 11141038 | <reponame>meson800/onedrive-sdk-python
# -*- coding: utf-8 -*-
'''
# Copyright (c) 2015 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# This file was generated and any changes will be overwritten.
'''
from ..model.permission import Permission
from ..request_base import RequestBase
from ..request_builder_base import RequestBuilderBase
from ..options import *
import json
import asyncio
class ItemCreateLinkRequest(RequestBase):
def __init__(self, request_url, client, options, type):
super(ItemCreateLinkRequest, self).__init__(request_url, client, options)
self.method = "POST"
self.body_options={}
if type:
self.body_options["type"] = type
@property
def body_options(self):
return self._body_options
@body_options.setter
def body_options(self, value):
self._body_options=value
def post(self):
"""Sends the POST request
Returns:
:class:`Permission<onedrivesdk.model.permission.Permission>`:
The resulting entity from the operation
"""
self.content_type = "application/json"
entity = Permission(json.loads(self.send(self.body_options).content))
return entity
@asyncio.coroutine
def post_async(self):
"""Sends the POST request using an asyncio coroutine
Yields:
:class:`Permission<onedrivesdk.model.permission.Permission>`:
The resulting entity from the operation
"""
future = self._client._loop.run_in_executor(None,
self.post)
entity = yield from future
return entity
class ItemCreateLinkRequestBuilder(RequestBuilderBase):
def __init__(self, request_url, client, type):
super(ItemCreateLinkRequestBuilder, self).__init__(request_url, client)
self._method_options = {}
self._method_options["type"] = type
def request(self, options=None):
"""Builds the request for the ItemCreateLink
Args:
options (list of :class:`Option<onedrivesdk.options.Option>`):
Default to None, list of options to include in the request
Returns:
:class:`ItemCreateLinkRequest<onedrivesdk.request.item_create_link.ItemCreateLinkRequest>`:
The request
"""
req = ItemCreateLinkRequest(self._request_url, self._client, options, self._method_options["type"])
return req
def post(self):
"""Sends the POST request
Returns:
:class:`Permission<onedrivesdk.model.permission.Permission>`:
The resulting Permission from the operation
"""
return self.request().post()
@asyncio.coroutine
def post_async(self):
"""Sends the POST request using an asyncio coroutine
Yields:
:class:`Permission<onedrivesdk.model.permission.Permission>`:
The resulting Permission from the operation
"""
entity = yield from self.request().post_async()
return entity
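# Usage sketch (added; assumes an already-authenticated onedrivesdk client and uses the
# root item as a placeholder):
#   permission = client.item(drive='me', id='root').create_link('view').post()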
|
Utils/Storage/BaseOSS.py | Caius-Lu/Savior | 108 | 11141047 | <reponame>Caius-Lu/Savior
import io
from abc import ABC, abstractmethod
import cv2
import msgpack
import msgpack_numpy as m
import numpy as np
from PIL import Image
from Utils.Exceptions import ImageFileSizeAbnormalException, ImageClassNotSupportToEncodeException
from Utils.misc import convert_pil_to_numpy
class CloudObjectStorage(ABC):
def __init__(self, _endpoint, _access_key, _secret_key):
self.endpoint = _endpoint
self.access_key = _access_key
self.secret_key = _secret_key
@abstractmethod
def create_bucket(self, _bucket_name):
pass
@abstractmethod
def download_data(self, _bucket_name, _object_path):
pass
@abstractmethod
def upload_data(self, _bucket_name, _object_path, _to_upload_object_bytes):
pass
@abstractmethod
def get_retrieve_url(self, _bucket_name, _object_path, _expire_seconds=86400 * 7):
pass
@abstractmethod
def check_file_exist(self, _bucket_name, _object_path):
pass
@staticmethod
def _image_object_encode(_m_img, _enable_compress, _quality_rate):
"""
        Encode an image object into bytes.
        :param _m_img: image (numpy array or PIL Image)
        :param _enable_compress: whether to compress the image
        :param _quality_rate: compression quality (0-100)
        :return: byte stream
"""
if isinstance(_m_img, np.ndarray):
if not _enable_compress:
to_upload_img_bytes = io.BytesIO(cv2.imencode('.png', _m_img)[1])
else:
                # webp encoding is roughly 1/3 as fast as png, but at similar quality the file is about 10x smaller
to_upload_img_bytes = io.BytesIO(
cv2.imencode('.webp', _m_img, [cv2.IMWRITE_WEBP_QUALITY, _quality_rate])[1]
)
elif isinstance(_m_img, Image.Image):
to_upload_img_bytes = io.BytesIO()
if not _enable_compress:
_m_img.save(to_upload_img_bytes, format='PNG')
else:
_m_img.save(to_upload_img_bytes, format='WEBP', quality=_quality_rate)
to_upload_img_bytes.seek(0)
else:
raise ImageClassNotSupportToEncodeException(f'{type(_m_img)} not support now')
return to_upload_img_bytes
@staticmethod
def _image_object_decode(_img_object_bytes, _image_size_threshold=10):
"""
        Decode image bytes into a numpy array.
        :param _img_object_bytes: image bytes
        :param _image_size_threshold: minimum image size threshold, in KB
        :return: decoded numpy array
"""
image_file_stream = io.BytesIO(_img_object_bytes)
img_pil = Image.open(image_file_stream)
request_image = convert_pil_to_numpy(img_pil)
if _image_size_threshold and request_image.nbytes < 1024 * _image_size_threshold:
            raise ImageFileSizeAbnormalException('Image is too small; it may not be a valid image')
return request_image
@staticmethod
def _general_numpy_object_encode(_to_encode_array):
return io.BytesIO(msgpack.packb(_to_encode_array, default=m.encode))
@staticmethod
def _general_numpy_object_decode(_to_decode_bytes):
return msgpack.unpackb(_to_decode_bytes, object_hook=m.decode)
def download_image_file(self, _bucket_name, _object_path, _image_size_threshold=None):
return self._image_object_decode(self.download_data(_bucket_name, _object_path), _image_size_threshold)
def download_numpy_array(self, _bucket_name, _object_path):
return self._general_numpy_object_decode(self.download_data(_bucket_name, _object_path))
def upload_image_file(self, _bucket_name, _object_path, _image, _enable_compress=True, _quality_rate=90):
return self.upload_data(_bucket_name, _object_path + ('.webp' if _enable_compress else '.png'),
self._image_object_encode(_image, _enable_compress, _quality_rate)
)
def upload_numpy_array(self, _bucket_name, _object_path, _np_array):
return self.upload_data(_bucket_name, _object_path,
self._general_numpy_object_encode(_np_array)
)
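# Usage sketch (added; `MinioOSS` is a hypothetical concrete subclass, since any backend only
# has to implement the five abstract methods above for the image/numpy helpers to work):
#   oss = MinioOSS(_endpoint='127.0.0.1:9000', _access_key='ak', _secret_key='sk')
#   oss.upload_image_file('demo-bucket', 'images/cat', m_img, _enable_compress=True)
#   img = oss.download_image_file('demo-bucket', 'images/cat.webp')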
|
fbmessenger/sender_actions.py | Stanwarr/fbmessenger | 120 | 11141054 | <filename>fbmessenger/sender_actions.py
class SenderAction(object):
SENDER_ACTIONS = [
'mark_seen',
'typing_on',
'typing_off'
]
def __init__(self, sender_action):
if sender_action not in self.SENDER_ACTIONS:
raise ValueError('Invalid sender_action provided.')
self.sender_action = sender_action
def to_dict(self):
return self.sender_action
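# Usage sketch (added; the surrounding payload shape is an assumption about the caller,
# not something this module builds):
#   typing_on = SenderAction('typing_on')
#   payload = {'recipient': {'id': recipient_id}, 'sender_action': typing_on.to_dict()}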
|
src/genie/libs/parser/iosxe/tests/ShowSpanningTreeDetail/cli/equal/golden_output_1_expected.py | balmasea/genieparser | 204 | 11141064 | <filename>src/genie/libs/parser/iosxe/tests/ShowSpanningTreeDetail/cli/equal/golden_output_1_expected.py
expected_output = {
"rapid_pvst": {
"forwarding_delay": 15,
"hello_time": 2,
"hold_count": 6,
"max_age": 20,
"vlans": {
1: {
"aging_timer": 480,
"bridge_address": "000e.39ff.71a2",
"bridge_priority": 24576,
"bridge_sysid": 1,
"forwarding_delay": 15,
"hello_time": 2,
"hello_timer": 0,
"hold_count": 6,
"hold_time": 1,
"interfaces": {
"Port-channel220": {
"cost": 1,
"counters": {"bpdu_received": 0, "bpdu_sent": 20120147},
"designated_bridge_address": "000e.39ff.71a2",
"designated_bridge_priority": 24577,
"designated_path_cost": 0,
"designated_port_id": "128.1671",
"designated_root_address": "000e.39ff.71a2",
"designated_root_priority": 24577,
"forward_delay": 0,
"hold": 0,
"link_type": "point-to-point",
"message_age": 0,
"name": "Port-channel220",
"number_of_forward_transitions": 1,
"port_identifier": "128.1671.",
"port_num": 1671,
"port_priority": 128,
"status": "designated forwarding",
},
"Port-channel265": {
"cost": 3,
"counters": {"bpdu_received": 0, "bpdu_sent": 21320048},
"designated_bridge_address": "000e.39ff.71a2",
"designated_bridge_priority": 24577,
"designated_path_cost": 0,
"designated_port_id": "128.1673",
"designated_root_address": "000e.39ff.71a2",
"designated_root_priority": 24577,
"forward_delay": 0,
"hold": 0,
"link_type": "point-to-point",
"message_age": 0,
"name": "Port-channel265",
"number_of_forward_transitions": 1,
"port_identifier": "128.1673.",
"port_num": 1673,
"port_priority": 128,
"status": "designated forwarding",
},
},
"max_age": 20,
"notification_timer": 0,
"notification_times": 2,
"root_of_spanning_tree": True,
"time_since_topology_change": "38w1d",
"topology_change_flag": False,
"topology_change_timer": 0,
"topology_change_times": 35,
"topology_changes": 10,
"topology_detected_flag": False,
"topology_from_port": "GigabitEthernet8/10",
"vlan_id": 1,
},
115: {
"aging_timer": 480,
"bridge_address": "000e.39ff.71a2",
"bridge_priority": 24576,
"bridge_sysid": 115,
"forwarding_delay": 15,
"hello_time": 2,
"hello_timer": 0,
"hold_count": 6,
"hold_time": 1,
"interfaces": {
"Port-channel210": {
"cost": 2,
"counters": {"bpdu_received": 4, "bpdu_sent": 10172865},
"designated_bridge_address": "000e.39ff.71a2",
"designated_bridge_priority": 24691,
"designated_path_cost": 0,
"designated_port_id": "128.1670",
"designated_root_address": "000e.39ff.71a2",
"designated_root_priority": 24691,
"forward_delay": 0,
"hold": 0,
"link_type": "point-to-point",
"message_age": 0,
"name": "Port-channel210",
"number_of_forward_transitions": 1,
"port_identifier": "128.1670.",
"port_num": 1670,
"port_priority": 128,
"status": "designated forwarding",
}
},
"max_age": 20,
"notification_timer": 0,
"notification_times": 2,
"root_of_spanning_tree": True,
"time_since_topology_change": "33w6d",
"topology_change_flag": False,
"topology_change_timer": 0,
"topology_change_times": 35,
"topology_changes": 2,
"topology_detected_flag": False,
"topology_from_port": "Port-channel210",
"vlan_id": 115,
},
116: {
"aging_timer": 480,
"bridge_address": "000e.39ff.71a2",
"bridge_priority": 24576,
"bridge_sysid": 116,
"forwarding_delay": 15,
"hello_time": 2,
"hello_timer": 0,
"hold_count": 6,
"hold_time": 1,
"interfaces": {
"Port-channel210": {
"cost": 2,
"counters": {"bpdu_received": 4, "bpdu_sent": 10172829},
"designated_bridge_address": "000e.39ff.71a2",
"designated_bridge_priority": 24692,
"designated_path_cost": 0,
"designated_port_id": "128.1670",
"designated_root_address": "000e.39ff.71a2",
"designated_root_priority": 24692,
"forward_delay": 0,
"hold": 0,
"link_type": "point-to-point",
"message_age": 0,
"name": "Port-channel210",
"number_of_forward_transitions": 1,
"port_identifier": "128.1670.",
"port_num": 1670,
"port_priority": 128,
"status": "designated forwarding",
}
},
"max_age": 20,
"notification_timer": 0,
"notification_times": 2,
"root_of_spanning_tree": True,
"time_since_topology_change": "33w6d",
"topology_change_flag": False,
"topology_change_timer": 0,
"topology_change_times": 35,
"topology_changes": 2,
"topology_detected_flag": False,
"topology_from_port": "Port-channel210",
"vlan_id": 116,
},
118: {
"aging_timer": 480,
"bridge_address": "000e.39ff.71a2",
"bridge_priority": 24576,
"bridge_sysid": 118,
"forwarding_delay": 15,
"hello_time": 2,
"hello_timer": 0,
"hold_count": 6,
"hold_time": 1,
"interfaces": {
"Port-channel210": {
"cost": 2,
"counters": {"bpdu_received": 4, "bpdu_sent": 10172791},
"designated_bridge_address": "000e.39ff.71a2",
"designated_bridge_priority": 24694,
"designated_path_cost": 0,
"designated_port_id": "128.1670",
"designated_root_address": "000e.39ff.71a2",
"designated_root_priority": 24694,
"forward_delay": 0,
"hold": 0,
"link_type": "point-to-point",
"message_age": 0,
"name": "Port-channel210",
"number_of_forward_transitions": 1,
"port_identifier": "128.1670.",
"port_num": 1670,
"port_priority": 128,
"status": "designated forwarding",
}
},
"max_age": 20,
"notification_timer": 0,
"notification_times": 2,
"root_of_spanning_tree": True,
"time_since_topology_change": "33w6d",
"topology_change_flag": False,
"topology_change_timer": 0,
"topology_change_times": 35,
"topology_changes": 2,
"topology_detected_flag": False,
"topology_from_port": "Port-channel210",
"vlan_id": 118,
},
},
}
}
|
trieste/acquisition/multi_objective/pareto.py | satrialoka/trieste | 119 | 11141090 | <reponame>satrialoka/trieste
# Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This module contains functions and classes for Pareto based multi-objective optimization. """
from __future__ import annotations
import tensorflow as tf
from ...types import TensorType
from .dominance import non_dominated
from .partition import prepare_default_non_dominated_partition_bounds
class Pareto:
"""
A :class:`Pareto` constructs a Pareto set.
Stores a Pareto set and calculates hypervolume of the Pareto set given a
specified reference point
"""
def __init__(
self,
observations: TensorType,
):
"""
:param observations: The observations for all objectives, with shape [N, D].
:raise ValueError (or InvalidArgumentError): If ``observations`` has an invalid shape.
"""
tf.debugging.assert_rank(observations, 2)
tf.debugging.assert_greater_equal(tf.shape(observations)[-1], 2)
self.front = non_dominated(observations)[0]
def hypervolume_indicator(self, reference: TensorType) -> TensorType:
"""
Calculate the hypervolume indicator based on self.front and a reference point
The hypervolume indicator is the volume of the dominated region.
:param reference: a reference point to use, with shape [D].
Defines the upper bound of the hypervolume.
            Should be equal to or greater than the anti-ideal point of the Pareto set.
For comparing results across runs, the same reference point must be used.
:return: hypervolume indicator, if reference point is less than all of the front
in any dimension, the hypervolume indicator will be zero.
:raise ValueError (or `tf.errors.InvalidArgumentError`): If ``reference`` has an invalid
shape.
:raise ValueError (or `tf.errors.InvalidArgumentError`): If ``self.front`` is empty
(which can happen if the concentration point is too strict so no frontier
exists after the screening)
"""
if tf.equal(tf.size(self.front), 0):
raise ValueError("empty front cannot be used to calculate hypervolume indicator")
helper_anti_reference = tf.reduce_min(self.front, axis=0) - tf.ones(
shape=1, dtype=self.front.dtype
)
lower, upper = prepare_default_non_dominated_partition_bounds(
reference, self.front, helper_anti_reference
)
non_dominated_hypervolume = tf.reduce_sum(tf.reduce_prod(upper - lower, 1))
hypervolume_indicator = (
tf.reduce_prod(reference - helper_anti_reference) - non_dominated_hypervolume
)
return hypervolume_indicator
def get_reference_point(front: TensorType) -> TensorType:
"""
    Reference point calculation method. Note that if the front contains only one point, this
    method returns a reference point identical to that input point.
:param front: Pareto front referred to calculate the reference point, with shape [..., N, D]
:return: a reference point to use, with shape [..., D].
:raise ValueError: If ``front`` is empty
"""
if tf.equal(tf.size(front), 0):
raise ValueError("empty front cannot be used to calculate reference point")
f = tf.math.reduce_max(front, axis=-2) - tf.math.reduce_min(front, axis=-2)
return tf.math.reduce_max(front, axis=-2) + 2 * f / tf.cast(tf.shape(front)[-2], f.dtype)
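

if __name__ == "__main__":
    # Minimal usage sketch (an added example with illustrative observations). Objectives are
    # minimised, so [2.0, 2.0] is dominated and the front keeps the other three points.
    observations = tf.constant([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5], [2.0, 2.0]])
    pareto = Pareto(observations)
    reference = get_reference_point(pareto.front)
    print("front:", pareto.front.numpy())
    print("hypervolume:", pareto.hypervolume_indicator(reference).numpy())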
|
bdd100k/__init__.py | Celeven1996/bdd100k | 193 | 11141151 | """BDD100K python toolkit."""
|
evaluation/ci-for-ml/ci-simulation-repeated/1_normal_approx.py | rasbt/machine-learning-notes | 131 | 11141153 | <gh_stars>100-1000
import argparse
from get_dataset import get_dataset
from sklearn.tree import DecisionTreeClassifier
import scipy.stats
import numpy as np
def run_method(num_repetitions):
is_inside_list = []
for i in range(num_repetitions):
X_train, y_train, X_test, y_test, X_huge_test, y_huge_test = get_dataset(
random_seed=i
)
clf = DecisionTreeClassifier(random_state=123, max_depth=3)
clf.fit(X_train, y_train)
acc_test_true = clf.score(X_huge_test, y_huge_test)
#####################################################
# Compute CI
#####################################################
confidence = 0.95 # Change to your desired confidence level
z_value = scipy.stats.norm.ppf((1 + confidence) / 2.0)
acc_test = clf.score(X_test, y_test)
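        # Normal-approximation interval: treat the test-set accuracy as a binomial
        # proportion, so CI = acc +/- z * sqrt(acc * (1 - acc) / n_test).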
ci_length = z_value * np.sqrt((acc_test * (1 - acc_test)) / y_test.shape[0])
ci_lower = acc_test - ci_length
ci_upper = acc_test + ci_length
# Check CI
is_inside = acc_test_true >= ci_lower and acc_test_true <= ci_upper
is_inside_list.append(is_inside)
return is_inside_list
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--repetitions",
required=True,
type=int,
)
args = parser.parse_args()
is_inside_list = run_method(args.repetitions)
print(
f"{np.mean(is_inside_list)*100}% of 95% confidence"
" intervals contain the true accuracy."
)
|
soft_renderer/functional/projection.py | bala1144/SoftRas | 201 | 11141165 | import torch
def projection(vertices, P, dist_coeffs, orig_size):
'''
Calculate projective transformation of vertices given a projection matrix
P: 3x4 projection matrix
dist_coeffs: vector of distortion coefficients
orig_size: original size of image captured by the camera
'''
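    # Shape sketch (added): vertices [B, N, 3], P [B, 3, 4], dist_coeffs [B, 5]. For example,
    # a pinhole camera P = [[f, 0, cx, 0], [0, f, cy, 0], [0, 0, 1, 0]] with zero dist_coeffs
    # applies no lens distortion; the returned x, y are normalised to [-1, 1] and z is kept
    # as depth.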
vertices = torch.cat([vertices, torch.ones_like(vertices[:, :, None, 0])], dim=-1)
vertices = torch.bmm(vertices, P.transpose(2,1))
x, y, z = vertices[:, :, 0], vertices[:, :, 1], vertices[:, :, 2]
x_ = x / (z + 1e-5)
y_ = y / (z + 1e-5)
# Get distortion coefficients from vector
k1 = dist_coeffs[:, None, 0]
k2 = dist_coeffs[:, None, 1]
p1 = dist_coeffs[:, None, 2]
p2 = dist_coeffs[:, None, 3]
k3 = dist_coeffs[:, None, 4]
# we use x_ for x' and x__ for x'' etc.
r = torch.sqrt(x_ ** 2 + y_ ** 2)
x__ = x_*(1 + k1*(r**2) + k2*(r**4) + k3*(r**6)) + 2*p1*x_*y_ + p2*(r**2 + 2*x_**2)
y__ = y_*(1 + k1*(r**2) + k2*(r**4) + k3 *(r**6)) + p1*(r**2 + 2*y_**2) + 2*p2*x_*y_
x__ = 2 * (x__ - orig_size / 2.) / orig_size
y__ = 2 * (y__ - orig_size / 2.) / orig_size
vertices = torch.stack([x__,y__,z], dim=-1)
return vertices |
siren_pytorch/__init__.py | lucidrains/SIREN-pytorch | 293 | 11141183 | <filename>siren_pytorch/__init__.py
from siren_pytorch.siren_pytorch import Sine, Siren, SirenNet, SirenWrapper
|
memshell.py | google/Legilimency | 116 | 11141186 | <reponame>google/Legilimency
# Legilimency - Memory Analysis Framework for iOS
# --------------------------------------
#
# Written and maintained by <NAME> <<EMAIL>>
#
# Copyright 2017 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket, sys, struct, math, time
from defs import *
from symbols import *
from MemClient import MemClient
def main():
#Creating the needed clients
print "Connecting..."
client = MemClient(sys.argv[1], DEFAULT_PORT)
print "Connected!"
#Your code here... :-)
if __name__ == "__main__":
main()
|
plato/agent/conversational_agent/conversational_agent.py | avmi/plato-research-dialogue-system | 899 | 11141193 | <reponame>avmi/plato-research-dialogue-system
"""
Copyright (c) 2019-2020 Uber Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "<NAME>"
from abc import ABC, abstractmethod
"""
agent is the parent abstract class of all
Conversational Agents. It defines the interface that the
controller will use.
"""
class ConversationalAgent(ABC):
"""
Abstract class defining what it means to be a Conversational Agent
"""
@abstractmethod
def initialize(self):
"""
Initialize internal structures of a Conversational Agent
:return: nothing
"""
pass
@abstractmethod
def start_dialogue(self, **kwargs):
"""
Reset or initialize internal structures at the beginning of the
dialogue. May issue first utterance if this agent has the initiative.
:param kwargs:
:return:
"""
pass
@abstractmethod
def continue_dialogue(self, **kwargs):
"""
Perform one dialogue turn.
:param kwargs:
:return: this agent's output (dialogue acts, text, speech, statistics)
"""
pass
@abstractmethod
def end_dialogue(self):
"""
End the current dialogue and train
:return: nothing
"""
pass
@abstractmethod
def terminated(self):
"""
Check if this agent is at a terminal state.
:return: True or False
"""
pass
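# Minimal concrete-agent sketch (added illustration, not part of the Plato code base; the
# returned dicts are assumptions about what a caller might expect):
#
#   class EchoAgent(ConversationalAgent):
#       def initialize(self):
#           self._done = False
#       def start_dialogue(self, **kwargs):
#           self._done = False
#           return {'output': 'Hello!'}
#       def continue_dialogue(self, **kwargs):
#           return {'output': kwargs.get('utterance', '')}
#       def end_dialogue(self):
#           self._done = True
#       def terminated(self):
#           return self._done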
|
tests/test_general/settings.py | agilentia/django-salesforce | 251 | 11141234 | from django.utils.crypto import get_random_string
SECRET_KEY = get_random_string(length=32)
SF_CAN_RUN_WITHOUT_DJANGO = True
|