filename (string, length 13–19) | text (string, length 134–1.04M) |
---|---|
the-stack_0_20800 | # //////////////////////// Node Class //////////////////////////
class Node:
def __init__(self, value):
self.value = value
self.next = None
# //////////////////////// Stack Class //////////////////////////
class Stack:
def __init__(self):
self.top = None
def push(self, value):
# Create New Node
node = Node(value)
if self.top:
node.next = self.top
self.top = node
def pop(self):
if self.top == None:
raise AttributeError("Stack is empty")
temp = self.top.value
self.top = self.top.next
return(temp)
def peek(self):
if self.top == None:
raise AttributeError("Stack is empty.")
return self.top.value
def isEmpty(self):
return self.top == None
# //////////////////////// PseudoQueue Class //////////////////////////
class PseudoQueue:
def __init__(self):
self.stack_En=Stack()
self.stack_De=Stack()
def enqueue(self,value):
self.stack_En.push(value)
def dequeue(self):
pop_item = None
        if self.stack_En.isEmpty():
            return "Queue is Empty"
while (self.stack_En.top):
self.stack_De.push(self.stack_En.pop())
pop_item = self.stack_De.pop()
while (self.stack_De.top):
self.stack_En.push(self.stack_De.pop())
return pop_item
def print_(self):
temp = self.stack_En.top
while (temp):
print(temp.value)
temp = temp.next
if __name__ == "__main__":
aghyad = PseudoQueue()
aghyad.enqueue(5)
aghyad.enqueue(6)
aghyad.enqueue(77)
aghyad.dequeue()
aghyad.print_()
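    # A small FIFO sanity check (illustrative sketch; it relies only on the classes defined above).
    fifo = PseudoQueue()
    for value in (1, 2, 3):
        fifo.enqueue(value)
    assert fifo.dequeue() == 1  # the first value enqueued is the first one dequeued
    assert fifo.dequeue() == 2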
|
the-stack_0_20801 | try:
from nrgpy import logger
except ImportError:
pass
from datetime import datetime
from nrgpy.utils.utilities import affirm_directory
from .auth import nrg_api, export_url
import os
import requests
import zipfile
class nrg_api_export(nrg_api):
"""Uses NRG hosted web-based API to download data in text format
To sign up for the service, go to https://services.nrgsystems.com/
Parameters
----------
out_dir : str
path to save exported data
out_file : str
(optional) filename to save
serial_number : str or int
serial number of data logger (like, 820612345)
start_date : str
"YYYY-MM-DD HH:MM:SS" format, if just date it will return the whole day
times are in logger local time
end_date : str
"YYYY-MM-DD HH:MM:SS" format, if just date it will return the whole day
times are in logger local time
client_id : str
provided by NRG Systems
client_secret : str
provided by NRG Systems
save_file : bool
(True) whether to save the result to file
nec_file : str, optional
path to NEC file for custom export formatting
text_timestamps : bool
get export data with text timestamps instead of datetime
export_type : str
[meas], samples, diag, comm
Returns
-------
object
export object that includes an nrgpy reader object
Examples
--------
Download 3 days of data with an NEC file applied
>>> import nrgpy
>>> client_id = "contact [email protected] for access"
>>> client_secret = "contact [email protected] for access"
>>> exporter = nrgpy.nrg_api_export(
client_id=client_id,
client_secret=client_secret,
out_dir=txt_dir,
nec_file='12vbat.nec',
serial_number=820600019,
start_date="2020-05-01",
end_date="2020-05-03",
text_timestamps=False,
save_file=False
)
>>> reader = exporter.reader
>>> reader.format_site_data()
>>> if reader:
>>> print(f"Site number : {reader.site_number}")
>>> print(f"Site description : {reader.site_description}")
>>> reader.interval_check = nrgpy.check_intervals(reader.data)
>>> else:
>>> print("unable to get reader")
"""
def __init__(self, out_dir='', serial_number='', out_file='',
start_date='2014-01-01', end_date='2023-12-31',
client_id='', client_secret='', nec_file='',
export_type='meas', text_timestamps=False,
save_file=True, **kwargs):
super().__init__(client_id, client_secret)
self.txt_file = f'{serial_number}_{start_date}_{end_date}.txt'.replace(':', '-').replace(' ', '_')
self.filepath = os.path.join(out_dir, self.txt_file.replace('txt', 'zip'))
self.out_dir = out_dir
self.out_file = out_file
affirm_directory(self.out_dir)
# self.serial_number = str(serial_number)[-5:] # removing... no longer necessary 2021-01-14
self.serial_number = serial_number
self.start_date = start_date
self.end_date = end_date
self.nec_file = nec_file
self.export_type = export_type
self.text_timestamps = text_timestamps
if self.nec_file:
self.encoded_nec_bytes = self.prepare_file_bytes(self.nec_file)
else:
self.encoded_nec_bytes = ''
self.save_file = save_file
self.reader = self.export()
def export(self):
from nrgpy.read.sympro_txt import sympro_txt_read
self.headers = {"Authorization": "Bearer " + self.session_token}
self.data = {
'serialnumber': self.serial_number,
# 'sitenumber': self.site_number,
'startdate': self.start_date,
'enddate': self.end_date,
'exporttype': self.export_type,
'necfilebytes': self.encoded_nec_bytes
}
self.request_time = datetime.now()
self.resp = requests.post(data=self.data, url=export_url, headers=self.headers)
self.request_duration = datetime.now() - self.request_time
if self.resp.status_code == 200:
with open(self.filepath, 'wb') as f:
f.write(self.resp.content)
with zipfile.ZipFile(self.filepath, 'r') as z:
data_file = z.namelist()[0]
z.extractall(self.out_dir)
reader = sympro_txt_read(
filename=os.path.join(self.out_dir, data_file),
text_timestamps=self.text_timestamps
)
reader.format_site_data()
try:
self.serial_number = reader.logger_sn
self.site_number = reader.site_number
except AttributeError:
pass
os.remove(self.filepath)
os.remove(os.path.join(self.out_dir, data_file))
if self.save_file:
if not self.out_file:
self.out_file = f'{self.site_number}_{self.start_date}_{self.end_date}.txt'.replace(':', '.').replace(' ', '')
else:
self.out_file = os.path.join(self.out_dir, self.txt_file)
reader.output_txt_file(standard=True, out_file=self.out_file)
del self.data['necfilebytes']
self.data['nec_file'] = self.nec_file
reader.post_json = self.data
logger.info(f"export created")
logger.info(f"export took {self.request_duration}")
return reader
else:
logger.error(f"export not created")
logger.debug(f"{self.resp.status_code} | {self.resp.reason}")
logger.debug(self.resp.text.split(':')[1].split('"')[1])
print(self.resp.status_code)
print(self.resp.reason)
return False
|
the-stack_0_20802 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 18 2021
CO2 emissions for MSOAs or LSOAs combining 2 years at a time, IO part adapted from code by Anne Owen
@author: lenakilian
"""
import pandas as pd
import pickle
import LCFS_functions as lcfs
import copy as cp
import numpy as np
import demand_functions as dm
df = pd.DataFrame
def make_area_footprint(geog, first_year, last_year, working_directory):
"""
    Calculate consumption-based household GHG emissions for MSOAs or LSOAs from the LCFS (emissions calculated in LCFS_aggregation_combined_years.py) and the UKMRIO 2020
geog = LSOA or MSOA as str
first_year = first year to calculate footprint as int
last_year = last year to calculate footprint as int
working_directory = full working directory as str (without last '/')
"""
# create data directory from working directory
data_directory = working_directory + "/data/"
#############
# load data #
#############
# load IO data from UKMRIO
meta = pickle.load(open(eval("r'" + data_directory + "raw/UKMRIO_2020/meta.p'"), "rb" ))
# load household spends
hhdspend = pickle.load(open(eval("r'" + data_directory + "processed/LCFS/lcfsXoac/" + geog + "_expenditure.p'"), "rb" ))
# create year lists
years = list(hhdspend.keys())
# load populations
pop = {}
for year in years:
hhdspend[year] = hhdspend[year].drop_duplicates()
pop[year] = hhdspend[year][['population']] / 1000
hhdspend[year] = hhdspend[year].iloc[:,1:].apply(lambda x: x * hhdspend[year]['population'])
# load and clean up concs to make it usable
# these translate IO data sectors to LCFS products/services
#concs_dict = pd.read_excel(eval("r'" + data_directory + "raw/Concordances/COICOP_LCF_concs.xlsx'"), sheet_name=None)
concs_dict2 = pd.read_excel(eval("r'" + data_directory + "raw/Concordances/ONS_to_COICOP_LCF_concs.xlsx'"), sheet_name=None)
for dictionary in ['concs_dict2']: #'concs_dict',
concs = eval(dictionary)
for item in concs.keys():
concs[item] = concs[item].set_index('Unnamed: 0')
#######################
# aggregate emissions #
#######################
# get mean from 2 years
    # calculate difference between years in household data to calculate means for other variables
if len(years) > 1:
difference = years[1] - years[0]
else:
difference = 0
# calculate means for UKMRIO data
ukmrio = {}; means = {}
for data in ['ghg', 'uk_ghg_direct', 'S', 'U', 'Y']:
ukmrio[data] = pickle.load(open(eval("r'" + data_directory + "raw/UKMRIO_2020/" + data + ".p'"), "rb" ))
means[data] = {}
for year in years:
temp = [ukmrio[data][year + i] for i in range(difference)]
means[data][year] = sum(temp) / difference
for year in list(hhdspend.keys()):
# use concs
temp = np.dot(means['Y'][year], concs_dict2['C43_to_C40'])
means['Y'][year] = df(temp, index = means['Y'][year].index, columns = concs_dict2['C43_to_C40'].columns)
total_Yhh_106 = dm.make_Yhh_106(means['Y'], list(hhdspend.keys()), meta)
coicop_exp_tot = lcfs.expected_totals(hhdspend, list(hhdspend.keys()), concs_dict2, total_Yhh_106)
yhh_wide = lcfs.make_y_hh_307(means['Y'], coicop_exp_tot, list(hhdspend.keys()), concs_dict2, meta)
newY = lcfs.make_new_Y(means['Y'], yhh_wide, meta, list(hhdspend.keys()))
ylcf_props = lcfs.make_ylcf_props(hhdspend, list(hhdspend.keys()))
COICOP_ghg = lcfs.makefoot(means['S'], means['U'], newY, means['ghg'], list(hhdspend.keys()))
Total_ghg = {}; PC_ghg = {}
for year in list(hhdspend.keys()):
COICOP_ghg[year][160] += means['uk_ghg_direct'][year][1]
COICOP_ghg[year][101] += means['uk_ghg_direct'][year][0]
# this gives GHG emissions for the groups, break down to per capita emissions
temp = np.dot(ylcf_props[year], np.diag(COICOP_ghg[year]))
Total_ghg[year] = df(temp, index=hhdspend[year].index, columns=hhdspend[year].columns)
Total_ghg[year] = Total_ghg[year].join(pop[year])
PC_ghg[year] = cp.copy(Total_ghg[year])
PC_ghg[year].iloc[:,:-1] = PC_ghg[year].iloc[:,:-1].apply(lambda x: x/PC_ghg[year]['population'])
return(PC_ghg)
#################
# save datasets #
#################
def save_footprint_data(PC_ghg, working_directory, geog):
"""
Save per capita GHG emissions of UK MSOAs or LSOAs
    PC_ghg = per capita emissions of UK MSOAs or LSOAs as dictionary containing pandas.DataFrame
working_directory = full working directory as str (without last '/')
"""
data_directory = working_directory + "/data/"
years = list(PC_ghg.keys())
if len(years) > 1:
difference = years[1] - years[0]
else:
difference = 0
for year in years:
if difference > 1:
year_str = str(year) + '-' + str(year + difference - 1)
else:
year_str = str(year)
PC_ghg[year].to_csv(eval("r'" + data_directory + "processed/GHG_Estimates/" + geog + '_' + year_str + ".csv'"))
print("Saved: " + data_directory + "processed/GHG_Estimates/" + geog + '_' + year_str + ".csv")
# code run in run_all.py
|
the-stack_0_20803 | """Module to handle all of the graphics components.
'rendering' converts a display specification (such as :0) into an actual
Display object. Pyglet only supports multiple Displays on Linux.
"""
from datetime import datetime
import math
import os
from random import randint
from time import strftime
from gym.utils import reraise
import numpy as np
from scipy.misc import imresize as resize
try:
import pyglet
except ImportError as error:
reraise(
suffix="Install pyglet with 'pip install pyglet'. If you want to just "
"install all Gym dependencies, run 'pip install -e .[all]' or "
"'pip install gym[all]'.")
try:
from pyglet.gl import *
LAYER_BACKGROUND = pyglet.graphics.OrderedGroup(0)
LAYER_FOREGROUND = pyglet.graphics.OrderedGroup(1)
LAYER_TOP = pyglet.graphics.OrderedGroup(2)
except pyglet.canvas.xlib.NoSuchDisplayException as error:
print("Import error NSDE! You will not be able to render --> %s" % error)
except ImportError as error:
print("Import error GL! You will not be able to render --> %s" % error)
from . import constants
from . import utility
__location__ = os.path.dirname(os.path.realpath(__file__))
RESOURCE_PATH = os.path.join(__location__, constants.RESOURCE_DIR)
class Viewer(object):
''' Base class for the graphics module.
Used to share common functionality between the different
rendering engines.
'''
def __init__(self):
self.window = None
self.display = None
self._agents = []
self._agent_count = 0
self._board_state = None
self._batch = None
self.window = None
self._step = 0
self._agent_view_size = None
self._is_partially_observable = False
self.isopen = False
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
def set_board(self, state):
self._board_state = state
def set_bombs(self, bombs):
self._bombs = bombs
def set_agents(self, agents):
self._agents = agents
self._agent_count = len(agents)
def set_step(self, step):
self._step = step
def close(self):
self.window.close()
self.isopen = False
def window_closed_by_user(self):
self.isopen = False
def save(self, path):
now = datetime.now()
filename = now.strftime('%m-%d-%y_%H-%M-%S_') + str(
self._step) + '.png'
path = os.path.join(path, filename)
pyglet.image.get_buffer_manager().get_color_buffer().save(path)
class PixelViewer(Viewer):
'''Renders the game as a set of square pixels'''
def __init__(self,
display=None,
board_size=11,
agents=[],
partially_observable=False,
agent_view_size=None,
game_type=None):
super().__init__()
from gym.envs.classic_control import rendering
self.display = rendering.get_display(display)
self._board_size = board_size
self._agent_count = len(agents)
self._agents = agents
self._is_partially_observable = partially_observable
self._agent_view_size = agent_view_size
def render(self):
frames = self.build_frame()
if self.window is None:
height, width, _channels = frames.shape
self.window = pyglet.window.Window(
width=4 * width,
height=4 * height,
display=self.display,
vsync=False,
resizable=True)
self.width = width
self.height = height
self.isopen = True
@self.window.event
def on_resize(width, height):
'''Registers an event handler with a pyglet window to resize the window'''
self.width = width
self.height = height
@self.window.event
def on_close():
''' Registers an event handler with a pyglet to tell the render engine the
window is closed
'''
                self.isopen = False
        assert len(frames.shape) == 3, \
            "You passed in an image with the wrong number of dimensions"
image = pyglet.image.ImageData(
frames.shape[1],
frames.shape[0],
'RGB',
frames.tobytes(),
pitch=frames.shape[1] * -3)
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
image.blit(0, 0, width=self.window.width, height=self.window.height)
self.window.flip()
def build_frame(self):
board = self._board_state
board_size = self._board_size
agents = self._agents
human_factor = constants.HUMAN_FACTOR
rgb_array = self.rgb_array(board, board_size, agents,
self._is_partially_observable,
self._agent_view_size)
all_img = resize(
rgb_array[0],
(board_size * human_factor, board_size * human_factor),
interp='nearest')
other_imgs = [
resize(
frame, (int(board_size * human_factor / 4),
int(board_size * human_factor / 4)),
interp='nearest') for frame in rgb_array[1:]
]
other_imgs = np.concatenate(other_imgs, 0)
img = np.concatenate([all_img, other_imgs], 1)
return img
@staticmethod
def rgb_array(board, board_size, agents, is_partially_observable,
agent_view_size):
frames = []
all_frame = np.zeros((board_size, board_size, 3))
num_items = len(constants.Item)
for row in range(board_size):
for col in range(board_size):
value = board[row][col]
if utility.position_is_agent(board, (row, col)):
num_agent = value - num_items
if agents[num_agent].is_alive:
all_frame[row][col] = constants.AGENT_COLORS[num_agent]
else:
all_frame[row][col] = constants.ITEM_COLORS[value]
all_frame = np.array(all_frame)
frames.append(all_frame)
for agent in agents:
row, col = agent.position
my_frame = all_frame.copy()
for r in range(board_size):
for c in range(board_size):
if is_partially_observable and not all([
row >= r - agent_view_size, row <
r + agent_view_size, col >= c - agent_view_size,
col < c + agent_view_size
]):
my_frame[r, c] = constants.ITEM_COLORS[
constants.Item.Fog.value]
frames.append(my_frame)
return frames
class PommeViewer(Viewer):
'''The primary render engine for pommerman.'''
def __init__(self,
display=None,
board_size=11,
agents=[],
partially_observable=False,
agent_view_size=None,
game_type=None):
super().__init__()
from gym.envs.classic_control import rendering
self.display = rendering.get_display(display)
board_height = constants.TILE_SIZE * board_size
height = math.ceil(board_height + (constants.BORDER_SIZE * 2) +
(constants.MARGIN_SIZE * 3))
width = math.ceil(board_height + board_height / 4 +
(constants.BORDER_SIZE * 2) + constants.MARGIN_SIZE*10)
self._height = height
self._width = width
self.window = pyglet.window.Window(
width=width, height=height, display=display)
self.window.set_caption('Pommerman')
self.isopen = True
self._board_size = board_size
self._resource_manager = ResourceManager(game_type)
self._tile_size = constants.TILE_SIZE
self._agent_tile_size = (board_height / 4) / board_size
self._agent_count = len(agents)
self._agents = agents
self._game_type = game_type
self._is_partially_observable = partially_observable
self._agent_view_size = agent_view_size
@self.window.event
def close(self):
'''Pyglet event handler to close the window'''
self.window.close()
self.isopen = False
def render(self):
self.window.switch_to()
self.window.dispatch_events()
self._batch = pyglet.graphics.Batch()
background = self.render_background()
text = self.render_text()
agents = self.render_dead_alive()
board = self.render_main_board()
agents_board = self.render_agents_board()
self._batch.draw()
self.window.flip()
def render_main_board(self):
board = self._board_state
size = self._tile_size
x_offset = constants.BORDER_SIZE
y_offset = constants.BORDER_SIZE
top = self.board_top(-constants.BORDER_SIZE - 8)
return self.render_board(board, x_offset, y_offset, size, top)
def render_agents_board(self):
x_offset = self._board_size * self._tile_size + constants.BORDER_SIZE
x_offset += constants.MARGIN_SIZE
size = self._agent_tile_size
agents = []
top = self._height - constants.BORDER_SIZE + constants.MARGIN_SIZE
for agent in self._agents:
y_offset = agent.agent_id * size * self._board_size + (
agent.agent_id * constants.MARGIN_SIZE) + constants.BORDER_SIZE
agent_board = self.agent_view(agent)
sprite = self.render_board(agent_board, x_offset, y_offset, size,
top)
agents.append(sprite)
return agents
def render_board(self, board, x_offset, y_offset, size, top=0):
sprites = []
for row in range(self._board_size):
for col in range(self._board_size):
x = col * size + x_offset
y = top - y_offset - row * size
tile_state = board[row][col]
if tile_state == constants.Item.Bomb.value:
bomb_life = self.get_bomb_life(row, col)
tile = self._resource_manager.get_bomb_tile(bomb_life)
else:
tile = self._resource_manager.tile_from_state_value(tile_state)
tile.width = size
tile.height = size
sprite = pyglet.sprite.Sprite(
tile, x, y, batch=self._batch, group=LAYER_FOREGROUND)
sprites.append(sprite)
return sprites
def agent_view(self, agent):
if not self._is_partially_observable:
return self._board_state
agent_view_size = self._agent_view_size
state = self._board_state.copy()
fog_value = self._resource_manager.fog_value()
row, col = agent.position
for r in range(self._board_size):
for c in range(self._board_size):
if self._is_partially_observable and not all([
row >= r - agent_view_size, row <= r + agent_view_size,
col >= c - agent_view_size, col <= c + agent_view_size
]):
state[r][c] = fog_value
return state
def render_background(self):
image_pattern = pyglet.image.SolidColorImagePattern(
color=constants.BACKGROUND_COLOR)
image = image_pattern.create_image(self._width, self._height)
return pyglet.sprite.Sprite(
image, 0, 0, batch=self._batch, group=LAYER_BACKGROUND)
def render_text(self):
text = []
board_top = self.board_top(y_offset=8)
title_label = pyglet.text.Label(
'Pommerman',
font_name='Cousine-Regular',
font_size=36,
x=constants.BORDER_SIZE,
y=board_top,
batch=self._batch,
group=LAYER_TOP)
title_label.color = constants.TILE_COLOR
text.append(title_label)
info_text = ''
if self._game_type is not None:
info_text += 'Mode: ' + self._game_type.name + ' '
info_text += 'Time: ' + strftime('%b %d, %Y %H:%M:%S')
info_text += ' Step: ' + str(self._step)
time_label = pyglet.text.Label(
info_text,
font_name='Arial',
font_size=10,
x=constants.BORDER_SIZE,
y=5,
batch=self._batch,
group=LAYER_TOP)
time_label.color = constants.TEXT_COLOR
text.append(time_label)
board_right = self.board_right(x_offset=150)
acts = self._agents[0].acts
best = int(acts[-1])
action = pyglet.text.Label(
'Action-Values',
font_name='Arial',
font_size=10,
x=board_right,
y=200,
batch=self._batch,
group=LAYER_TOP)
action.color = constants.TEXT_COLOR
text.append(action)
action = pyglet.text.Label(
constants.Action.Stop.name + ': ' + acts[constants.Action.Stop.value],
font_name='Arial',
font_size=10,
x=board_right,
y=185,
batch=self._batch,
group=LAYER_TOP)
action.color = constants.TEXT_COLOR if constants.Action.Stop.value != best else constants.TILE_COLOR
text.append(action)
action = pyglet.text.Label(
constants.Action.Up.name + ': ' + acts[constants.Action.Up.value],
font_name='Arial',
font_size=10,
x=board_right,
y=170,
batch=self._batch,
group=LAYER_TOP)
action.color = constants.TEXT_COLOR if constants.Action.Up.value != best else constants.TILE_COLOR
text.append(action)
action = pyglet.text.Label(
constants.Action.Down.name + ': ' + acts[constants.Action.Down.value],
font_name='Arial',
font_size=10,
x=board_right,
y=155,
batch=self._batch,
group=LAYER_TOP)
action.color = constants.TEXT_COLOR if constants.Action.Down.value != best else constants.TILE_COLOR
text.append(action)
action = pyglet.text.Label(
constants.Action.Left.name + ': ' + acts[constants.Action.Left.value],
font_name='Arial',
font_size=10,
x=board_right,
y=140,
batch=self._batch,
group=LAYER_TOP)
action.color = constants.TEXT_COLOR if constants.Action.Left.value != best else constants.TILE_COLOR
text.append(action)
action = pyglet.text.Label(
constants.Action.Right.name + ': ' + acts[constants.Action.Right.value],
font_name='Arial',
font_size=10,
x=board_right,
y=125,
batch=self._batch,
group=LAYER_TOP)
action.color = constants.TEXT_COLOR if constants.Action.Right.value != best else constants.TILE_COLOR
text.append(action)
action = pyglet.text.Label(
constants.Action.Bomb.name + ': ' + acts[constants.Action.Bomb.value],
font_name='Arial',
font_size=10,
x=board_right,
y=110,
batch=self._batch,
group=LAYER_TOP)
action.color = constants.TEXT_COLOR if constants.Action.Bomb.value != best else constants.TILE_COLOR
text.append(action)
return text
def render_dead_alive(self):
board_top = self.board_top(y_offset=5)
image_size = 30
spacing = 5
dead = self._resource_manager.dead_marker()
dead.width = image_size
dead.height = image_size
sprites = []
if self._game_type is constants.GameType.FFA:
agents = self._agents
else:
agents = [self._agents[i] for i in [0,2,1,3]]
for index, agent in enumerate(agents):
# weird math to make sure the alignment
# is correct. 'image_size + spacing' is an offset
# that includes padding (spacing) for each image.
# '4 - index' is used to space each agent out based
# on where they are in the array based off of their
# index.
x = self.board_right() - (4 - index) * (
image_size + spacing)
y = board_top
agent_image = self._resource_manager.agent_image(agent.agent_id)
agent_image.width = image_size
agent_image.height = image_size
sprites.append(
pyglet.sprite.Sprite(
agent_image,
x,
y,
batch=self._batch,
group=LAYER_FOREGROUND))
if agent.is_alive is False:
sprites.append(
pyglet.sprite.Sprite(
dead, x, y, batch=self._batch, group=LAYER_TOP))
return sprites
def board_top(self, y_offset=0):
return constants.BORDER_SIZE + (
self._board_size * self._tile_size) + y_offset
def board_right(self, x_offset=0):
return constants.BORDER_SIZE + (
self._board_size * self._tile_size) + x_offset
def get_bomb_life(self, row, col):
for bomb in self._bombs:
x, y = bomb.position
if x == row and y == col:
return bomb.life
class ResourceManager(object):
'''Handles sprites and other resources for the PommeViewer'''
def __init__(self, game_type):
self._index_resources()
self._load_fonts()
self.images = self._load_images()
self.bombs = self._load_bombs()
self._fog_value = self._get_fog_index_value()
self._is_team = True
if game_type == constants.GameType.FFA:
self._is_team = False
@staticmethod
def _index_resources():
# Tell pyglet where to find the resources
pyglet.resource.path = [RESOURCE_PATH]
pyglet.resource.reindex()
@staticmethod
def _load_images():
images_dict = constants.IMAGES_DICT
for i in range(0, len(images_dict)):
image_data = images_dict[i]
image = pyglet.resource.image(image_data['file_name'])
images_dict[i]['image'] = image
return images_dict
@staticmethod
def _load_bombs():
images_dict = constants.BOMB_DICT
for i in range(0, len(images_dict)):
image_data = images_dict[i]
image = pyglet.resource.image(image_data['file_name'])
images_dict[i]['image'] = image
return images_dict
@staticmethod
def _load_fonts():
for i in range(0, len(constants.FONTS_FILE_NAMES)):
font_path = os.path.join(RESOURCE_PATH,
constants.FONTS_FILE_NAMES[i])
pyglet.font.add_file(font_path)
@staticmethod
def _get_fog_index_value():
for id, data in constants.IMAGES_DICT.items():
if data['name'] == 'Fog':
return id
def tile_from_state_value(self, value):
if self._is_team and value in range(10, 14):
return self.images[value + 10]['image']
return self.images[value]['image']
def agent_image(self, agent_id):
if self._is_team:
return self.images[agent_id + 24]['image']
return self.images[agent_id + 15]['image']
def dead_marker(self):
return self.images[19]['image']
def fog_value(self):
return self._fog_value
def fog_tile(self):
img = self.images[self._fog_value]
return img['image']
def get_bomb_tile(self, life):
return self.bombs[life - 1]['image']
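# Illustrative usage (a sketch; `board`, `agents` and `bombs` are assumed to come from a
# pommerman environment, and the agents must expose the attributes used above, e.g. `.acts`):
#   viewer = PommeViewer(board_size=11, agents=agents, game_type=constants.GameType.FFA)
#   viewer.set_board(board)
#   viewer.set_agents(agents)
#   viewer.set_bombs(bombs)
#   viewer.set_step(0)
#   viewer.render()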
|
the-stack_0_20804 | import sys
from pytest import raises
from zeiterfassung import main
def test_urlaub():
sys.argv[1:] = [
"--db_path", "/tmp/",
"--date", "2018-07-18",
"--user", "test_urlaub",
"--export", "",
"--urlaub"
]
db = main(db={})
day = db[2018][7][29][18]
assert "urlaub" in day["comment"].lower()
assert "start" not in day
assert "end" not in day
assert "pause" not in day
assert day["Tagessaldo"] == "0:00"
def test_zeitausgleich():
sys.argv[1:] = [
"--db_path", "/tmp/",
"--date", "2018-07-18",
"--user", "test_zeitausgleich",
"--work_time", "8:00",
"--export", "",
"--zeitausgleich"
]
db = main(db={})
day = db[2018][7][29][18]
assert "zeitausgleich" in day["comment"].lower()
assert "start" not in day
assert "end" not in day
assert "pause" not in day
assert day["Tagessaldo"] == "-8:00"
def test_wochenend():
sys.argv[1:] = [
"--db_path", "/tmp/",
"--date", "2018-07-21",
"--user", "test_zeitausgleich",
"--work_time", "8:00",
"--start", "9:00",
"--end", "18:00",
"--pause", "0",
"--export", ""
]
db = main(db={})
day = db[2018][7][29][21]
assert day["Arbeitszeit"] == day["Tagessaldo"]
assert "wochenende" in day["comment"].lower()
|
the-stack_0_20808 | import logging
import json
import torch
from torch.utils.data import DataLoader
import hydra
from omegaconf import DictConfig, OmegaConf
from mtb.data import TACREDDataset, SemEvalDataset, SmilerDataset
from mtb.model import MTBModel
from mtb.processor import BatchTokenizer, aggregate_batch
from mtb.train_eval import train_and_eval
from mtb.utils import resolve_relative_path, seed_everything
logger = logging.getLogger(__name__)
@hydra.main(config_name="config", config_path="configs")
def main(cfg: DictConfig) -> None:
"""
    Conducts training and evaluation given the configuration.
Args:
cfg: Hydra-format configuration given in a dict.
"""
resolve_relative_path(cfg)
print(OmegaConf.to_yaml(cfg))
seed_everything(cfg.seed)
# prepare dataset: parse raw dataset and do some simple pre-processing such as
# convert special tokens and insert entity markers
entity_marker = True if cfg.variant in ["d", "e", "f"] else False
if "tacred" in cfg.train_file.lower():
train_dataset = TACREDDataset(cfg.train_file, entity_marker=entity_marker)
eval_dataset = TACREDDataset(cfg.eval_file, entity_marker=entity_marker)
layer_norm = False
elif "semeval" in cfg.train_file.lower():
train_dataset = SemEvalDataset(cfg.train_file, entity_marker=entity_marker)
eval_dataset = SemEvalDataset(cfg.eval_file, entity_marker=entity_marker)
layer_norm = True
elif "smiler" in cfg.train_file.lower():
train_dataset = SmilerDataset(cfg.train_file, entity_marker=entity_marker)
eval_dataset = SmilerDataset(cfg.eval_file, entity_marker=entity_marker)
layer_norm = True
label_to_id = train_dataset.label_to_id
# set dataloader
train_loader = DataLoader(
train_dataset,
batch_size=cfg.batch_size,
shuffle=True,
pin_memory=True,
collate_fn=aggregate_batch,
)
eval_loader = DataLoader(
eval_dataset,
batch_size=cfg.batch_size,
shuffle=False,
pin_memory=True,
collate_fn=aggregate_batch,
)
# set a processor that tokenizes and aligns all the tokens in a batch
batch_processor = BatchTokenizer(
tokenizer_name_or_path=cfg.model,
variant=cfg.variant,
max_length=cfg.max_length,
)
vocab_size = len(batch_processor.tokenizer)
# set model and device
model = MTBModel(
encoder_name_or_path=cfg.model,
variant=cfg.variant,
layer_norm=layer_norm,
vocab_size=vocab_size,
num_classes=len(label_to_id),
dropout=cfg.dropout,
)
device = (
torch.device("cuda", cfg.cuda_device)
if cfg.cuda_device > -1
else torch.device("cpu")
)
micro_f1, macro_f1 = train_and_eval(
model,
train_loader,
eval_loader,
label_to_id,
batch_processor,
num_epochs=cfg.num_epochs,
lr=cfg.lr,
device=device,
)
logger.info(
"Evaluation micro-F1: {:.4f}, macro_f1: {:.4f}.".format(micro_f1, macro_f1)
)
# save evaluation results to json
with open("./results.json", "w") as f:
json.dump({"micro_f1": micro_f1, "macro_f1": macro_f1}, f, indent=4)
if __name__ == "__main__":
main()
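# Illustrative launch (a sketch; the script name and concrete values are assumptions, but the
# override keys correspond to the cfg fields used above and to configs/config.yaml):
#   python <this_script>.py model=bert-base-cased variant=d \
#       train_file=data/semeval/train.txt eval_file=data/semeval/test.txt \
#       batch_size=32 num_epochs=5 lr=3e-5 cuda_device=0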
|
the-stack_0_20809 | # Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
from typing import Optional, Text
from absl import logging
import jax
import jax.numpy as jnp
def topk_accuracy(
logits: jnp.ndarray,
labels: jnp.ndarray,
topk: int,
ignore_label_above: Optional[int] = None,
) -> jnp.ndarray:
"""Top-num_codes accuracy."""
assert len(labels.shape) == 1, 'topk expects 1d int labels.'
assert len(logits.shape) == 2, 'topk expects 2d logits.'
if ignore_label_above is not None:
logits = logits[labels < ignore_label_above, :]
labels = labels[labels < ignore_label_above]
prds = jnp.argsort(logits, axis=1)[:, ::-1]
prds = prds[:, :topk]
total = jnp.any(prds == jnp.tile(labels[:, jnp.newaxis], [1, topk]), axis=1)
return total
def softmax_cross_entropy(
logits: jnp.ndarray,
labels: jnp.ndarray,
reduction: Optional[Text] = 'mean',
) -> jnp.ndarray:
"""Computes softmax cross entropy given logits and one-hot class labels.
Args:
logits: Logit output values.
labels: Ground truth one-hot-encoded labels.
reduction: Type of reduction to apply to loss.
Returns:
Loss value. If `reduction` is `none`, this has the same shape as `labels`;
otherwise, it is scalar.
Raises:
ValueError: If the type of `reduction` is unsupported.
"""
loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
if reduction == 'sum':
return jnp.sum(loss)
elif reduction == 'mean':
return jnp.mean(loss)
elif reduction == 'none' or reduction is None:
return loss
else:
raise ValueError(f'Incorrect reduction mode {reduction}')
def l2_normalize(
x: jnp.ndarray,
axis: Optional[int] = None,
epsilon: float = 1e-12,
) -> jnp.ndarray:
"""l2 normalize a tensor on an axis with numerical stability."""
square_sum = jnp.sum(jnp.square(x), axis=axis, keepdims=True)
x_inv_norm = jax.lax.rsqrt(jnp.maximum(square_sum, epsilon))
return x * x_inv_norm
def l2_weight_regularizer(params):
"""Helper to do lasso on weights.
Args:
params: the entire param set.
Returns:
    Scalar equal to half the squared l2 norm of the non-bias, non-norm weights.
"""
l2_norm = 0.
for mod_name, mod_params in params.items():
if 'norm' not in mod_name:
for param_k, param_v in mod_params.items():
        if param_k != 'b':  # Filter out biases
l2_norm += jnp.sum(jnp.square(param_v))
else:
logging.warning('Excluding %s/%s from optimizer weight decay!',
mod_name, param_k)
else:
logging.warning('Excluding %s from optimizer weight decay!', mod_name)
return 0.5 * l2_norm
def regression_loss(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
"""Byol's regression loss. This is a simple cosine similarity."""
normed_x, normed_y = l2_normalize(x, axis=-1), l2_normalize(y, axis=-1)
return jnp.sum((normed_x - normed_y)**2, axis=-1)
def bcast_local_devices(value):
"""Broadcasts an object to all local devices."""
devices = jax.local_devices()
def _replicate(x):
"""Replicate an object on each device."""
x = jnp.array(x)
return jax.device_put_sharded(len(devices) * [x], devices)
return jax.tree_util.tree_map(_replicate, value)
def get_first(xs):
"""Gets values from the first device."""
return jax.tree_map(lambda x: x[0], xs)
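if __name__ == '__main__':
  # Minimal self-check of a few helpers (illustrative only; not part of the original module API).
  logits = jnp.array([[2.0, 1.0, 0.1]])
  int_labels = jnp.array([0])
  print(topk_accuracy(logits, int_labels, topk=1))  # -> [ True]
  one_hot = jax.nn.one_hot(int_labels, 3)
  print(softmax_cross_entropy(logits, one_hot))  # mean cross entropy, a scalar
  x = jnp.array([[1.0, 0.0]])
  y = jnp.array([[1.0, 1.0]])
  # regression_loss equals 2 - 2 * cosine_similarity(x, y) for non-zero inputs.
  print(regression_loss(x, y))  # ~0.5858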
|
the-stack_0_20810 | """
drugstone.task.tasks_result
This module implements the class :class:`TasksResult` for the drugstone API.
:copyright: 2022 Institute for Computational Systems Biology by Prof. Dr. Jan Baumbach
:author: Ugur Turhan
"""
import sys
import json
import logging
from typing import List
from .task import Task
from .scripts.create_path import create_path
from .scripts.make_upsetplot import make_upset_plot
class TasksResult:
"""Represents the results of a list of :class:`Task` objects.
get_tasks_list() -> List[:class:`Task`]:
Returns the list of tasks.
to_dict() -> dict:
Returns a dict with the results of the tasks.
download_json(path: str, name: str) -> None:
Downloads a json file with the results.
create_upset_plot() -> None:
Opens a new window with an upset plot of the results.
"""
def __init__(self, tasks: List[Task] = []) -> None:
self.__tasks = tasks
def get_tasks_list(self) -> List[Task]:
"""Returns the list of tasks."""
return self.__tasks
def to_dict(self) -> dict:
"""Returns a dict with the results of the tasks."""
d = {}
for t in self.__tasks:
d[t.get_parameters()["taskId"]] = {
"info": t.get_info(),
"parameters": t.get_parameters(),
"results": t.get_result().to_dict()
}
return d
def download_json(self, path: str = "", name: str = "result") -> None:
"""Downloads a json file with the results.
:param str path: (optional) Path, where to download the file. Defaults to the current path.
:param str name: (optional) Name for the file. Defaults to 'result'.
"""
path = create_path(path, name, "json")
with open(path, "x") as f:
json.dump(self.to_dict(), f, indent=4)
def create_upset_plot(self) -> None:
"""Opens a new window with an upset plot of the drug results.
At least one of the tasks has to be a drug-search.
This is only available with python 3.6!
"""
has_drugs = False
for task in self.get_tasks_list():
if task.get_result().get_drugs():
has_drugs = True
if has_drugs:
if sys.version_info[:2] == (3, 6):
logging.info("IMPORTANT: The script pauses for the new window, for the UpSet plot! "
+ "Close the UpSet plot window, for the script to continue or terminate! ")
make_upset_plot(self.to_dict())
else:
logging.warn("create_upset_plot() is only compatible with Python 3.6!")
else:
logging.warn("Something went wrong! "
+ "At least one task has to be a drug-search. "
+ "No drugs were found.")
|
the-stack_0_20812 | """
Helpers for parsing methods and attributes.
"""
import inspect
import textwrap
from types import MethodType
from typing import Any, Dict, List
from boto3.resources.base import ServiceResource as Boto3ServiceResource
from mypy_boto3_builder.logger import get_logger
from mypy_boto3_builder.parsers.docstring_parser.argspec_parser import ArgSpecParser
from mypy_boto3_builder.parsers.docstring_parser.docstring_parser import DocstringParser
from mypy_boto3_builder.service_name import ServiceName
from mypy_boto3_builder.structures.attribute import Attribute
from mypy_boto3_builder.structures.method import Method
from mypy_boto3_builder.type_maps.docstring_type_map import get_type_from_docstring
from mypy_boto3_builder.type_maps.method_argument_map import get_method_arguments_stub
from mypy_boto3_builder.type_maps.method_type_map import get_method_type_stub
from mypy_boto3_builder.utils.strings import get_class_prefix
def get_public_methods(inspect_class: Any) -> Dict[str, MethodType]:
"""
Extract public methods from any class.
Arguments:
inspect_class -- Inspect class.
Returns:
A dictionary of method name and method.
"""
class_members = inspect.getmembers(inspect_class)
methods: Dict[str, MethodType] = {}
for name, member in class_members:
if not inspect.ismethod(member):
continue
if name.startswith("_"):
continue
methods[name] = member
return methods
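# Example (illustrative; assumes boto3 is installed and a default AWS region is configured):
#   s3_resource = boto3.resource("s3")
#   methods = get_public_methods(s3_resource)  # e.g. {"create_bucket": <bound method>, ...}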
def parse_attributes(
service_name: ServiceName, resource_name: str, resource: Boto3ServiceResource
) -> List[Attribute]:
"""
Extract attributes from boto3 resource.
Arguments:
resource -- boto3 service resource.
Returns:
A list of Attribute structures.
"""
result: List[Attribute] = []
if not resource.meta.client:
return result
if not resource.meta.resource_model:
return result
service_model = resource.meta.client.meta.service_model
if resource.meta.resource_model.shape:
shape = service_model.shape_for(resource.meta.resource_model.shape)
attributes = resource.meta.resource_model.get_attributes(shape)
for name, attribute in attributes.items():
argument_type = get_method_type_stub(service_name, resource_name, "_attributes", name)
if argument_type is None:
argument_type = get_type_from_docstring(attribute[1].type_name)
result.append(Attribute(name, argument_type))
return result
def parse_method(
parent_name: str, name: str, method: MethodType, service_name: ServiceName
) -> Method:
"""
Parse method to a structure.
Arguments:
parent_name -- Parent class name.
method -- Inspect method.
Returns:
Method structure.
"""
logger = get_logger()
docstring = textwrap.dedent(inspect.getdoc(method) or "")
method_name = f"{parent_name}.{name}"
logger.debug(f"Slow parsing of {method_name}: {len(docstring)} chars")
prefix = f"{get_class_prefix(parent_name)}{get_class_prefix(name)}"
arg_spec_parser = ArgSpecParser(prefix, service_name)
arguments = get_method_arguments_stub(service_name, parent_name, name)
if arguments is None:
arguments = arg_spec_parser.get_arguments(parent_name, name, method)
docstring_parser = DocstringParser(service_name, parent_name, name, arguments)
arguments = docstring_parser.get_arguments(docstring)
return_type = arg_spec_parser.get_return_type(parent_name, name)
if return_type is None:
return_type = DocstringParser(service_name, parent_name, name, []).get_return_type(
docstring
)
return Method(name=name, arguments=arguments, return_type=return_type)
|
the-stack_0_20813 | ## postprocessing
import numpy as np
import sys
import pickle
import joblib
import matplotlib.ticker as ticker
sys.dont_write_bytecode = True
sys.path.insert(0, '../../')
from matplotlib import pyplot as plt
from SKDMD.PREP_DATA_SRC.source_code.lib.utilities import mkdir
from scipy.io import loadmat
plt.style.use('siads')
plt.locator_params(axis='y', nbins=6)
plt.locator_params(axis='x', nbins=10)
FIG_SIZE = (8,8)
N_CPU = joblib.cpu_count()
def fmt(x, pos):
a, b = '{:.2e}'.format(x).split('e')
b = int(b)
return r'${} \times 10^{{{}}}$'.format(a, b)
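# e.g. fmt(0.000123, None) returns r'$1.23 \times 10^{-4}$'; the (x, pos) signature matches what
# matplotlib.ticker.FuncFormatter expects for tick labels.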
class ClassKoopmanPPS(object):
def __init__(self, pps_dir, eval_dir, model_dir, params, draw_eigen_function_plot=True, compare_against_spdmd=False):
self.pps_dir = pps_dir
self.eval_dir = eval_dir
self.model_dir = model_dir
mkdir(self.pps_dir)
self.params = params
self.type = None
self.dt = None
self.index_selected_in_full = None
self.draw_eigen_function_plot = draw_eigen_function_plot
self.compare_against_spdmd = compare_against_spdmd
def pps_eigenfunction(self):
raise NotImplementedError("Postprocessing for eigenfunction need to be implemented!")
def plot_eigenfunction_given_index_and_path(self, index_of_eigens, save_path):
raise NotImplementedError("Postprocessing for eigenfunction need to be implemented!")
def pps_eigenvalues(self):
raise NotImplementedError("Postprocessing for eigenvalues need to be implemented!")
def pps_eigenmodes_eval(self):
raise NotImplementedError("Postprocessing for eigenmodes evaluation need to be implemented!")
def pps_scatter_plot_eigenvalue(self, index_selected_array, path_to_save,zoomed_X_Y_max, case_specific_frequency_dict):
raise NotImplementedError("Postprocessing for plotting scatter eigenvalues evaluation need to be implemented!")
def pps_sweep_alpha(self, zoomed_X_Y_max=None, case_specific_frequency_dict={'draw_st':False}):
fig_data = np.load(self.eval_dir + '/MultiTaskElasticNet_result.npz')
alphas_enet = fig_data['alphas_enet']
coefs_enet_comp = fig_data['coefs_enet_comp']
# loop over all alpha
for ii, alpha in enumerate(alphas_enet):
print("current alpha = ", alpha, " index = ",ii)
# 1 make directory
alpha_dir_eval = self.eval_dir + '/sweep/sweep_alpha_' + str(alpha)
alpha_dir = self.pps_dir + '/sweep_alpha_' + str(alpha)
mkdir(alpha_dir)
# 2 compute the index of selected modes
non_zero_index_bool_array = np.linalg.norm(coefs_enet_comp[:, :, ii],axis=0) > 0
further_selected_index_array = self.index_selected_in_full[non_zero_index_bool_array]
print("number of non-zero coef = ", len(further_selected_index_array))
if len(further_selected_index_array) > 0:
# 3 draw the eigenvalue selected plot
self.pps_scatter_plot_eigenvalue(index_selected_array=further_selected_index_array,
path_to_save=alpha_dir,
zoomed_X_Y_max=zoomed_X_Y_max,
case_specific_frequency_dict=case_specific_frequency_dict)
# 4. load and draw trajectory comparison
# load npz data from eval directory for parameter sweep
fig_data = np.load(alpha_dir_eval + '/save_trj_comparison.npz')
true_trajectory = fig_data['ttrj']
pred_trajectory = fig_data['ptrj']
true_tsnap = fig_data['tt']
self.pps_plot_trajectory_given_pred(true_tsnap=true_tsnap,
pred_trajectory=pred_trajectory,
true_trajectory=true_trajectory,
path_to_save_fig=alpha_dir)
# 5. load and draw eigenfunction plot
if self.draw_eigen_function_plot:
self.plot_eigenfunction_given_index_and_path(index_of_eigens=further_selected_index_array, save_path=alpha_dir)
return
def pps_plot_trajectory_given_pred(self, true_tsnap, pred_trajectory, true_trajectory, path_to_save_fig):
# # plot
num_components = true_trajectory.shape[1]
for i_comp in range(num_components):
# plt.figure(figsize=FIG_SIZE)
plt.figure(figsize=(8, 4))
plt.plot(true_tsnap, true_trajectory[:, i_comp], 'k-', label='true')
plt.plot(true_tsnap, pred_trajectory[:, i_comp], 'r--',label='pred')
plt.xlabel('time',fontsize = 32)
plt.ylabel(r'$x_{' + str(i_comp + 1) + '}$',fontsize = 32)
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(path_to_save_fig + '/component_' + str(i_comp + 1) + '.png', bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
print('finished drawing E/KDMD prediction plot')
## if comparison against spdmd is needed...
if self.compare_against_spdmd:
# load data from matlab solution of SPDMD
# # cylinder case
# data_dmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/20_pred.mat')
# data_full_dmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/200_pred.mat')
# data_spdmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/200_sp_pred.mat')
# # ship case
# data_dmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/20_pred.mat')
# data_full_dmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/200_pred.mat')
# data_spdmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/200_sp_pred.mat') # cylinder re 70
data_spdmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/200_sp_pred_ship.mat') # ship airwake
# dmd_pred = data_dmd['x_list'].T
# full_dmd_pred = data_full_dmd['x_list'].T
sp_dmd_pred = np.real(data_spdmd['x_list_2'].T[:,:-1])
# plot them others
for i_comp in range(num_components):
# plt.figure(figsize=FIG_SIZE)
plt.figure(figsize=(8,4))
plt.plot(true_tsnap[:-1], true_trajectory[:-1, i_comp], 'k-', label='true')
plt.plot(true_tsnap[:-1], pred_trajectory[:-1, i_comp], 'r--', label='spKDMD')
# plt.plot(true_tsnap[1:], dmd_pred[1:, i_comp], 'c--', label='DMD r=20')
# plt.plot(true_tsnap[:-1], full_dmd_pred[:-1, i_comp], 'g--', label='full DMD (r=200)')
plt.plot(true_tsnap[:], sp_dmd_pred[:, i_comp], 'b--', label='spDMD r=200')
plt.xlabel('time', fontsize=32)
plt.ylabel(r'$x_{' + str(i_comp + 1) + '}$', fontsize=32)
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(path_to_save_fig + '/vs_spdmd_component_' + str(i_comp + 1) + '.png', bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
def pps_component_trj(self):
## LOAD SaveCompareWithTruth
fig_data = np.load(self.eval_dir + '/save_trj_comparison.npz')
true_trajectory = fig_data['ttrj']
pred_trajectory = fig_data['ptrj']
true_tsnap = fig_data['tt']
self.pps_plot_trajectory_given_pred(true_tsnap=true_tsnap,
pred_trajectory=pred_trajectory,
true_trajectory=true_trajectory,
path_to_save_fig=self.pps_dir)
def pps_2d_data_dist(self, data_path):
# only obtain the phase space locations
data2D = np.load(data_path)['Xtrain']
plt.figure(figsize=FIG_SIZE)
plt.scatter(data2D[:,0], data2D[:,1], s=0.1 ,c='k')
plt.xlabel(r'$x_1$',fontsize = 32)
plt.ylabel(r'$x_2$',fontsize = 32)
plt.savefig(
self.pps_dir + '/trainPhaseDist.png',
bbox_inches='tight'
)
plt.close()
def get_cmap(self, n, name='rainbow'):
'''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
RGB color; the keyword argument name must be a standard mpl colormap name.'''
return plt.cm.get_cmap(name, n)
class ClassDictPPS(ClassKoopmanPPS):
def pps_singular_value(self):
raise NotImplementedError()
def pps_eigenmodes_eval(self, y_scale_linear_error=None, y_scale_recon_error=None):
## LOAD ComputeSaveNormalizedEigenError: fig1
fig1_data = np.load(self.eval_dir + '/ComputeSaveNormalizedEigenError_fig1.npz')
normalized_relative_error = fig1_data['nre']
true_tsnap = fig1_data['tt']
linearEvolvingEigen = fig1_data['le']
relative_error = self.params['relative_error']
## draw ComputeSaveNormalizedEigenError: fig1
# but if it is too much, just skip..
if normalized_relative_error.shape[1] <= 30:
# plot normalized relative error for each eigenmodes
plt.figure(figsize=FIG_SIZE)
cmap = self.get_cmap(normalized_relative_error.shape[1])
for i in range(normalized_relative_error.shape[1]):
plt.plot(true_tsnap, normalized_relative_error[:, i], '-', c=cmap(i), label= str(i + 1) + 'th-eigenvalue: ' + "{0:.3f}".format(linearEvolvingEigen[i,i]))
plt.xlabel('time',fontsize = 32)
if relative_error:
plt.ylabel('normalized error',fontsize = 32)
else:
plt.ylabel('error',fontsize = 32)
plt.yscale('log')
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(self.pps_dir + '/normalized_relative_eigen_error.png',
bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
## LOAD ComputeSaveNormalizedEigenError: fig2
fig2_data = np.load(self.eval_dir + '/ComputeSaveNormalizedEigenError_fig2.npz')
mean_normalized_relative_error = fig2_data['mre']
small_to_large_error_eigen_index = fig2_data['stli']
small_to_large_error_eigen_index_kou = fig2_data['stli_kou']
abs_sum_kou = fig2_data['abs_sum_kou']
error_reconstruct_state_list = fig2_data['ersl']
# bool_index_further = fig2_data['iestli'] ## it is removed..
self.small_to_large_error_eigen_index = small_to_large_error_eigen_index
# self.bool_index_further = bool_index_further
## draw ComputeSaveNormalizedEigenError: fig2
# error ordered
fig= plt.figure(figsize=FIG_SIZE)
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.plot(range(1, normalized_relative_error.shape[1] + 1),
mean_normalized_relative_error[small_to_large_error_eigen_index],
'b-^', label='max relative eigenfunction error')
ax1.set_xlabel(r'number of selected eigenmodes $\hat{L}$',fontsize = 32)
# ax1.legend(bbox_to_anchor=(1, 0.5))
ax1.set_yscale('log')
if relative_error:
ax1.set_ylabel('max linear evolving normalized error', color='b',fontsize = 32)
else:
ax1.set_ylabel('max error', color='b',fontsize = 32)
# plot error from reconstruction state from eigenfunction values
ax2.plot(np.arange(1,len(error_reconstruct_state_list)+1),
error_reconstruct_state_list,'r-o',
label='reconstruction normalized error')
if relative_error:
ax2.set_ylabel('reconstruction normalized error', color='r',fontsize = 32)
else:
ax2.set_ylabel('reconstruction error', color='r',fontsize = 32)
# ax2.set_ylim([-1,20])
ax2.set_yscale('log')
# set up ticks
yticks = ticker.LogLocator()
ax1.yaxis.set_major_locator(yticks)
ax2.yaxis.set_major_locator(yticks)
if type(y_scale_linear_error)!=type(None):
# set up range
ax1.set_ylim(y_scale_linear_error)
ax2.set_ylim(y_scale_recon_error)
# ax2.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(self.pps_dir + '/reConstr_decay_normalized_relative_eigen_error.png',
bbox_inches='tight')
plt.close()
## LOAD ComputeSaveNormalizedEigenError: fig3
fig3_data = np.load(self.eval_dir + '/ComputeSaveNormalizedEigenError_fig3.npz')
top_k_modes_list = fig3_data['tkm_index_list']
self.top_k_modes_list = top_k_modes_list
# print out kou's result
if type(small_to_large_error_eigen_index_kou) != type(None):
print('as a comparison: index chosen by Kou criterion: ')
print(small_to_large_error_eigen_index_kou + 1)
print('corresponding abs sum:')
print(abs_sum_kou)
self.index_selected_in_full = self.small_to_large_error_eigen_index[:self.top_k_modes_list[-1] + 1]
# self.index_selected_in_full = self.small_to_large_error_eigen_index
## draw ComputeSaveNormalizedEigenError: fig3
# if the number is larger than 20, not plotting it
if len(top_k_modes_list) <= 20:
# fig. 3: plot normalized relative error for top K smallest error eigenmodes
plt.figure(figsize=FIG_SIZE)
cmap = self.get_cmap(len(top_k_modes_list))
for i in top_k_modes_list:
i_s = small_to_large_error_eigen_index[i]
plt.plot(true_tsnap, normalized_relative_error[:, i_s], '-', c=cmap(i),
label= str(i_s + 1) + 'th-eigenvalue: ' + "{0:.3f}".format(linearEvolvingEigen[i_s,i_s]))
# print eigenvectors
# print 'no. eigen vectors ', i_s+1
# print self.model.KoopmanEigenV[:, i_s]
plt.xlabel('time',fontsize = 32)
if relative_error:
plt.ylabel('normalized error',fontsize = 32)
else:
plt.ylabel('error',fontsize = 32)
plt.yscale('log')
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(self.pps_dir + '/top_' + str(len(top_k_modes_list)) + '_normalized_relative_eigen_error.png',
bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
# load MTENET
fig4_data = np.load(self.eval_dir + '/MultiTaskElasticNet_result.npz')
alphas_enet = fig4_data['alphas_enet']
coefs_enet = fig4_data['coefs_enet']
residual_array = fig4_data['residual_array']
# coefficients vs alpha & number non-zero
num_target_components = coefs_enet.shape[0]
alphas_enet_log_negative = -np.log10(alphas_enet)
# print("coef_enet real= ", np.real(coefs_enet))
# print("coef_enet imag= ", np.imag(coefs_enet))
for i_component in range(num_target_components):
plt.figure(figsize=FIG_SIZE)
cmap = self.get_cmap(len(top_k_modes_list))
for i in top_k_modes_list:
i_s = small_to_large_error_eigen_index[i]
# as suggested by reviewer, not using the No..
# plt.plot(alphas_enet_log_negative, abs(coefs_enet[i_component,i,:]), '-*', c=cmap(i),
# label = 'No. ' + str(i + 1) + ', index = ' + str(i_s+1))
plt.plot(alphas_enet_log_negative, abs(coefs_enet[i_component,i,:]), '-*', c=cmap(i),
label = 'index = ' + str(i_s+1))
max_val = np.max(abs(coefs_enet[i_component, :, -1]))
min_val = np.min(abs(coefs_enet[i_component, :, -1]))
diss = (max_val - min_val)/2
mean = (max_val + min_val)/2
plt.xlabel(r'-$\log_{10}(\alpha)$',fontsize = 32)
plt.ylabel('abs of coefficients',fontsize = 32)
plt.ylim([mean - diss*1.05, mean + diss*3])
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(self.pps_dir + '/multi-elastic-net-coef-' + str(i_component+1) + '.png',
bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
# total number of non-zero terms1
plt.figure(figsize=FIG_SIZE)
num_non_zeros = [len((coefs_enet[i_component, abs(coefs_enet[i_component, :, ii]) >0*np.max(abs(coefs_enet[i_component,:,ii])), ii]))
for ii in range(coefs_enet.shape[2])]
plt.plot(alphas_enet_log_negative, num_non_zeros , 'k^-')
plt.xlabel(r'-$\log_{10}(\alpha)$',fontsize = 32)
plt.ylabel('number of selected features',fontsize = 32)
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(self.pps_dir + '/multi-elastic-net-coef-non-zeros-' + str(i_component+1) + '.png',
bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
num_non_zero_all_alpha = []
for ii in range(coefs_enet.shape[2]):
non_zero_index_per_alpha = []
for i_component in range(num_target_components):
# non_zero_index_per_alpha_per_target = abs(coefs_enet[i_component, :, ii]) > 0
non_zero_index_per_alpha_per_target = abs(coefs_enet[i_component, :, ii]) > 0*np.max(abs(coefs_enet[i_component, :, ii]))
non_zero_index_per_alpha.append(non_zero_index_per_alpha_per_target)
non_zero_index_per_alpha_all_targets = np.logical_or.reduce(non_zero_index_per_alpha)
num_non_zero_all_alpha.append(np.sum(non_zero_index_per_alpha_all_targets))
num_non_zero_all_alpha = np.array(num_non_zero_all_alpha)
# total residual vs alpha AND number of non-zero modes vs alpha
fig=plt.figure(figsize=FIG_SIZE)
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.plot(alphas_enet_log_negative, residual_array, 'k*-')
ax1.set_xlabel(r'-$\log_{10}(\alpha)$',fontsize = 32)
ax1.set_ylabel('normalized reconstruction MSE',color='k',fontsize = 32)
# ax1.set_yscale('log')
ax2.plot(alphas_enet_log_negative, num_non_zero_all_alpha,'r*-')
ax2.set_ylabel('number of selected features',color='r',fontsize = 32)
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(self.pps_dir + '/multi-elastic-net-mse.png',
bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
def pps_scatter_plot_eigenvalue(self, index_selected_array,
path_to_save,
zoomed_X_Y_max=None,
mag_contrib_all_kmd=None,
case_specific_frequency_dict={'draw_st':False}):
## read eigenvalues from model
ev = self.model.Koopman['eigenvalues']
## finally I decided to draw discrete time in discrete time
# if self.model.type == 'd':
# ev = np.log(ev) / self.dt
D_real = np.real(ev)
D_imag = np.imag(ev)
# 1+. eigenvalue distribution
plt.figure(figsize=FIG_SIZE)
plt.grid()
# draw all of the eigenvalue in the rank truncated case, i.e., original version of reduced KDMD
plt.scatter(D_real, D_imag, c='b', label='full')
# draw the one selected
plt.scatter(D_real[index_selected_array], D_imag[index_selected_array], c='r', label='selected')
if self.type == 'd':
# draw circle only for discrete model
theta = np.linspace(0, 2*np.pi ,200)
plt.plot(np.cos(theta), np.sin(theta), 'k-',alpha=0.2)
plt.xlabel(r'Real($\lambda$)',fontsize = 32)
plt.ylabel(r'Imag($\lambda$)',fontsize = 32)
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
# if zoomed version
if type(zoomed_X_Y_max) != type(None):
plt.savefig(path_to_save + '/koopmanEigVal.png', bbox_inches='tight', bbox_extra_artists=(lgd,))
if case_specific_frequency_dict['draw_st']:
## here we add St-lines-plot
theta_for_st = np.linspace(0, np.pi/2, 50)
length_array = np.linspace(0, 1.0, 2)
max_num_tot = case_specific_frequency_dict['max_num_st_lines']
tot = 0
for i, theta_line in enumerate(theta_for_st):
plt.plot(length_array*np.cos(theta_line), length_array*np.sin(theta_line), 'k--', alpha=0.3)
if tot < max_num_tot:
if i % 5 == 0:
tot += 1
St_sample = case_specific_frequency_dict['St_sample']
# cylinder case
# dt = 0.6
# U_infty = 1
# D = 2
# St_sample = D/(dt*U_infty)
St = St_sample * theta_line/(2*np.pi)
# ship case
#
if i == 0:
s = 'St=0'
else:
s = 'St='+"{0:.2f}".format(St)
plt.plot(length_array * np.cos(theta_line), length_array * np.sin(theta_line), 'k-')
plt.text(1.005*length_array[-1]*np.cos(theta_line), 1.005*length_array[-1]*np.sin(theta_line), s,
rotation=theta_line/np.pi*180*0.3 )
if case_specific_frequency_dict['characteristic_st_list'] != None:
for st_char_color_pair in case_specific_frequency_dict['characteristic_st_list']:
st_char = st_char_color_pair[0]
theta_st = st_char / St_sample * 2 * np.pi
plt.plot(length_array * np.cos(theta_st), length_array * np.sin(theta_st), st_char_color_pair[1])
# Std = 0.3012
# Stl = 0.1506
# # # Re 100
# # Std = 0.3386
# # Stl = 0.1694
# # Re 130
# # Std = 0.3634
# # Stl = 0.1816
plt.xlim(zoomed_X_Y_max[0])
plt.ylim(zoomed_X_Y_max[1])
plt.savefig(path_to_save + '/koopmanEigVal_zoomed.png', bbox_inches='tight', bbox_extra_artists=(lgd,))
else:
plt.savefig(path_to_save + '/koopmanEigVal.png', bbox_inches='tight', bbox_extra_artists=(lgd,))
# last, plot the reference plot for eigenvalue and its number
for index in index_selected_array:
plt.text(D_real[index], D_imag[index], str(index))
plt.savefig(path_to_save + '/koopmanEigVal_numbered.png', bbox_inches='tight', bbox_extra_artists=(lgd,))
plt.close()
if self.compare_against_spdmd:
# plot against spDMD result
plt.figure(figsize=FIG_SIZE)
plt.grid()
            # draw all of the eigenvalues in the rank-truncated case, i.e., the original version of reduced KDMD
# plt.scatter(D_real, D_imag, s=30, marker='3',c='b', label='full KDMD')
# draw the one selected
plt.scatter(D_real[index_selected_array], D_imag[index_selected_array], s=150, marker='o',c='r', label='spKDMD',edgecolors='k')
# load data from matlab solution of SPDMD
# data_dmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/20_pred.mat')
# data_full_dmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/200_pred.mat')
# data_spdmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/200_sp_pred.mat') # cylinder re 70
data_spdmd = loadmat('/home/shaowu/Documents/2016_PHD/PROJECTS/2019_aerospace/spdmd/200_sp_pred_ship.mat') # ship airwake
# normal DMD
# D_DMD_real = np.real(np.exp(data_dmd['Edmd']*self.dt))
# D_DMD_imag = np.imag(np.exp(data_dmd['Edmd']*self.dt))
# draw full DMD
# D_full_DMD_real = np.real(np.exp(data_full_dmd['Edmd']*self.dt))
# D_full_DMD_imag = np.imag(np.exp(data_full_dmd['Edmd']*self.dt))
# draw spDMD
# D_sp_DMD_real = np.real(np.exp(data_spdmd['Edmd_select']*self.dt))
# D_sp_DMD_imag = np.imag(np.exp(data_spdmd['Edmd_select']*self.dt))
D_sp_DMD_real = np.real(np.exp(data_spdmd['Edmd_select']*self.dt))
D_sp_DMD_imag = np.imag(np.exp(data_spdmd['Edmd_select']*self.dt))
# plt.scatter(D_DMD_real, D_DMD_imag, s=80, marker='d', c='yellow', label='DMD r=20',edgecolors='k')
# plt.scatter(D_full_DMD_real, D_full_DMD_imag, s=60,marker='s', c='lime', label='full DMD (r=200)',edgecolors='k')
plt.scatter(D_sp_DMD_real, D_sp_DMD_imag, s=50, marker='^', c='b', label='spDMD r=200', edgecolors='k')
# draw circle
theta = np.linspace(0, 2 * np.pi, 200)
plt.plot(np.cos(theta), np.sin(theta), 'k-',alpha=0.2)
plt.xlabel(r'Real($\lambda$)', fontsize=32)
plt.ylabel(r'Imag($\lambda$)', fontsize=32)
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
# if zoomed version
            if zoomed_X_Y_max is not None:
plt.savefig(path_to_save + '/vs_spdmd_koopmanEigVal.png', bbox_inches='tight', bbox_extra_artists=(lgd,))
if case_specific_frequency_dict['draw_st']:
## here we add St-lines-plot
theta_for_st = np.linspace(0, np.pi / 2, 50)
length_array = np.linspace(0, 1.0, 2)
max_num_tot = case_specific_frequency_dict['max_num_st_lines']
tot = 0
for i, theta_line in enumerate(theta_for_st):
plt.plot(length_array * np.cos(theta_line), length_array * np.sin(theta_line), 'k--', alpha=0.3)
if tot < max_num_tot:
if i % 5 == 0:
tot += 1
St_sample = case_specific_frequency_dict['St_sample']
# cylinder case
# dt = 0.6
# U_infty = 1
# D = 2
# St_sample = D/(dt*U_infty)
St = St_sample * theta_line / (2 * np.pi)
# ship case
#
if i == 0:
s = 'St=0'
else:
s = 'St=' + "{0:.2f}".format(St)
plt.plot(length_array * np.cos(theta_line), length_array * np.sin(theta_line), 'k-')
plt.text(1.005 * length_array[-1] * np.cos(theta_line), 1.005 * length_array[-1] * np.sin(theta_line), s,
rotation=theta_line / np.pi * 180 * 0.3)
if case_specific_frequency_dict['characteristic_st_list'] != None:
for st_char_color_pair in case_specific_frequency_dict['characteristic_st_list']:
st_char = st_char_color_pair[0]
theta_st = st_char / St_sample * 2 * np.pi
plt.plot(length_array * np.cos(theta_st), length_array * np.sin(theta_st), st_char_color_pair[1])
plt.xlim(zoomed_X_Y_max[0])
plt.ylim(zoomed_X_Y_max[1])
plt.savefig(path_to_save + '/vs_spdmd_koopmanEigVal_zoomed.png', bbox_inches='tight', bbox_extra_artists=(lgd,))
else:
plt.savefig(path_to_save + '/vs_spdmd_koopmanEigVal.png', bbox_inches='tight', bbox_extra_artists=(lgd,))
plt.close()
        # print the final number of Koopman modes
print("final number of modes = ", len(D_real[index_selected_array]))
# if we would plot the mag. percent. of each mode
if type(mag_contrib_all_kmd) != type(None):
plt.figure(figsize=FIG_SIZE)
plt.plot(np.arange(1, len(mag_contrib_all_kmd)+1), mag_contrib_all_kmd)
plt.xticks(np.arange(1, len(mag_contrib_all_kmd)+1))
plt.xlabel('index of eigenvalue',fontsize = 32)
plt.ylabel('mag. percent. of each eigen-modes',fontsize = 32)
plt.close()
np.savez(self.pps_dir + 'eigenvalue_circle.npz',
ev_real=D_real[index_selected_array],
ev_imag=D_imag[index_selected_array],
mag_percent=mag_contrib_all_kmd)
def pps_eigenvalues(self, zoomed_X_Y_max=None, mag_contrib_all_kmd=None,case_specific_frequency_dict={'draw_st':False}):
# simply draw the scatter plot of eigenvalue
self.pps_scatter_plot_eigenvalue(index_selected_array=self.index_selected_in_full, path_to_save=self.pps_dir,
zoomed_X_Y_max=zoomed_X_Y_max,mag_contrib_all_kmd=mag_contrib_all_kmd,
case_specific_frequency_dict=case_specific_frequency_dict)
return
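    # -------------------------------------------------------------------------
    # Minimal call sketch (values are illustrative only; the cylinder numbers
    # follow the commented St_sample = D/(dt*U_infty) = 2/0.6 and Std/Stl
    # shedding frequencies above):
    #
    #   pps.pps_eigenvalues(
    #       zoomed_X_Y_max=[(0.5, 1.05), (-0.5, 0.5)],
    #       case_specific_frequency_dict={
    #           'draw_st': True,
    #           'max_num_st_lines': 5,
    #           'St_sample': 2.0 / 0.6,
    #           'characteristic_st_list': [(0.3012, 'r--'), (0.1506, 'b--')],
    #       })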
def plot_eigenfunction_given_index_and_path(self, index_of_eigens, save_path):
## load eigenfunction data
fig_data = np.load(self.model_dir + '/koopman_eigenfunctions.npz')
numKoopmanModes = fig_data['numKoopmanModes']
phi_eigen_array = fig_data['phi_eigen_array']
ndraw = fig_data['ndraw']
D_real = fig_data['D_real']
D_imag = fig_data['D_imag']
x1_ = fig_data['x1_']
x2_ = fig_data['x2_']
for ikoopman in index_of_eigens:
# R_i = R[:, ikoopman:ikoopman + 1]
# phi_eigen = np.matmul(phi_array, R_i)
phi_eigen = phi_eigen_array[:, ikoopman:ikoopman + 1]
phi_eigen_mesh = phi_eigen.reshape((ndraw, ndraw))
            # draw the magnitude |phi| of the eigenfunction
plt.figure(figsize=FIG_SIZE)
plt.xlabel(r'$x_1$',fontsize = 32)
plt.ylabel(r'$x_2$',fontsize = 32)
plt.title(r'$\lambda$ = ' + "{0:.3f}".format(D_real[ikoopman]) + ' + ' + "{0:.3f}".format(D_imag[ikoopman]) + 'i')
plt.contourf(x1_, x2_, np.abs(phi_eigen_mesh), 100, cmap=plt.cm.get_cmap('jet'))
plt.colorbar(format=ticker.FuncFormatter(fmt))
plt.savefig(save_path + '/koopmanEigFunct_MAG_mode_' + str(ikoopman + 1) + '.png', bbox_inches='tight')
plt.close()
# draw phase angle of eigenfunction
plt.figure(figsize=FIG_SIZE)
plt.xlabel(r'$x_1$',fontsize = 32)
plt.ylabel(r'$x_2$',fontsize = 32)
plt.title(r'$\lambda$ = ' + "{0:.3f}".format(D_real[ikoopman]) + ' + ' + "{0:.3f}".format(D_imag[ikoopman]) + 'i')
plt.contourf(x1_, x2_, np.angle(phi_eigen_mesh), 100, cmap=plt.cm.get_cmap('jet'))
plt.colorbar(format=ticker.FuncFormatter(fmt))
plt.savefig(save_path + '/koopmanEigFunct_ANG_mode_' + str(ikoopman + 1) + '.png', bbox_inches='tight')
plt.close()
def pps_eigenfunction(self):
self.plot_eigenfunction_given_index_and_path(index_of_eigens=self.index_selected_in_full, save_path=self.pps_dir)
class ClassEDMDPPS(ClassDictPPS):
def __init__(self, pps_dir, eval_dir, model_dir, params,draw_eigen_function_plot=True):
super(ClassEDMDPPS, self).__init__(pps_dir, eval_dir, model_dir, params,draw_eigen_function_plot)
model_path = model_dir + '/edmd.model'
self.model = pickle.load(open(model_path, "rb"))
self.type = self.model.type
def pps_singular_value(self):
# load full singular value data
fig_data = np.load(self.eval_dir + '/sv.npz')
full_sv = fig_data['full_sv'] # this is the Ghat eigenvalues without any truncation
# plot singular value decay
plt.figure(figsize=FIG_SIZE)
plt.plot(np.arange(1, 1+len(full_sv)), full_sv, 'k-o', markersize=1)
plt.xlabel('number of terms',fontsize = 32)
plt.yscale('log')
plt.ylabel(r'singular values of $\Phi_X$',fontsize = 32)
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(self.pps_dir + '/sv_full_phi_x.png', bbox_inches='tight', bbox_extra_artists=(lgd,))
plt.close()
def pps_2d_simple_lusch_effect_svd_on_phi(self):
## LOAD data: compute_save_svd_effect_on_phi_edmd
fig_data = np.load(self.eval_dir + '/compute_save_svd_effect_on_phi_edmd.npz')
phi_ev_after_svd_list = fig_data['phi_r_ev_list']
phi_ev_before_svd = fig_data['phi_ev']
cmap = self.get_cmap(len(phi_ev_after_svd_list))
plt.figure(figsize=FIG_SIZE)
for i in range(len(phi_ev_after_svd_list)):
plt.plot(range(1, phi_ev_before_svd.shape[1] + 1), np.linalg.norm(phi_ev_after_svd_list[i], axis=0),
color=cmap(i), label='r = '+str(i+1),alpha=0.7)
plt.plot(range(1, phi_ev_before_svd.shape[1] + 1), np.linalg.norm(phi_ev_before_svd, axis=0), 'o-' ,
label='before SVD')
plt.xlabel(r'feature index',fontsize = 32)
plt.yscale('log')
plt.ylabel(r'$\Vert \Phi V_i \Vert_2$',fontsize = 32)
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(self.pps_dir + '/effect_svd_on_phi.png', bbox_inches='tight', bbox_extra_artists=(lgd,))
# plt.show()
plt.close()
def pps_lsq_spectrum(self):
X = self.model.Phi_X_i_sample
Y = self.model.Phi_Xdot_i_sample
np.savez(self.pps_dir + '/lsq_spectrum.npz', X=X,Y=Y)
class ClassKDMDPPS(ClassDictPPS):
def __init__(self, pps_dir, eval_dir, model_dir, params,draw_eigen_function_plot=True, compare_against_spdmd=False):
super(ClassKDMDPPS, self).__init__(pps_dir, eval_dir, model_dir, params, draw_eigen_function_plot, compare_against_spdmd)
model_path = model_dir + '/kdmd.model'
self.model = pickle.load(open(model_path, "rb"))
self.type = self.model.type
if self.type == 'd':
self.dt = self.model.dt
print('discrete mode, with dt = ', self.dt)
def pps_singular_value(self):
# load full singular value data
fig_data = np.load(self.eval_dir + '/sv_squared.npz')
full_sv_squared = fig_data['full_sv_squared'] # this is the Ghat eigenvalues without any truncation
# cut off at 1e-7 due to double precision
full_sv_squared = full_sv_squared[np.abs(full_sv_squared) > 1e-14]
# plot singular value decay
plt.figure(figsize=FIG_SIZE)
plt.plot(np.arange(1, 1+len(full_sv_squared)), np.sqrt(np.abs(full_sv_squared[::-1])),'k-o',markersize=1)
plt.xlabel('number of terms',fontsize = 32)
plt.yscale('log')
plt.ylabel(r'singular values of $\Phi_X$',fontsize = 32)
lgd = plt.legend(bbox_to_anchor=(1, 0.5))
plt.savefig(self.pps_dir + '/sv_full_phi_x.png', bbox_inches='tight', bbox_extra_artists=(lgd,))
plt.close()
#
# if __name__=='__main__':
# # testing on
# # case = '2d_lusch'
# case = '2d_duffing'
#
# if case == '2d_lusch':
#
#
#
# elif case == '2d_duffing':
#
# # 3. postprocessing for DLDMD model + apo eval
# pps_dldmd = ClassDLPPS(pps_dir='./2d_duffing_otto2017/dldmd',
# eval_dir='../eval_src/2d_duffing_otto2017-dl',
# model_dir='../model_src/result/2d_duffing_otto2017/dldmd_2019-02-16-03-58-22/model_saved',
# params={}
# )
#
# # plot eigenvalues
# pps_dldmd.pps_eigenvalues()
#
# # plot eigenfunctions
# pps_dldmd.pps_eigenfunction()
#
# # plot aposteriori component trj comparison
# pps_dldmd.pps_component_trj()
#
# # plot learning curve
# pps_dldmd.pps_learning_curve()
|
the-stack_0_20814 | from django.conf.urls import include, url, patterns
from cms.apphook_pool import apphook_pool
from cms.views import details
from django.conf import settings
if settings.APPEND_SLASH:
reg = url(r'^(?P<slug>[0-9A-Za-z-_.//]+)/$', details, name='pages-details-by-slug')
else:
reg = url(r'^(?P<slug>[0-9A-Za-z-_.//]+)$', details, name='pages-details-by-slug')
urlpatterns = [
# Public pages
url(r'^example/',
include('cms.test_utils.project.sampleapp.urls_example', namespace="example1", app_name='example_app')),
url(r'^example2/',
include('cms.test_utils.project.sampleapp.urls_example', namespace="example2", app_name='example_app')),
url(r'^$', details, {'slug': ''}, name='pages-root'),
reg,
]
if apphook_pool.get_apphooks():
"""If there are some application urls, add special resolver, so we will
have standard reverse support.
"""
from cms.appresolver import get_app_patterns
urlpatterns = get_app_patterns() + urlpatterns
#urlpatterns = (dynamic_app_regex_url_resolver, ) + urlpatterns
urlpatterns = patterns('', *urlpatterns)
|
the-stack_0_20819 | # -*- coding: utf-8 -*-
#
# pptk documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 10 11:29:10 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.imgmath',
'sphinx.ext.githubpages']
def setup(app):
app.add_stylesheet('css/custom.css')
imgmath_image_format = 'svg'
imgmath_dvisvgm = r'F:\Programs\dvisvgm-2.3.3-win64\dvisvgm.exe'
autodoc_member_order = 'groupwise'
autosummary_generate = True
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pptk'
copyright = u'2018, HERE Europe B.V.'
author = u'Victor Lu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pptkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pptk.tex', u'pptk Documentation',
u'Victor Lu, HERE Europe B.V.', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pptk', u'pptk Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pptk', u'pptk Documentation',
author, 'pptk', 'One line description of project.',
'Miscellaneous'),
]
|
the-stack_0_20820 | import os
from src.configuration.config import Configs
from src.utils.logging_engine import logger
# Output logs through console and files
def ini_logger(file_name, level='info'):
    log_folder = os.path.join(Configs.instance_output_folder, 'log')  # change output_folder to instance_output_folder
if not os.path.exists(log_folder):
os.makedirs(log_folder)
delete_files(log_folder, Configs.MAX_LOG_FILE_NUM)
log_file = os.path.join(log_folder, file_name)
logger.add_file_output(log_file, level)
def remove_file_handler_of_logging(file_name: str):
    log_folder = os.path.join(Configs.instance_output_folder, 'log')  # change output_folder to instance_output_folder
file_path = os.path.join(log_folder, file_name)
try:
logger.remove_file_handler(file_path)
except Exception as e:
print(f"Failed to remove file handler {file_path}, reason: {e}")
def delete_files(file_folder, max_num):
"""
Inputs:
        - file_folder: target folder (absolute path)
        - max_num: maximum allowed number of files
"""
num = count_file(file_folder)
if num > max_num:
delete_num = max_num // 2
total_files_and_dirs = os.listdir(file_folder)
total_files = []
for item in total_files_and_dirs:
if not os.path.isdir(os.path.join(file_folder, item)):
total_files.append(item)
total_files.sort()
for i in range(delete_num):
os.remove(os.path.join(file_folder, total_files[i]))
def count_file(directory):
'''
    Count the number of files directly under the target folder (does not recurse into subfolders).
'''
file_num = 0
if not os.path.exists(directory):
os.makedirs(directory)
for item in os.listdir(directory):
if os.path.isfile(os.path.join(directory, item)):
file_num += 1
return file_num |
the-stack_0_20821 | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Schedule plugin with a subclass of drivers.IrcDriver in order to be run as a
Supybot driver.
"""
from __future__ import with_statement
import time
import heapq
import functools
from threading import Lock
from . import drivers, log, world
class mytuple(tuple):
def __cmp__(self, other):
return cmp(self[0], other[0])
def __le__(self, other):
return self[0] <= other[0]
def __lt__(self, other):
return self[0] < other[0]
def __gt__(self, other):
return self[0] > other[0]
def __ge__(self, other):
return self[0] >= other[0]
class Schedule(drivers.IrcDriver):
"""An IrcDriver to handling scheduling of events.
Events, in this case, are functions accepting no arguments.
"""
def __init__(self):
drivers.IrcDriver.__init__(self)
self.schedule = []
self.events = {}
self.counter = 0
self.lock = Lock()
def reset(self):
with self.lock:
self.events.clear()
self.schedule[:] = []
# We don't reset the counter here because if someone has held an id of
# one of the nuked events, we don't want them removing new events with
# their old id.
def name(self):
return 'Schedule'
def addEvent(self, f, t, name=None, args=[], kwargs={}):
"""Schedules an event f to run at time t.
name must be hashable and not an int.
"""
if name is None:
name = self.counter
self.counter += 1
assert name not in self.events, \
'An event with the same name has already been scheduled.'
with self.lock:
self.events[name] = f
heapq.heappush(self.schedule, mytuple((t, name, args, kwargs)))
return name
def removeEvent(self, name):
"""Removes the event with the given name from the schedule."""
f = self.events.pop(name)
# We must heapify here because the heap property may not be preserved
        # by the list comprehension below. We could, conceivably, just mark
# the elements of the heap as removed and ignore them when we heappop,
# but that would only save a constant factor (we're already linear for
# the listcomp) so I'm not worried about it right now.
with self.lock:
self.schedule = [x for x in self.schedule if x[1] != name]
heapq.heapify(self.schedule)
return f
def rescheduleEvent(self, name, t):
f = self.removeEvent(name)
self.addEvent(f, t, name=name)
def addPeriodicEvent(self, f, t, name=None, now=True, args=[], kwargs={},
count=None):
"""Adds a periodic event that is called every t seconds."""
def wrapper(count):
try:
f(*args, **kwargs)
finally:
# Even if it raises an exception, let's schedule it.
if count[0] is not None:
count[0] -= 1
if count[0] is None or count[0] > 0:
return self.addEvent(wrapper, time.time() + t, name)
wrapper = functools.partial(wrapper, [count])
if now:
return wrapper()
else:
return self.addEvent(wrapper, time.time() + t, name)
removePeriodicEvent = removeEvent
def run(self):
if len(drivers._drivers) == 1 and not world.testing:
log.error('Schedule is the only remaining driver, '
'why do we continue to live?')
time.sleep(1) # We're the only driver; let's pause to think.
while self.schedule and self.schedule[0][0] < time.time():
with self.lock:
(t, name, args, kwargs) = heapq.heappop(self.schedule)
f = self.events.pop(name)
try:
f(*args, **kwargs)
except Exception:
log.exception('Uncaught exception in scheduled function:')
schedule = Schedule()
addEvent = schedule.addEvent
removeEvent = schedule.removeEvent
rescheduleEvent = schedule.rescheduleEvent
addPeriodicEvent = schedule.addPeriodicEvent
removePeriodicEvent = removeEvent
run = schedule.run
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
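# -----------------------------------------------------------------------------
# Minimal usage sketch of the module-level aliases above, in the way plugin
# code typically drives this scheduler (import path and names are illustrative,
# not prescriptive):
#
#     import time
#     import supybot.schedule as schedule
#
#     def remind():
#         print('event fired')
#
#     schedule.addEvent(remind, time.time() + 5, name='reminder')   # run once in ~5 s
#     schedule.removeEvent('reminder')                              # or cancel it again
#
#     schedule.addPeriodicEvent(remind, 60, name='heartbeat', now=False)
#     schedule.removePeriodicEvent('heartbeat')
#
# Events only fire when the driver loop calls run() on entries whose scheduled
# time has passed; nothing here spawns threads.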
|
the-stack_0_20822 | #!/usr/bin/env python
# Copyright 2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: panos_check
short_description: check if PAN-OS device is ready for configuration
description:
- Check if PAN-OS device is ready for being configured (no pending jobs).
- The check could be done once or multiple times until the device is ready.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
- pandevice
notes:
- Panorama is supported.
- Checkmode is not supported.
extends_documentation_fragment:
- panos.transitional_provider
options:
initial_delay:
description:
- Length of time (in seconds) to wait before doing any readiness checks.
default: 0
type: int
timeout:
description:
- Length of time (in seconds) to wait for jobs to finish.
default: 60
type: int
interval:
description:
- Length of time (in seconds) to wait between checks.
default: 0
type: int
'''
EXAMPLES = '''
# Single check.
- name: check if ready
panos_check:
provider: '{{ provider }}'
timeout: 0
# Wait 2 minutes, then check every 5 seconds for 10 minutes.
- name: wait for reboot
panos_check:
provider: '{{ provider }}'
initial_delay: 120
interval: 5
timeout: 600
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.panos.panos import get_connection
try:
from pandevice.errors import PanDeviceError
except ImportError:
pass
def check_jobs(jobs):
for j in jobs:
status = j.find('.//status')
if status is None or status.text != 'FIN':
return False
return True
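# ------------------------------------------------------------------------------
# Illustration of what check_jobs() consumes: ElementTree job elements parsed
# from the device's "show jobs all" reply, each shaped roughly like
#     <job> ... <status>FIN</status> ... </job>
# The function returns True only when every listed job reports status FIN.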
def main():
helper = get_connection(
with_classic_provider_spec=True,
argument_spec=dict(
initial_delay=dict(default=0, type='int'),
timeout=dict(default=60, type='int'),
interval=dict(default=0, type='int')
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=False,
required_one_of=helper.required_one_of,
)
# Optional delay before performing readiness checks.
if module.params['initial_delay']:
time.sleep(module.params['initial_delay'])
timeout = module.params['timeout']
interval = module.params['interval']
end_time = time.time() + timeout
parent = helper.get_pandevice_parent(module, timeout)
# TODO(gfreeman) - consider param for "show chassis-ready".
while True:
try:
ans = parent.op(cmd="show jobs all")
except PanDeviceError:
pass
else:
jobs = ans.findall('.//job')
if check_jobs(jobs):
break
if time.time() > end_time:
module.fail_json(msg='Timeout')
time.sleep(interval)
module.exit_json(changed=True, msg="done")
if __name__ == '__main__':
main() |
the-stack_0_20823 | """Market module to interact with Serum DEX."""
from __future__ import annotations
from typing import List
from solana.account import Account
from solana.publickey import PublicKey
from solana.rpc.api import Client
from solana.rpc.types import RPCResponse, TxOpts
from solana.transaction import Transaction
from pyserum import instructions
import pyserum.market.types as t
from .._layouts.open_orders import OPEN_ORDERS_LAYOUT
from ..enums import OrderType, Side
from ..open_orders_account import OpenOrdersAccount
from ..utils import load_bytes_data
from ._internal.queue import decode_event_queue, decode_request_queue
from .orderbook import OrderBook
from .state import MarketState
from .core import MarketCore
LAMPORTS_PER_SOL = 1000000000
# pylint: disable=too-many-public-methods,abstract-method
class Market(MarketCore):
"""Represents a Serum Market."""
def __init__(self, conn: Client, market_state: MarketState, force_use_request_queue: bool = False) -> None:
super().__init__(market_state=market_state, force_use_request_queue=force_use_request_queue)
self._conn = conn
@classmethod
# pylint: disable=unused-argument
def load(
cls,
conn: Client,
market_address: PublicKey,
program_id: PublicKey = instructions.DEFAULT_DEX_PROGRAM_ID,
force_use_request_queue: bool = False,
) -> Market:
"""Factory method to create a Market.
:param conn: The connection that we use to load the data, created from `solana.rpc.api`.
:param market_address: The market address that you want to connect to.
:param program_id: The program id of the given market, it will use the default value if not provided.
"""
market_state = MarketState.load(conn, market_address, program_id)
return cls(conn, market_state, force_use_request_queue)
def find_open_orders_accounts_for_owner(self, owner_address: PublicKey) -> List[OpenOrdersAccount]:
return OpenOrdersAccount.find_for_market_and_owner(
self._conn, self.state.public_key(), owner_address, self.state.program_id()
)
def load_bids(self) -> OrderBook:
"""Load the bid order book"""
bytes_data = load_bytes_data(self.state.bids(), self._conn)
return self._parse_bids_or_asks(bytes_data)
def load_asks(self) -> OrderBook:
"""Load the ask order book."""
bytes_data = load_bytes_data(self.state.asks(), self._conn)
return self._parse_bids_or_asks(bytes_data)
def load_orders_for_owner(self, owner_address: PublicKey) -> List[t.Order]:
"""Load orders for owner."""
bids = self.load_bids()
asks = self.load_asks()
open_orders_accounts = self.find_open_orders_accounts_for_owner(owner_address)
return self._parse_orders_for_owner(bids, asks, open_orders_accounts)
def load_event_queue(self) -> List[t.Event]:
"""Load the event queue which includes the fill item and out item. For any trades two fill items are added to
the event queue. And in case of a trade, cancel or IOC order that missed, out items are added to the event
queue.
"""
bytes_data = load_bytes_data(self.state.event_queue(), self._conn)
return decode_event_queue(bytes_data)
def load_request_queue(self) -> List[t.Request]:
bytes_data = load_bytes_data(self.state.request_queue(), self._conn)
return decode_request_queue(bytes_data)
def load_fills(self, limit=100) -> List[t.FilledOrder]:
bytes_data = load_bytes_data(self.state.event_queue(), self._conn)
return self._parse_fills(bytes_data, limit)
def place_order( # pylint: disable=too-many-arguments,too-many-locals
self,
payer: PublicKey,
owner: Account,
order_type: OrderType,
side: Side,
limit_price: float,
max_quantity: float,
client_id: int = 0,
opts: TxOpts = TxOpts(),
) -> RPCResponse: # TODO: Add open_orders_address_key param and fee_discount_pubkey
transaction = Transaction()
signers: List[Account] = [owner]
open_order_accounts = self.find_open_orders_accounts_for_owner(owner.public_key())
if open_order_accounts:
place_order_open_order_account = open_order_accounts[0].address
else:
mbfre_resp = self._conn.get_minimum_balance_for_rent_exemption(OPEN_ORDERS_LAYOUT.sizeof())
place_order_open_order_account = self._after_oo_mbfre_resp(
mbfre_resp=mbfre_resp, owner=owner, signers=signers, transaction=transaction
)
# TODO: Cache new_open_orders_account
# TODO: Handle fee_discount_pubkey
self._prepare_order_transaction(
transaction=transaction,
payer=payer,
owner=owner,
order_type=order_type,
side=side,
signers=signers,
limit_price=limit_price,
max_quantity=max_quantity,
client_id=client_id,
open_order_accounts=open_order_accounts,
place_order_open_order_account=place_order_open_order_account,
)
return self._conn.send_transaction(transaction, *signers, opts=opts)
def cancel_order_by_client_id(
self, owner: Account, open_orders_account: PublicKey, client_id: int, opts: TxOpts = TxOpts()
) -> RPCResponse:
txs = self._build_cancel_order_by_client_id_tx(
owner=owner, open_orders_account=open_orders_account, client_id=client_id
)
return self._conn.send_transaction(txs, owner, opts=opts)
def cancel_order(self, owner: Account, order: t.Order, opts: TxOpts = TxOpts()) -> RPCResponse:
txn = self._build_cancel_order_tx(owner=owner, order=order)
return self._conn.send_transaction(txn, owner, opts=opts)
def match_orders(self, fee_payer: Account, limit: int, opts: TxOpts = TxOpts()) -> RPCResponse:
txn = self._build_match_orders_tx(limit)
return self._conn.send_transaction(txn, fee_payer, opts=opts)
def settle_funds( # pylint: disable=too-many-arguments
self,
owner: Account,
open_orders: OpenOrdersAccount,
base_wallet: PublicKey,
quote_wallet: PublicKey, # TODO: add referrer_quote_wallet.
opts: TxOpts = TxOpts(),
) -> RPCResponse:
# TODO: Handle wrapped sol accounts
should_wrap_sol = self._settle_funds_should_wrap_sol()
min_bal_for_rent_exemption = (
self._conn.get_minimum_balance_for_rent_exemption(165)["result"] if should_wrap_sol else 0
) # value only matters if should_wrap_sol
signers = [owner]
transaction = self._build_settle_funds_tx(
owner=owner,
signers=signers,
open_orders=open_orders,
base_wallet=base_wallet,
quote_wallet=quote_wallet,
min_bal_for_rent_exemption=min_bal_for_rent_exemption,
should_wrap_sol=should_wrap_sol,
)
return self._conn.send_transaction(transaction, *signers, opts=opts)
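# ------------------------------------------------------------------------------
# Minimal read-only usage sketch of the class above (the endpoint is the public
# mainnet RPC URL and the market address is a placeholder to fill in):
#
#     conn = Client("https://api.mainnet-beta.solana.com")
#     market = Market.load(conn, PublicKey("<serum market address>"))
#     for ask in market.load_asks():          # OrderBook yields t.Order entries
#         print(ask.order_id, ask.info.price, ask.info.size)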
|
the-stack_0_20827 | # -*- coding: utf-8 -*-
"""Subclass of InteractiveShell for terminal based frontends."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <[email protected]>
# Copyright (C) 2001-2007 Fernando Perez. <[email protected]>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import bdb
import os
import sys
from IPython.core.error import TryNext, UsageError
from IPython.core.usage import interactive_usage
from IPython.core.inputsplitter import IPythonInputSplitter
from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.lib.clipboard import ClipboardEmpty
from IPython.utils.encoding import get_stream_enc
from IPython.utils import py3compat
from IPython.utils.terminal import toggle_set_term_title, set_term_title
from IPython.utils.process import abbrev_cwd
from IPython.utils.warn import warn, error
from IPython.utils.text import num_ini_spaces, SList, strip_email_quotes
from traitlets import Integer, CBool, Unicode
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def get_default_editor():
try:
ed = os.environ['EDITOR']
if not py3compat.PY3:
ed = ed.decode()
return ed
except KeyError:
pass
except UnicodeError:
warn("$EDITOR environment variable is not pure ASCII. Using platform "
"default editor.")
if os.name == 'posix':
return 'vi' # the only one guaranteed to be there!
else:
return 'notepad' # same in Windows!
def get_pasted_lines(sentinel, l_input=py3compat.input, quiet=False):
""" Yield pasted lines until the user enters the given sentinel value.
"""
if not quiet:
print("Pasting code; enter '%s' alone on the line to stop or use Ctrl-D." \
% sentinel)
prompt = ":"
else:
prompt = ""
while True:
try:
l = py3compat.str_to_unicode(l_input(prompt))
if l == sentinel:
return
else:
yield l
except EOFError:
print('<EOF>')
return
#------------------------------------------------------------------------
# Terminal-specific magics
#------------------------------------------------------------------------
@magics_class
class TerminalMagics(Magics):
def __init__(self, shell):
super(TerminalMagics, self).__init__(shell)
self.input_splitter = IPythonInputSplitter()
def store_or_execute(self, block, name):
""" Execute a block, or store it in a variable, per the user's request.
"""
if name:
# If storing it for further editing
self.shell.user_ns[name] = SList(block.splitlines())
print("Block assigned to '%s'" % name)
else:
b = self.preclean_input(block)
self.shell.user_ns['pasted_block'] = b
self.shell.using_paste_magics = True
try:
self.shell.run_cell(b)
finally:
self.shell.using_paste_magics = False
def preclean_input(self, block):
lines = block.splitlines()
while lines and not lines[0].strip():
lines = lines[1:]
return strip_email_quotes('\n'.join(lines))
def rerun_pasted(self, name='pasted_block'):
""" Rerun a previously pasted command.
"""
b = self.shell.user_ns.get(name)
# Sanity checks
if b is None:
raise UsageError('No previous pasted block available')
if not isinstance(b, py3compat.string_types):
raise UsageError(
"Variable 'pasted_block' is not a string, can't execute")
print("Re-executing '%s...' (%d chars)"% (b.split('\n',1)[0], len(b)))
self.shell.run_cell(b)
@line_magic
def autoindent(self, parameter_s = ''):
"""Toggle autoindent on/off (if available)."""
self.shell.set_autoindent()
print("Automatic indentation is:",['OFF','ON'][self.shell.autoindent])
@line_magic
def cpaste(self, parameter_s=''):
"""Paste & execute a pre-formatted code block from clipboard.
You must terminate the block with '--' (two minus-signs) or Ctrl-D
alone on the line. You can also provide your own sentinel with '%paste
-s %%' ('%%' is the new sentinel for this operation).
The block is dedented prior to execution to enable execution of method
definitions. '>' and '+' characters at the beginning of a line are
ignored, to allow pasting directly from e-mails, diff files and
doctests (the '...' continuation prompt is also stripped). The
executed block is also assigned to variable named 'pasted_block' for
later editing with '%edit pasted_block'.
You can also pass a variable name as an argument, e.g. '%cpaste foo'.
This assigns the pasted block to variable 'foo' as string, without
dedenting or executing it (preceding >>> and + is still stripped)
'%cpaste -r' re-executes the block previously entered by cpaste.
'%cpaste -q' suppresses any additional output messages.
Do not be alarmed by garbled output on Windows (it's a readline bug).
Just press enter and type -- (and press enter again) and the block
will be what was just pasted.
IPython statements (magics, shell escapes) are not supported (yet).
See also
--------
paste: automatically pull code from clipboard.
Examples
--------
::
In [8]: %cpaste
Pasting code; enter '--' alone on the line to stop.
:>>> a = ["world!", "Hello"]
:>>> print " ".join(sorted(a))
:--
Hello world!
"""
opts, name = self.parse_options(parameter_s, 'rqs:', mode='string')
if 'r' in opts:
self.rerun_pasted()
return
quiet = ('q' in opts)
sentinel = opts.get('s', u'--')
block = '\n'.join(get_pasted_lines(sentinel, quiet=quiet))
self.store_or_execute(block, name)
@line_magic
def paste(self, parameter_s=''):
"""Paste & execute a pre-formatted code block from clipboard.
The text is pulled directly from the clipboard without user
intervention and printed back on the screen before execution (unless
the -q flag is given to force quiet mode).
The block is dedented prior to execution to enable execution of method
definitions. '>' and '+' characters at the beginning of a line are
ignored, to allow pasting directly from e-mails, diff files and
doctests (the '...' continuation prompt is also stripped). The
executed block is also assigned to variable named 'pasted_block' for
later editing with '%edit pasted_block'.
You can also pass a variable name as an argument, e.g. '%paste foo'.
This assigns the pasted block to variable 'foo' as string, without
executing it (preceding >>> and + is still stripped).
Options:
-r: re-executes the block previously entered by cpaste.
-q: quiet mode: do not echo the pasted text back to the terminal.
IPython statements (magics, shell escapes) are not supported (yet).
See also
--------
cpaste: manually paste code into terminal until you mark its end.
"""
opts, name = self.parse_options(parameter_s, 'rq', mode='string')
if 'r' in opts:
self.rerun_pasted()
return
try:
block = self.shell.hooks.clipboard_get()
except TryNext as clipboard_exc:
message = getattr(clipboard_exc, 'args')
if message:
error(message[0])
else:
error('Could not get text from the clipboard.')
return
except ClipboardEmpty:
raise UsageError("The clipboard appears to be empty")
# By default, echo back to terminal unless quiet mode is requested
if 'q' not in opts:
write = self.shell.write
write(self.shell.pycolorize(block))
if not block.endswith('\n'):
write('\n')
write("## -- End pasted text --\n")
self.store_or_execute(block, name)
# Class-level: add a '%cls' magic only on Windows
if sys.platform == 'win32':
@line_magic
def cls(self, s):
"""Clear screen.
"""
os.system("cls")
#-----------------------------------------------------------------------------
# Main class
#-----------------------------------------------------------------------------
class TerminalInteractiveShell(InteractiveShell):
autoedit_syntax = CBool(False, config=True,
help="auto editing of files with syntax errors.")
confirm_exit = CBool(True, config=True,
help="""
Set to confirm when you try to exit IPython with an EOF (Control-D
in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
you can force a direct exit without any confirmation.""",
)
# This display_banner only controls whether or not self.show_banner()
# is called when mainloop/interact are called. The default is False
# because for the terminal based application, the banner behavior
# is controlled by the application.
display_banner = CBool(False) # This isn't configurable!
embedded = CBool(False)
embedded_active = CBool(False)
editor = Unicode(get_default_editor(), config=True,
help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
)
pager = Unicode('less', config=True,
help="The shell program to be used for paging.")
screen_length = Integer(0, config=True,
help=
"""Number of lines of your screen, used to control printing of very
long strings. Strings longer than this number of lines will be sent
through a pager instead of directly printed. The default value for
this is 0, which means IPython will auto-detect your screen size every
time it needs to print certain potentially long strings (this doesn't
change the behavior of the 'print' keyword, it's only triggered
internally). If for some reason this isn't working well (it needs
curses support), specify it yourself. Otherwise don't change the
default.""",
)
term_title = CBool(False, config=True,
help="Enable auto setting the terminal title."
)
usage = Unicode(interactive_usage)
# This `using_paste_magics` is used to detect whether the code is being
# executed via paste magics functions
using_paste_magics = CBool(False)
# In the terminal, GUI control is done via PyOS_InputHook
@staticmethod
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
"""
# Deferred import
from IPython.lib.inputhook import enable_gui as real_enable_gui
try:
return real_enable_gui(gui, app)
except ValueError as e:
raise UsageError("%s" % e)
system = InteractiveShell.system_raw
#-------------------------------------------------------------------------
# Overrides of init stages
#-------------------------------------------------------------------------
def init_display_formatter(self):
super(TerminalInteractiveShell, self).init_display_formatter()
# terminal only supports plaintext
self.display_formatter.active_types = ['text/plain']
#-------------------------------------------------------------------------
# Things related to the terminal
#-------------------------------------------------------------------------
@property
def usable_screen_length(self):
if self.screen_length == 0:
return 0
else:
num_lines_bot = self.separate_in.count('\n')+1
return self.screen_length - num_lines_bot
def _term_title_changed(self, name, new_value):
self.init_term_title()
def init_term_title(self):
# Enable or disable the terminal title.
if self.term_title:
toggle_set_term_title(True)
set_term_title('IPython: ' + abbrev_cwd())
else:
toggle_set_term_title(False)
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
# The parent class defines aliases that can be safely used with any
# frontend.
super(TerminalInteractiveShell, self).init_alias()
# Now define aliases that only make sense on the terminal, because they
# need direct access to the console in a way that we can't emulate in
# GUI or web frontend
if os.name == 'posix':
aliases = [('clear', 'clear'), ('more', 'more'), ('less', 'less'),
('man', 'man')]
else :
aliases = []
for name, cmd in aliases:
self.alias_manager.soft_define_alias(name, cmd)
#-------------------------------------------------------------------------
# Mainloop and code execution logic
#-------------------------------------------------------------------------
def mainloop(self, display_banner=None):
"""Start the mainloop.
If an optional banner argument is given, it will override the
internally created default banner.
"""
with self.builtin_trap, self.display_trap:
while 1:
try:
self.interact(display_banner=display_banner)
#self.interact_with_readline()
# XXX for testing of a readline-decoupled repl loop, call
# interact_with_readline above
break
except KeyboardInterrupt:
# this should not be necessary, but KeyboardInterrupt
# handling seems rather unpredictable...
self.write("\nKeyboardInterrupt in interact()\n")
def _replace_rlhist_multiline(self, source_raw, hlen_before_cell):
"""Store multiple lines as a single entry in history"""
# do nothing without readline or disabled multiline
if not self.has_readline or not self.multiline_history:
return hlen_before_cell
# windows rl has no remove_history_item
if not hasattr(self.readline, "remove_history_item"):
return hlen_before_cell
# skip empty cells
if not source_raw.rstrip():
return hlen_before_cell
# nothing changed do nothing, e.g. when rl removes consecutive dups
hlen = self.readline.get_current_history_length()
if hlen == hlen_before_cell:
return hlen_before_cell
for i in range(hlen - hlen_before_cell):
self.readline.remove_history_item(hlen - i - 1)
stdin_encoding = get_stream_enc(sys.stdin, 'utf-8')
self.readline.add_history(py3compat.unicode_to_str(source_raw.rstrip(),
stdin_encoding))
return self.readline.get_current_history_length()
def interact(self, display_banner=None):
"""Closely emulate the interactive Python console."""
# batch run -> do not interact
if self.exit_now:
return
if display_banner is None:
display_banner = self.display_banner
if isinstance(display_banner, py3compat.string_types):
self.show_banner(display_banner)
elif display_banner:
self.show_banner()
more = False
if self.has_readline:
self.readline_startup_hook(self.pre_readline)
hlen_b4_cell = self.readline.get_current_history_length()
else:
hlen_b4_cell = 0
# exit_now is set by a call to %Exit or %Quit, through the
# ask_exit callback.
while not self.exit_now:
self.hooks.pre_prompt_hook()
if more:
try:
prompt = self.prompt_manager.render('in2')
except:
self.showtraceback()
if self.autoindent:
self.rl_do_indent = True
else:
try:
prompt = self.separate_in + self.prompt_manager.render('in')
except:
self.showtraceback()
try:
line = self.raw_input(prompt)
if self.exit_now:
# quick exit on sys.std[in|out] close
break
if self.autoindent:
self.rl_do_indent = False
except KeyboardInterrupt:
#double-guard against keyboardinterrupts during kbdint handling
try:
self.write('\n' + self.get_exception_only())
source_raw = self.input_splitter.raw_reset()
hlen_b4_cell = \
self._replace_rlhist_multiline(source_raw, hlen_b4_cell)
more = False
except KeyboardInterrupt:
pass
except EOFError:
if self.autoindent:
self.rl_do_indent = False
if self.has_readline:
self.readline_startup_hook(None)
self.write('\n')
self.exit()
except bdb.BdbQuit:
warn('The Python debugger has exited with a BdbQuit exception.\n'
'Because of how pdb handles the stack, it is impossible\n'
'for IPython to properly format this particular exception.\n'
'IPython will resume normal operation.')
except:
# exceptions here are VERY RARE, but they can be triggered
# asynchronously by signal handlers, for example.
self.showtraceback()
else:
try:
self.input_splitter.push(line)
more = self.input_splitter.push_accepts_more()
except SyntaxError:
# Run the code directly - run_cell takes care of displaying
# the exception.
more = False
if (self.SyntaxTB.last_syntax_error and
self.autoedit_syntax):
self.edit_syntax_error()
if not more:
source_raw = self.input_splitter.raw_reset()
self.run_cell(source_raw, store_history=True)
hlen_b4_cell = \
self._replace_rlhist_multiline(source_raw, hlen_b4_cell)
# Turn off the exit flag, so the mainloop can be restarted if desired
self.exit_now = False
def raw_input(self, prompt=''):
"""Write a prompt and read a line.
The returned line does not include the trailing newline.
When the user enters the EOF key sequence, EOFError is raised.
Parameters
----------
prompt : str, optional
A string to be printed to prompt the user.
"""
# raw_input expects str, but we pass it unicode sometimes
prompt = py3compat.cast_bytes_py2(prompt)
try:
line = py3compat.cast_unicode_py2(self.raw_input_original(prompt))
except ValueError:
warn("\n********\nYou or a %run:ed script called sys.stdin.close()"
" or sys.stdout.close()!\nExiting IPython!\n")
self.ask_exit()
return ""
# Try to be reasonably smart about not re-indenting pasted input more
# than necessary. We do this by trimming out the auto-indent initial
# spaces, if the user's actual input started itself with whitespace.
if self.autoindent:
if num_ini_spaces(line) > self.indent_current_nsp:
line = line[self.indent_current_nsp:]
self.indent_current_nsp = 0
return line
#-------------------------------------------------------------------------
# Methods to support auto-editing of SyntaxErrors.
#-------------------------------------------------------------------------
def edit_syntax_error(self):
"""The bottom half of the syntax error handler called in the main loop.
Loop until syntax error is fixed or user cancels.
"""
while self.SyntaxTB.last_syntax_error:
# copy and clear last_syntax_error
err = self.SyntaxTB.clear_err_state()
if not self._should_recompile(err):
return
try:
# may set last_syntax_error again if a SyntaxError is raised
self.safe_execfile(err.filename,self.user_ns)
except:
self.showtraceback()
else:
try:
f = open(err.filename)
try:
# This should be inside a display_trap block and I
# think it is.
sys.displayhook(f.read())
finally:
f.close()
except:
self.showtraceback()
def _should_recompile(self,e):
"""Utility routine for edit_syntax_error"""
if e.filename in ('<ipython console>','<input>','<string>',
'<console>','<BackgroundJob compilation>',
None):
return False
try:
if (self.autoedit_syntax and
not self.ask_yes_no('Return to editor to correct syntax error? '
'[Y/n] ','y')):
return False
except EOFError:
return False
def int0(x):
try:
return int(x)
except TypeError:
return 0
# always pass integer line and offset values to editor hook
try:
self.hooks.fix_error_editor(e.filename,
int0(e.lineno),int0(e.offset),e.msg)
except TryNext:
warn('Could not open editor')
return False
return True
#-------------------------------------------------------------------------
# Things related to exiting
#-------------------------------------------------------------------------
def ask_exit(self):
""" Ask the shell to exit. Can be overiden and used as a callback. """
self.exit_now = True
def exit(self):
"""Handle interactive exit.
This method calls the ask_exit callback."""
if self.confirm_exit:
if self.ask_yes_no('Do you really want to exit ([y]/n)?','y'):
self.ask_exit()
else:
self.ask_exit()
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
super(TerminalInteractiveShell, self).init_magics()
self.register_magics(TerminalMagics)
def showindentationerror(self):
super(TerminalInteractiveShell, self).showindentationerror()
if not self.using_paste_magics:
print("If you want to paste code into IPython, try the "
"%paste and %cpaste magic functions.")
InteractiveShellABC.register(TerminalInteractiveShell)
|
the-stack_0_20830 | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from functools import partial
from netket.jax import compose, vmap_chunked
import jax
import jax.flatten_util
import jax.numpy as jnp
import numpy as np
from netket.stats import subtract_mean, sum
from netket.utils import mpi
from netket.utils.types import Array, Callable, PyTree, Scalar
from netket.jax import tree_cast, tree_conj, tree_axpy, tree_to_real
# TODO better name and move it somewhere sensible
def single_sample(forward_fn):
"""
A decorator to make the forward_fn accept a single sample
"""
def f(W, σ):
return forward_fn(W, σ[jnp.newaxis, :])[0]
return f
# TODO move it somewhere reasonable
def tree_subtract_mean(oks: PyTree) -> PyTree:
"""
subtract the mean with MPI along axis 0 of every leaf
"""
return jax.tree_map(partial(subtract_mean, axis=0), oks) # MPI
def jacobian_real_holo(
forward_fn: Callable, params: PyTree, samples: Array, chunk_size: int = None
) -> PyTree:
"""Calculates Jacobian entries by vmapping grad.
Assumes the function is R→R or holomorphic C→C, so single grad is enough
Args:
forward_fn: the log wavefunction ln Ψ
params : a pytree of parameters p
samples : an array of n samples σ
Returns:
The Jacobian matrix ∂/∂pₖ ln Ψ(σⱼ) as a PyTree
"""
def _jacobian_real_holo(forward_fn, params, samples):
y, vjp_fun = jax.vjp(single_sample(forward_fn), params, samples)
res, _ = vjp_fun(np.array(1.0, dtype=jnp.result_type(y)))
return res
return vmap_chunked(
_jacobian_real_holo, in_axes=(None, None, 0), chunk_size=chunk_size
)(forward_fn, params, samples)
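# -----------------------------------------------------------------------------
# Minimal illustration (toy ansatz, names are illustrative): for an R→R
# log-amplitude the returned pytree mirrors `params`, with a leading sample
# axis prepended to every leaf.
#
#     def log_psi(w, sigma):                       # sigma has shape (n, N)
#         return jnp.einsum("i,ni->n", w, sigma)
#
#     oks = jacobian_real_holo(log_psi, w, sigma)  # shape (n,) + w.shape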
def jacobian_cplx(
forward_fn: Callable,
params: PyTree,
samples: Array,
chunk_size: int = None,
_build_fn: Callable = partial(jax.tree_multimap, jax.lax.complex),
) -> PyTree:
"""Calculates Jacobian entries by vmapping grad.
Assumes the function is R→C, backpropagates 1 and -1j
Args:
forward_fn: the log wavefunction ln Ψ
params : a pytree of parameters p
samples : an array of n samples σ
Returns:
The Jacobian matrix ∂/∂pₖ ln Ψ(σⱼ) as a PyTree
"""
def _jacobian_cplx(forward_fn, params, samples, _build_fn):
y, vjp_fun = jax.vjp(single_sample(forward_fn), params, samples)
gr, _ = vjp_fun(np.array(1.0, dtype=jnp.result_type(y)))
gi, _ = vjp_fun(np.array(-1.0j, dtype=jnp.result_type(y)))
return _build_fn(gr, gi)
return vmap_chunked(
_jacobian_cplx, in_axes=(None, None, 0, None), chunk_size=chunk_size
)(forward_fn, params, samples, _build_fn)
centered_jacobian_real_holo = compose(tree_subtract_mean, jacobian_real_holo)
centered_jacobian_cplx = compose(tree_subtract_mean, jacobian_cplx)
def _divide_by_sqrt_n_samp(oks, samples):
"""
divide Oⱼₖ by √n
"""
n_samp = samples.shape[0] * mpi.n_nodes # MPI
return jax.tree_map(lambda x: x / np.sqrt(n_samp), oks)
def _multiply_by_pdf(oks, pdf):
"""
    Computes O'ⱼₖ = Oⱼₖ pⱼ .
Used to multiply the log-derivatives by the probability density.
"""
return jax.tree_map(
lambda x: jax.lax.broadcast_in_dim(pdf, x.shape, (0,)) * x,
oks,
)
def stack_jacobian(centered_oks: PyTree) -> PyTree:
"""
Return the real and imaginary parts of ΔOⱼₖ stacked along the sample axis
Re[S] = Re[(ΔOᵣ + i ΔOᵢ)ᴴ(ΔOᵣ + i ΔOᵢ)] = ΔOᵣᵀ ΔOᵣ + ΔOᵢᵀ ΔOᵢ = [ΔOᵣ ΔOᵢ]ᵀ [ΔOᵣ ΔOᵢ]
"""
return jax.tree_map(
lambda x: jnp.concatenate([x.real, x.imag], axis=0), centered_oks
)
def stack_jacobian_tuple(centered_oks_re_im):
"""
stack the real and imaginary parts of ΔOⱼₖ along the sample axis
Re[S] = Re[(ΔOᵣ + i ΔOᵢ)ᴴ(ΔOᵣ + i ΔOᵢ)] = ΔOᵣᵀ ΔOᵣ + ΔOᵢᵀ ΔOᵢ = [ΔOᵣ ΔOᵢ]ᵀ [ΔOᵣ ΔOᵢ]
Args:
centered_oks_re_im : a tuple (ΔOᵣ, ΔOᵢ) of two PyTrees representing the real and imag part of ΔOⱼₖ
"""
return jax.tree_multimap(
lambda re, im: jnp.concatenate([re, im], axis=0), *centered_oks_re_im
)
def _rescale(centered_oks):
"""
compute ΔOₖ/√Sₖₖ and √Sₖₖ
to do scale-invariant regularization (Becca & Sorella 2017, pp. 143)
Sₖₗ/(√Sₖₖ√Sₗₗ) = ΔOₖᴴΔOₗ/(√Sₖₖ√Sₗₗ) = (ΔOₖ/√Sₖₖ)ᴴ(ΔOₗ/√Sₗₗ)
"""
scale = jax.tree_map(
lambda x: mpi.mpi_sum_jax(jnp.sum((x * x.conj()).real, axis=0, keepdims=True))[
0
]
** 0.5,
centered_oks,
)
centered_oks = jax.tree_multimap(jnp.divide, centered_oks, scale)
scale = jax.tree_map(partial(jnp.squeeze, axis=0), scale)
return centered_oks, scale
def _jvp(oks: PyTree, v: PyTree) -> Array:
"""
Compute the matrix-vector product between the pytree jacobian oks and the pytree vector v
"""
td = lambda x, y: jnp.tensordot(x, y, axes=y.ndim)
return jax.tree_util.tree_reduce(jnp.add, jax.tree_multimap(td, oks, v))
def _vjp(oks: PyTree, w: Array) -> PyTree:
"""
Compute the vector-matrix product between the vector w and the pytree jacobian oks
"""
res = jax.tree_map(partial(jnp.tensordot, w, axes=1), oks)
return jax.tree_map(lambda x: mpi.mpi_sum_jax(x)[0], res) # MPI
def _mat_vec(v: PyTree, oks: PyTree) -> PyTree:
"""
Compute ⟨O† O⟩v = ∑ₗ ⟨Oₖᴴ Oₗ⟩ vₗ
"""
res = tree_conj(_vjp(oks, _jvp(oks, v).conjugate()))
return tree_cast(res, v)
# ==============================================================================
# the logic above only works for R→R, R→C and holomorphic C→C
# here the other modes are converted
@partial(jax.jit, static_argnames=("apply_fun", "mode", "rescale_shift", "chunk_size"))
def prepare_centered_oks(
apply_fun: Callable,
params: PyTree,
samples: Array,
model_state: Optional[PyTree],
mode: str,
rescale_shift: bool,
pdf=None,
chunk_size: int = None,
) -> PyTree:
"""
compute ΔOⱼₖ = Oⱼₖ - ⟨Oₖ⟩ = ∂/∂pₖ ln Ψ(σⱼ) - ⟨∂/∂pₖ ln Ψ⟩
divided by √n
    In a somewhat opaque way this also internally splits all parameters to real
in the 'real' and 'complex' modes (for C→R, R&C→R, R&C→C and general C→C) resulting in the respective ΔOⱼₖ
which is only compatible with split-to-real pytree vectors
Args:
apply_fun: The forward pass of the Ansatz
params : a pytree of parameters p
samples : an array of (n in total) batched samples σ
model_state: untrained state parameters of the model
mode: differentiation mode, must be one of 'real', 'complex', 'holomorphic'
rescale_shift: whether scale-invariant regularisation should be used (default: True)
pdf: |ψ(x)|^2 if exact optimization is being used else None
        chunk_size: an int specifying the size of the chunks the gradient should be computed in (default: None)
Returns:
if not rescale_shift:
a pytree representing the centered jacobian of ln Ψ evaluated at the samples σ, divided by √n;
None
else:
the same pytree, but the entries for each parameter normalised to unit norm;
pytree containing the norms that were divided out (same shape as params)
"""
# un-batch the samples
samples = samples.reshape((-1, samples.shape[-1]))
# pre-apply the model state
def forward_fn(W, σ):
return apply_fun({"params": W, **model_state}, σ)
if mode == "real":
split_complex_params = True # convert C→R and R&C→R to R→R
centered_jacobian_fun = centered_jacobian_real_holo
jacobian_fun = jacobian_real_holo
elif mode == "complex":
split_complex_params = True # convert C→C and R&C→C to R→C
# centered_jacobian_fun = compose(stack_jacobian, centered_jacobian_cplx)
# avoid converting to complex and then back
# by passing around the oks as a tuple of two pytrees representing the real and imag parts
centered_jacobian_fun = compose(
stack_jacobian_tuple,
partial(centered_jacobian_cplx, _build_fn=lambda *x: x),
)
jacobian_fun = jacobian_cplx
elif mode == "holomorphic":
split_complex_params = False
centered_jacobian_fun = centered_jacobian_real_holo
jacobian_fun = jacobian_real_holo
else:
raise NotImplementedError(
'Differentiation mode should be one of "real", "complex", or "holomorphic", got {}'.format(
mode
)
)
if split_complex_params:
# doesn't do anything if the params are already real
params, reassemble = tree_to_real(params)
def f(W, σ):
return forward_fn(reassemble(W), σ)
else:
f = forward_fn
if pdf is None:
centered_oks = _divide_by_sqrt_n_samp(
centered_jacobian_fun(
f,
params,
samples,
chunk_size=chunk_size,
),
samples,
)
else:
oks = jacobian_fun(f, params, samples)
oks_mean = jax.tree_map(partial(sum, axis=0), _multiply_by_pdf(oks, pdf))
centered_oks = jax.tree_multimap(lambda x, y: x - y, oks, oks_mean)
centered_oks = _multiply_by_pdf(centered_oks, jnp.sqrt(pdf))
if rescale_shift:
return _rescale(centered_oks)
else:
return centered_oks, None
def mat_vec(v: PyTree, centered_oks: PyTree, diag_shift: Scalar) -> PyTree:
"""
    Compute (S + δ) v = 1/n ⟨ΔO† ΔO⟩ v + δ v, i.e. [(S + δ) v]ₖ = ∑ₗ 1/n ⟨ΔOₖᴴ ΔOₗ⟩ vₗ + δ vₖ
Only compatible with R→R, R→C, and holomorphic C→C
for C→R, R&C→R, R&C→C and general C→C the parameters for generating ΔOⱼₖ should be converted to R,
and thus also the v passed to this function as well as the output are expected to be of this form
Args:
v: pytree representing the vector v compatible with centered_oks
centered_oks: pytree of gradients 1/√n ΔOⱼₖ
diag_shift: a scalar diagonal shift δ
Returns:
a pytree corresponding to the sr matrix-vector product (S + δ) v
"""
return tree_axpy(diag_shift, v, _mat_vec(v, centered_oks))
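
# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module, for illustration
# only): it wires the pieces above together for a toy R→R model with a
# flat parameter pytree, assuming the module-level helpers used above
# (vmap_chunked, subtract_mean, mpi, compose, tree_*) are available.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp

    def toy_log_psi(w, sigma):
        # batched log-amplitude ln Ψ(σ) = σ·w  (shape: (n_samples,))
        return sigma @ w["w"]

    key = jax.random.PRNGKey(0)
    params = {"w": jnp.ones(4)}
    samples = jax.random.normal(key, (8, 4))

    # centered Jacobian ΔOⱼₖ, then ΔOⱼₖ/√n as expected by mat_vec
    oks = centered_jacobian_real_holo(toy_log_psi, params, samples)
    oks = _divide_by_sqrt_n_samp(oks, samples)

    # (S + δ) v for a test vector v
    v = {"w": jnp.ones(4)}
    sv = mat_vec(v, oks, diag_shift=1e-2)
    print(sv)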
|
the-stack_0_20831 | # 2022 eCTF
# Attack-Phase Image Update Routine
# Jake Grycel
#
# (c) 2022 The MITRE Corporation
#
# This source file is part of an example system for MITRE's 2022 Embedded System
# CTF (eCTF). This code is being provided only for educational purposes for the
# 2022 MITRE eCTF competition, and may not meet MITRE standards for quality.
# Use this code at your own risk!
import argparse
from pathlib import Path
from serial import Serial
from serial.tools import list_ports
success_codes = [1, 2, 3, 5, 6, 7, 9, 10, 12, 13, 15, 18, 21, 22, 23]
error_codes = [4, 11, 14, 16, 17, 19, 20]
UPDATE_COMMAND = b"\x00"
# Wait for the expected bootloader response byte
# Exit if response does not match
def verify_resp(ser, print_out=True):
resp = ser.read(1)
while (resp == b"") or (not ord(resp) in (success_codes + error_codes)):
resp = ser.read(1)
if ord(resp) not in success_codes:
print(f"Error. Bootloader responded with: {ord(resp)}")
exit()
if print_out:
print(f"Success. Bootloader responded with code {ord(resp)}")
return ord(resp)
# Run full application update
def image_update(in_file):
# USAGE:
# 1. Start this script
# 2. Start the device
#
# This script finds the correct serial port by looking at an initial
# list and waiting for a new entry to show up.
# It then assumes that is the correct port and tries an update
# Look for a serial port to open
print("Looking for new serial port to open...")
search = True
orig_port_list = list_ports.comports()
orig_len = len(orig_port_list)
while search:
new_port_list = list_ports.comports()
new_len = len(new_port_list)
if new_len == (orig_len + 1):
for port in new_port_list:
if port not in orig_port_list:
com_port = port.device
search = False
break
elif new_len != orig_len:
# Something changed, so we adapt
orig_port_list = new_port_list
orig_len = new_len
# Keep trying to connect
while 1:
try:
ser = Serial(com_port, 115200, timeout=2)
ser.reset_input_buffer()
break
except Exception:
pass
print(f"Connected to bootloader on {com_port}")
# Open protected image
img_file = Path(in_file)
if not img_file.exists():
print(f"Image file {img_file} not found. Exiting")
exit()
with open(img_file, "rb") as image_fp:
# Send update command
print("Requesting update")
ser.write(UPDATE_COMMAND)
# Wait for initial status messages to synchronize
resp = -1
while resp != success_codes[2]:
resp = verify_resp(ser)
# Send image and verify each block success
print("Update started")
print("Sending image data")
block_bytes = image_fp.read(16)
count = 0
while block_bytes != b"":
if (count % 100) == 0:
print(f"Sending block {count}")
count += 1
ser.write(block_bytes)
verify_resp(ser, print_out=False)
block_bytes = image_fp.read(16)
# Wait for update finish
print("\nListening for update status...\n")
resp = -1
while resp != success_codes[-1]:
resp = verify_resp(ser)
print("\nUpdate Complete!\n")
# Run in application mode
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(
description="Tool for loading designs into the keyed attack-phase device",
)
parser.add_argument("--infile", required=True, help="Path to the input binary")
args = parser.parse_args()
image_update(args.infile)
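    # Example invocation (hypothetical script and file names, for illustration):
    #   python3 update_image.py --infile protected_image.bin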
|
the-stack_0_20832 | #!/usr/bin/env python2.7
# -*- encoding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import kivy
kivy.require('1.9.0')
Initialize_Window_Width = 1280
Initialize_Window_Height = 800
from kivy.config import Config
Config.set('graphics', 'width', str(Initialize_Window_Width))
Config.set('graphics', 'height', str(Initialize_Window_Height))
from kivy.utils import platform as Kivy_Platform
from kivy.lang.builder import Builder
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager, Screen, NoTransition
from kivy.uix.scrollview import ScrollView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.slider import Slider
from kivy.uix.label import Label
from kivy.uix.image import Image # , AsyncImage
from kivy.clock import Clock
from kivy.graphics import Rectangle, Color
from kivy.properties import ListProperty
from kivy.factory import Factory
import os, sys
import platform
import datetime, calendar
import math
execution_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
os_platform = platform.system()
path_to_time_slider_cursor = ""
path_to_time_slider_cursor_disabled = ""
path_to_cwremote_screen_image = ""
if (os_platform == "Darwin"):
execution_directory = execution_directory.split("CW_Remote.app")[0]
def resource_path ( relative_path ):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = execution_directory
return os.path.join(base_path, relative_path)
path_to_icon_image = resource_path(os.path.join("data", "cwremote-icon-512.png"))
path_to_time_slider_cursor = resource_path(os.path.join("data", "time_slider_cursor.png"))
path_to_time_slider_cursor_disabled = resource_path(os.path.join("data", "time_slider_cursor_disabled.png"))
path_to_cwremote_screen_image = resource_path(os.path.join("data", "CW_Remote_Screen.png"))
Config.set('kivy','window_icon', path_to_icon_image)
Config.write()
elif (os_platform == "Linux"):
if (Kivy_Platform == "android"):
pass
elif (os_platform == "Windows"):
pass
else:
pass
# Convenience function to bound a value
def bound ( low, high, value ):
return max(low, min(high, value))
# Local wall time, this works for New York City
class Time_Zone ( datetime.tzinfo ):
def __init__(self, offset_in_minutes):
super(Time_Zone, self).__init__()
self.offset = offset_in_minutes
def utcoffset(self, dt):
return datetime.timedelta(minutes=self.offset)
def tzname(self, dt):
return ""
def dst(self, dt):
return datetime.timedelta(0)
UTC_Time_Zone = Time_Zone(0)
Eastern_Daylight_Time_Zone = Time_Zone(-4 * 60)
Eastern_Standard_Time_Zone = Time_Zone(-5 * 60)
def NYC_Wall_DateTime_Offset ( Time_Zone_Aware_DateTime ):
# In the US, since 2007, DST starts at 2am (standard time) on the second
# Sunday in March, which is the first Sunday on or after Mar 8.
# and ends at 2am (DST time; 1am standard time) on the first Sunday of Nov.
datetime_nyc_wall = Time_Zone_Aware_DateTime.astimezone(Eastern_Standard_Time_Zone)
    # Test whether the time falls within the daylight saving period
begin_daylight_savings = \
datetime.datetime(year=datetime_nyc_wall.year, month=3, day=8, hour=2, tzinfo=Eastern_Standard_Time_Zone)
begin_daylight_savings += datetime.timedelta(days=(6 - begin_daylight_savings.date().weekday()))
end_daylight_savings = \
datetime.datetime(year=datetime_nyc_wall.year, month=11, day=1, hour=1, tzinfo=Eastern_Standard_Time_Zone)
end_daylight_savings += datetime.timedelta(days=(6 - end_daylight_savings.date().weekday()))
if ((datetime_nyc_wall >= begin_daylight_savings) and (datetime_nyc_wall <= end_daylight_savings)):
datetime_nyc_wall_offset = "-0400"
else: datetime_nyc_wall_offset = "-0500"
return datetime_nyc_wall_offset
def NYC_Wall_DateTime ( Time_Zone_Aware_DateTime ):
# In the US, since 2007, DST starts at 2am (standard time) on the second
# Sunday in March, which is the first Sunday on or after Mar 8.
# and ends at 2am (DST time; 1am standard time) on the first Sunday of Nov.
datetime_nyc_wall = Time_Zone_Aware_DateTime.astimezone(Eastern_Standard_Time_Zone)
    # Test whether the time falls within the daylight saving period
begin_daylight_savings = \
datetime.datetime(year=datetime_nyc_wall.year, month=3, day=8, hour=2, tzinfo=Eastern_Standard_Time_Zone)
begin_daylight_savings += datetime.timedelta(days=(6 - begin_daylight_savings.date().weekday()))
end_daylight_savings = \
datetime.datetime(year=datetime_nyc_wall.year, month=11, day=1, hour=1, tzinfo=Eastern_Standard_Time_Zone)
end_daylight_savings += datetime.timedelta(days=(6 - end_daylight_savings.date().weekday()))
if ((datetime_nyc_wall >= begin_daylight_savings) and (datetime_nyc_wall <= end_daylight_savings)):
datetime_nyc_wall = Time_Zone_Aware_DateTime.astimezone(Eastern_Daylight_Time_Zone)
return datetime_nyc_wall
def Return_NYC_Wall_Time_String ( UTC_Datetime=None, NYC_Wall_Datetime=None, Time_Zone_Indicator="" ):
if (UTC_Datetime is not None):
datetime_NYC_Wall = NYC_Wall_DateTime(UTC_Datetime)
elif (NYC_Wall_Datetime is not None):
datetime_NYC_Wall = NYC_Wall_Datetime
else:
datetime_NYC_Wall = None
isoformatted_datetime_NYC_Wall = datetime_NYC_Wall.isoformat()
if (Time_Zone_Indicator == "E"):
return isoformatted_datetime_NYC_Wall[:-6]
if (datetime_NYC_Wall is not None):
return (isoformatted_datetime_NYC_Wall + Time_Zone_Indicator)
else: return "Error"
def Period_Span_NYC_Wall_Time ( Period_Hours, Period_End_Hours_Ago ):
datetime_now_utc = datetime.datetime.now(UTC_Time_Zone)
period_end_utc = datetime_now_utc - datetime.timedelta(hours=Period_End_Hours_Ago)
period_begin_utc = period_end_utc - datetime.timedelta(hours=Period_Hours)
period_begin_NYC_Wall = NYC_Wall_DateTime(period_begin_utc)
period_end_NYC_Wall = NYC_Wall_DateTime(period_end_utc)
period_begin_nyc_wall_string = \
Return_NYC_Wall_Time_String(NYC_Wall_Datetime=period_begin_NYC_Wall, Time_Zone_Indicator="E")[:-10].replace("T", " ")
period_end_nyc_wall_string = \
Return_NYC_Wall_Time_String(NYC_Wall_Datetime=period_end_NYC_Wall, Time_Zone_Indicator="E")[:-10].replace("T", " ")
return (calendar.day_abbr[period_begin_NYC_Wall.weekday()] + " " + period_begin_nyc_wall_string + "NYC to " +
calendar.day_abbr[period_end_NYC_Wall.weekday()] + " " + period_end_nyc_wall_string + "NYC")
# Since this is a "static" widget, it's more convenient to create as kv
Builder.load_string(
"""
<VerticalTabBarBoxLayout>:
orientation: 'vertical'
canvas:
Color:
rgba: 0.75, 0.95, 1, 1
Rectangle:
pos: self.pos
size: self.size
size_hint: (0.02, 1)
Widget:
Button:
padding: (15, 5)
on_press: root.trigger_on_press_previous()
center_x: self.parent.center_x
center_y: self.parent.top - (self.texture_size[0] / 2.0)
size: self.texture_size
canvas.before:
PushMatrix
Rotate:
angle: 90
origin: self.center
canvas.after:
PopMatrix
text: "Previous"
Button:
padding: (15, 5)
on_press: root.trigger_on_press_simplex()
center_x: self.parent.center_x
center_y: self.parent.top - (self.parent.height * 0.3)
size: self.texture_size
canvas.before:
PushMatrix
Rotate:
angle: 90
origin: self.center
canvas.after:
PopMatrix
text: "Simplex"
Button:
padding: (15, 5)
on_press: root.trigger_on_press_help()
center: self.parent.center
size: self.texture_size
canvas.before:
PushMatrix
Rotate:
angle: 90
origin: self.center
canvas.after:
PopMatrix
text: "Help"
Button:
padding: (15, 5)
on_press: root.trigger_on_press_duplex()
center_x: self.parent.center_x
center_y: self.parent.top - (self.parent.height * 0.7)
size: self.texture_size
canvas.before:
PushMatrix
Rotate:
angle: 90
origin: self.center
canvas.after:
PopMatrix
text: "Duplex"
Button:
padding: (15, 5)
on_press: root.trigger_on_press_next()
center_x: self.parent.center_x
center_y: self.parent.top - self.parent.height + (self.texture_size[0] / 2.0)
size: self.texture_size
canvas.before:
PushMatrix
Rotate:
angle: 90
origin: self.center
canvas.after:
PopMatrix
text: "Next"
""")
class VerticalTabBarBoxLayout(BoxLayout):
def __init__(self, **kwargs):
super(VerticalTabBarBoxLayout, self).__init__(**kwargs)
self.register_event_type('on_press_previous')
self.register_event_type('on_press_next')
self.register_event_type('on_press_simplex')
self.register_event_type('on_press_duplex')
self.register_event_type('on_press_help')
def trigger_on_press_previous(self, *args):
self.dispatch('on_press_previous')
def on_press_previous(self, *args):
pass
def trigger_on_press_next(self, *args):
self.dispatch('on_press_next')
def on_press_next(self, *args):
pass
def trigger_on_press_simplex(self, *args):
self.dispatch('on_press_simplex')
def on_press_simplex(self, *args):
pass
def trigger_on_press_duplex(self, *args):
self.dispatch('on_press_duplex')
def on_press_duplex(self, *args):
pass
def trigger_on_press_help(self, *args):
self.dispatch('on_press_help')
def on_press_help(self, *args):
pass
# This slider extension allows the code to avoid the very expensive refreshes of ...
# ... the widget images until the user has stopped sliding the slider. Refresh then.
class SliderExtended(Slider):
def __init__(self, **kwargs):
self.register_event_type('on_release')
super(SliderExtended, self).__init__(**kwargs)
def on_release(self):
pass
# Because there appears to be no event for touch_up, ...
# ... override on_touch_up and create a custom event
def on_touch_up(self, touch):
super(SliderExtended, self).on_touch_up(touch)
if (touch.grab_current == self):
self.dispatch('on_release')
return True
# Since this is a relatively complicated "dynamic" widget, ...
# ... it's more convenient to render as Python code.
class TimeSpanControlBar(BoxLayout):
Period_Duration_Steps = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 18, 20, 22, 24, # 18
26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, # 12
50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, # 12
74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, # 12
100, 104, 108, 112, 116, 120, # 6
124, 128, 132, 136, 140, 144, # 6
148, 152, 156, 160, 164, 168] # 6
Period_Hours_Ago_Steps = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 18, 20, 22, 24, # 19
26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, # 12
50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, # 12
74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, # 12
100, 104, 108, 112, 116, 120, # 6
124, 128, 132, 136, 140, 144, # 6
148, 152, 156, 160, 164, 168] # 6
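    # The step tables are non-uniform (1 h resolution up to 12 h, then 2 h,
    # then 4 h steps), which gives the sliders the "logarithmic",
    # finer-near-zero feel described in the help text.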
def __init__(self, **kwargs):
self.register_event_type('on_release')
self._period_duration_hours = 24
self._period_end_hours_ago = 0
slider_minimum_value = -1000
period_duration_slider_maximum_value = -1
self.period_duration_slider_value_span = period_duration_slider_maximum_value - slider_minimum_value
period_end_slider_maximum_value = 0
self.period_end_slider_value_span = period_end_slider_maximum_value - slider_minimum_value
super(TimeSpanControlBar, self).__init__(**kwargs)
self.period_duration_label = Label(text="proxy", size_hint=(0.075, 1))
self.period_duration_slider = \
SliderExtended(cursor_image=path_to_time_slider_cursor,
cursor_disabled_image=path_to_time_slider_cursor_disabled,
cursor_height=28,
border_horizontal=[0, 0, 0, 0], padding=12,
min=slider_minimum_value, max=period_duration_slider_maximum_value,
value=period_duration_slider_maximum_value, step=1, size_hint=(0.4, 1))
self.period_duration_slider.bind(value=self._on_period_duration_value_change)
self.period_duration_slider.bind(on_release=self._trigger_on_release)
refresh_button = Button(text="Refresh", size_hint=(0.05, 1))
refresh_button.font_size = 14
refresh_button.bind(on_press=self._trigger_on_release)
self.period_end_slider = \
SliderExtended(cursor_image=path_to_time_slider_cursor,
cursor_disabled_image=path_to_time_slider_cursor_disabled,
cursor_height=28,
border_horizontal=[0, 0, 0, 0], padding=12,
min=slider_minimum_value, max=period_end_slider_maximum_value,
value=period_end_slider_maximum_value, step=1, size_hint=(0.4, 1))
self.period_end_slider.bind(value=self._on_period_end_value_change)
self.period_end_slider.bind(on_release=self._trigger_on_release)
self.period_end_label = Label(text="proxy", size_hint=(0.075, 1))
self.add_widget(self.period_duration_label)
self.add_widget(self.period_duration_slider)
self.add_widget(refresh_button)
self.add_widget(self.period_end_slider)
self.add_widget(self.period_end_label)
self.set_period_duration_value(self._period_duration_hours)
self.set_period_end_value(self._period_end_hours_ago)
# Public functions (used to synchronize multiple TimeSpanControlBars) ...
def set_period_duration_value(self, period_duration_value, *args):
self._period_duration_hours = period_duration_value
self.period_duration_label.text = (self._period_value_display(self._period_duration_hours))
self.period_duration_slider.value = -(self.period_duration_slider_value_span *
(self.Period_Duration_Steps.index(self._period_duration_hours) /
len(self.Period_Duration_Steps)))
def set_period_end_value(self, period_end_value, *args):
self._period_end_hours_ago = period_end_value
self.period_end_slider.value = -(self.period_end_slider_value_span *
(self.Period_Hours_Ago_Steps.index(self._period_end_hours_ago) /
len(self.Period_Hours_Ago_Steps)))
self.period_end_label.text = (self._period_value_display(self._period_end_hours_ago) + " ago")
# ... Public functions (used to synchronize multiple TimeSpanControlBars)
# Private functions ...
def _period_value_display(self, Period_Value):
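        # e.g. 30 -> "1D 6H", 24 -> "1D", 0 -> "0H"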
period_value_string = ""
if ((Period_Value // 24) > 0): period_value_string += str(Period_Value // 24) + "D"
if (((Period_Value % 24) > 0) or (len(period_value_string) == 0)):
if (len(period_value_string) > 0): period_value_string += " "
period_value_string += str(Period_Value % 24) + "H"
return period_value_string
def _on_period_duration_value_change(self, instance, period_duration_slider_value, *args):
# print (period_duration_slider_value)
period_value_index = \
int(round(len(self.Period_Duration_Steps) *
(abs(period_duration_slider_value) / self.period_duration_slider_value_span)))
self._period_duration_hours = \
self.Period_Duration_Steps[bound(0, (len(self.Period_Duration_Steps) - 1), period_value_index)]
self.period_duration_label.text = (self._period_value_display(self._period_duration_hours))
# print (period_duration_slider_value, period_value_index, self._period_duration_hours, self.period_duration_label.text)
return True
def _on_period_end_value_change(self, instance, period_end_slider_value, *args):
period_end_value_index = \
int(round(len(self.Period_Hours_Ago_Steps) *
(abs(period_end_slider_value) / self.period_end_slider_value_span)))
self._period_end_hours_ago = \
self.Period_Hours_Ago_Steps[bound(0, (len(self.Period_Hours_Ago_Steps) - 1), period_end_value_index)]
self.period_end_label.text = (self._period_value_display(self._period_end_hours_ago) + " ago")
return True
# ... Private functions
# Proxy for public event
def on_release(self, *args):
pass
# Private function
def _trigger_on_release(self, *args):
self.dispatch('on_release', self._period_duration_hours, self._period_end_hours_ago)
return True
Builder.load_string("""
<LabelExtended>:
background_color: 1, 1, 1, 1
canvas.before:
Color:
rgba: self.background_color
Rectangle:
pos: self.pos
size: self.size
""")
class LabelExtended(Label):
background_color = ListProperty([1, 1, 1, 1])
Factory.register('KivyExtended', module='LabelExtended')
class GridLayoutExtended ( GridLayout ):
def __init__(self, **kwargs):
        # Initialize the base GridLayout, then paint a background rectangle that tracks size/pos
super(GridLayoutExtended, self).__init__(**kwargs)
with self.canvas.before:
Color(1, 0, 0, 1)
self.rect = Rectangle(size=self.size, pos=self.pos)
self.bind(size=self._update_rect, pos=self._update_rect)
def _update_rect(self, instance, value):
self.rect.pos = instance.pos
self.rect.size = instance.size
def Build_Help_GridLayout ( on_help_escape_callback ):
kivy_app_help = \
GridLayoutExtended(cols=1, padding=[2, 2, 0, 2], spacing=0,
size_hint=(None, None), width=Initialize_Window_Width)
cwremote_screen_image = \
Image(source=path_to_cwremote_screen_image,
size=((Initialize_Window_Width - 4), 815), size_hint=(None, None))
kivy_app_help.add_widget(cwremote_screen_image)
help_escape_button = Button(text="Return to graphs", bold=True,
background_color=[1, 0, 0, 1],
size=((Initialize_Window_Width - 4), 28), size_hint=(None, None))
help_escape_button.bind(on_press=on_help_escape_callback)
kivy_app_help.add_widget(help_escape_button)
CW_Remote_Help_Text_Paragraphs = [
"The red '[b][color=ff0000]A[/color][/b]' marks the slider that adjusts the duration of the period for which the graphed data will appear. " +
"It can adjust from 1 hour to 7 days (168 hours). " +
"The label to the left of the this slider displays the current period duration in days and hours. " +
"This slider is logarithmic, it is increasingly sensitive toward the right end of the scale. ",
"The red '[b][color=ff0000]B[/color][/b]' marks the slider that adjusts the hours before now that the graphed period ends. " +
"It can adjust from 0 hours to 7 days (168 hours). " +
"The label to the right of the this slider displays the days and hours before now that the graphed period ends. " +
"This slider is logarithmic, it is increasingly sensitive toward the right end of the scale. "
]
# Add help text paragraphs to grid.
for help_text_paragraph in CW_Remote_Help_Text_Paragraphs:
help_txt_para = LabelExtended(text=help_text_paragraph, markup=True, text_size=(1272, None),
color=[0, 0, 0, 1], padding_x=2,
width=(Initialize_Window_Width - 4), size_hint=(None, None))
help_txt_para.height = math.ceil(len(help_text_paragraph) * (1.33 / 255)) * 25
kivy_app_help.add_widget(help_txt_para)
kivy_app_help.bind(minimum_height=kivy_app_help.setter('height'))
return kivy_app_help
class Build_Kivy_App_UI ( App ):
def __init__(self, **kwargs):
super(Build_Kivy_App_UI, self).__init__(**kwargs)
Window.bind(on_key_down=self.on_keyboard_down)
def build(self):
self.title = "Kivy App Demo Step2"
Window.bind(on_key_down=self.on_keyboard_down)
self.Period_Duration_Hours = 24
self.Period_End_Hours_Ago = 0
self.Visible_Payload_Count = 2
self.Kivy_App_UI = ScreenManager(transition=NoTransition())
# Duplex
self.Kivy_App_Duplex_Screen = Screen(name="duplex")
self.Kivy_App_Duplex = BoxLayout(orientation='horizontal')
self.Duplex_Tab_Bar = VerticalTabBarBoxLayout()
self.Duplex_Tab_Bar.bind(on_press_previous=self.on_previous)
self.Duplex_Tab_Bar.bind(on_press_next=self.on_next)
self.Duplex_Tab_Bar.bind(on_press_simplex=self.on_simplex)
self.Duplex_Tab_Bar.bind(on_press_duplex=self.on_duplex)
self.Duplex_Tab_Bar.bind(on_press_help=self.on_help)
self.Duplex_Kivy_App_Panel = BoxLayout(orientation='vertical', size_hint=(0.98, 1))
self.Duplex_TimeSpanControlBar = TimeSpanControlBar()
self.Duplex_TimeSpanControlBar.bind(on_release=self.update_with_parameters)
self.Duplex_TimeSpanControlBar.size_hint = (1, 0.04)
# Simplex
self.Kivy_App_Simplex_Screen = Screen(name="simplex")
self.Kivy_App_Simplex = BoxLayout(orientation='horizontal')
self.Simplex_Tab_Bar = VerticalTabBarBoxLayout()
self.Simplex_Tab_Bar.bind(on_press_previous=self.on_previous)
self.Simplex_Tab_Bar.bind(on_press_next=self.on_next)
self.Simplex_Tab_Bar.bind(on_press_simplex=self.on_simplex)
self.Simplex_Tab_Bar.bind(on_press_duplex=self.on_duplex)
self.Simplex_Tab_Bar.bind(on_press_help=self.on_help)
self.Simplex_Kivy_App_Panel = BoxLayout(orientation='vertical', size_hint=(0.98, 1))
self.Simplex_TimeSpanControlBar = TimeSpanControlBar()
self.Simplex_TimeSpanControlBar.bind(on_release=self.update_with_parameters)
self.Simplex_TimeSpanControlBar.size_hint = (1, 0.04)
# Duplex screen
self.Duplex_Upper_Payload_Box = BoxLayout(orientation='vertical', size_hint=(1, 0.48))
self.Duplex_Lower_Payload_Box = BoxLayout(orientation='vertical', size_hint=(1, 0.48))
self.Duplex_Kivy_App_Panel.add_widget(self.Duplex_Upper_Payload_Box)
self.Duplex_Kivy_App_Panel.add_widget(self.Duplex_TimeSpanControlBar)
self.Duplex_Kivy_App_Panel.add_widget(self.Duplex_Lower_Payload_Box)
self.Kivy_App_Duplex.add_widget(self.Duplex_Tab_Bar)
self.Kivy_App_Duplex.add_widget(self.Duplex_Kivy_App_Panel)
self.Kivy_App_Duplex_Screen.add_widget(self.Kivy_App_Duplex)
self.Kivy_App_UI.add_widget(self.Kivy_App_Duplex_Screen)
# Simplex screen
self.Simplex_Lower_Payload_Box = BoxLayout(orientation='vertical', size_hint=(1, (2 * 0.48)))
self.Simplex_Kivy_App_Panel.add_widget(self.Simplex_TimeSpanControlBar)
self.Simplex_Kivy_App_Panel.add_widget(self.Simplex_Lower_Payload_Box)
self.Kivy_App_Simplex.add_widget(self.Simplex_Tab_Bar)
self.Kivy_App_Simplex.add_widget(self.Simplex_Kivy_App_Panel)
self.Kivy_App_Simplex_Screen.add_widget(self.Kivy_App_Simplex)
self.Kivy_App_UI.add_widget(self.Kivy_App_Simplex_Screen)
# Help screen
self.Kivy_App_Help_Screen = Screen(name="help")
self.Kivy_App_Help = Build_Help_GridLayout(self.on_help_escape)
self.Kivy_App_Help_ScrollView = \
ScrollView(size_hint=(None, None), size=(Initialize_Window_Width, Initialize_Window_Height),
bar_width=5, bar_color=[1, 0, 0, 0.5], bar_inactive_color=[1, 0, 0, 0.2],
do_scroll_x=False)
self.Kivy_App_Help_ScrollView.add_widget(self.Kivy_App_Help)
self.Kivy_App_Help_Screen.add_widget(self.Kivy_App_Help_ScrollView)
self.Kivy_App_UI.add_widget(self.Kivy_App_Help_Screen)
return self.Kivy_App_UI
def on_simplex ( self, *args ):
if (self.Visible_Payload_Count == 2): self.toggle_duplex_versus_simplex()
return True
def on_duplex ( self, *args ):
if (self.Visible_Payload_Count == 1): self.toggle_duplex_versus_simplex()
return True
def toggle_duplex_versus_simplex ( self ):
if (self.Kivy_App_UI.current == "duplex"):
self.synchronize_control_bar_values(self.Simplex_TimeSpanControlBar)
self.Visible_Payload_Count = 1
self.Kivy_App_UI.current = "simplex"
elif (self.Kivy_App_UI.current == "simplex"):
self.synchronize_control_bar_values(self.Duplex_TimeSpanControlBar)
self.Visible_Payload_Count = 2
self.Kivy_App_UI.current = "duplex"
self.update()
def synchronize_control_bar_values ( self, target_control_bar ):
        # Copy the current duration / end-offset values onto the target bar
        target_control_bar.set_period_duration_value(self.Period_Duration_Hours)
        target_control_bar.set_period_end_value(self.Period_End_Hours_Ago)
def update_with_parameters ( self, instance, period_value, period_end_value, *args ):
# print ("update_params:", period_value, period_end_value)
self.Period_Duration_Hours = period_value
self.Period_End_Hours_Ago = period_end_value
self.update()
def update ( self, *args ):
if (self.Visible_Payload_Count == 2):
self.Duplex_Upper_Payload_Box.clear_widgets()
self.Duplex_Lower_Payload_Box.clear_widgets()
upper_payload_label = \
Label(text=Period_Span_NYC_Wall_Time(self.Period_Duration_Hours, self.Period_End_Hours_Ago))
self.Duplex_Upper_Payload_Box.add_widget(upper_payload_label)
lower_payload_label = \
Label(text=Period_Span_NYC_Wall_Time(self.Period_Duration_Hours, self.Period_End_Hours_Ago))
self.Duplex_Lower_Payload_Box.add_widget(lower_payload_label)
elif (self.Visible_Payload_Count == 1):
self.Simplex_Lower_Payload_Box.clear_widgets()
lower_payload_label = \
Label(text=Period_Span_NYC_Wall_Time(self.Period_Duration_Hours, self.Period_End_Hours_Ago))
self.Simplex_Lower_Payload_Box.add_widget(lower_payload_label)
self.Kivy_App_UI.canvas.ask_update()
def on_previous ( self, *args ):
return True
def on_next ( self, *args ):
return True
def on_help ( self, *args ):
self.Kivy_App_UI.current = "help"
return True
def on_help_escape ( self, *args ):
if (self.Kivy_App_UI.current == "help"):
if (self.Visible_Payload_Count == 2):
self.Kivy_App_UI.current = "duplex"
elif (self.Visible_Payload_Count == 1):
self.Kivy_App_UI.current = "simplex"
def on_keyboard_down ( self, instance, keyboard, keycode, text, modifiers ):
# print ("keycode:", keycode, ", text:", text, ", modifiers:", modifiers)
if (keycode == 44):
if (not (self.Kivy_App_UI.current == "help")):
self.toggle_duplex_versus_simplex()
elif (keycode == 41):
self.on_help_escape()
elif ((keycode == 81) or (keycode == 79)):
if (not (self.Kivy_App_UI.current == "help")):
self.on_next()
elif ((keycode == 82) or (keycode == 80)):
if (not (self.Kivy_App_UI.current == "help")):
self.on_previous()
return True
def on_start ( self, **kwargs ):
Clock.schedule_once(self.update, 0.5)
return True
if __name__ == '__main__':
Build_Kivy_App_UI().run() |
the-stack_0_20833 | # -*- coding: utf-8 -*-
import numpy as np
import random
import tensorflow as tf
import tensorflow.contrib.layers as layers
class Network():
def __init__(self, sess, name, inpt_shape, inpt_network=None):
self.sess = sess
self.name = name
self.inpt_network = inpt_network
with tf.variable_scope(self.name):
if self.inpt_network is None:
self.inpt_shape = inpt_shape
self.inpt = tf.placeholder(tf.float32, (None,)+self.inpt_shape)
self.out = self.build_network(self.inpt)
else:
self.inpt_shape = self.inpt_network.inpt.shape
self.inpt = self.inpt_network.inpt
self.out = self.build_network(self.inpt_network.out)
self.vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
self.trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
self.pertubable_vars = [var for var in self.trainable_vars if 'LayerNorm' not in var.name]
def build_network(self, inpt):
return inpt
class PreprocessingNetwork(Network):
def __init__(self, sess, name, inpt_shape, n_features, layer_norm=True):
self.n_features = n_features
self.layer_norm = layer_norm
super().__init__(sess=sess, name=name, inpt_shape=inpt_shape, inpt_network=None)
def build_network(self, inpt):
h = inpt
# h = layers.convolution2d(h, num_outputs=32, kernel_size=8, stride=4, activation_fn=None)
# if self.layer_norm:
# h = layers.layer_norm(h, activation_fn=tf.nn.relu)
# else:
# h = tf.nn.relu(h)
# h = layers.convolution2d(h, num_outputs=64, kernel_size=4, stride=2, activation_fn=None)
# if self.layer_norm:
# h = layers.layer_norm(h, activation_fn=tf.nn.relu)
# else:
# h = tf.nn.relu(h)
# h = layers.convolution2d(h, num_outputs=64, kernel_size=3, stride=1, activation_fn=None)
# if self.layer_norm:
# h = layers.layer_norm(h, activation_fn=tf.nn.relu)
# else:
# h = tf.nn.relu(h)
# h = layers.fully_connected(h, num_outputs=300, activation_fn=None)
# if self.layer_norm:
# h = layers.layer_norm(h, activation_fn=tf.nn.relu)
# else:
# h = tf.nn.relu(h)
# fs = layers.fully_connected(h, num_outputs=self.n_features, activation_fn=None)
# if self.layer_norm:
# fs = layers.layer_norm(fs, activation_fn=tf.nn.relu)
# else:
# fs = tf.nn.relu(fs)
# return layers.flatten(fs)
return inpt
class ActorNetwork(Network):
def __init__(self, sess, name, n_actions, inpt_shape, inpt_network,
learning_rate=1e-4, layer_norm=True, invert_gradients=False):
self.n_actions = n_actions
self.learning_rate = learning_rate
self.layer_norm = layer_norm
self.invert_gradients = invert_gradients
super().__init__(sess=sess, name=name, inpt_shape=inpt_shape, inpt_network=inpt_network)
with tf.variable_scope(self.name):
self.action_gradients = tf.placeholder(tf.float32, [None, self.n_actions])
self.batch_size = tf.placeholder(tf.float32, None)
self.total_trainable_vars = self.trainable_vars
if inpt_network is not None:
self.total_trainable_vars += inpt_network.trainable_vars
self.actor_gradients = tf.gradients(self.out, self.total_trainable_vars, -self.action_gradients)
self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.actor_gradients))
self.trainer = tf.train.AdamOptimizer(self.learning_rate)
self.optimize = self.trainer.apply_gradients(zip(self.actor_gradients, self.total_trainable_vars))
def build_network(self, inpt):
h = inpt
h = layers.fully_connected(h, num_outputs=300, activation_fn=None)
if self.layer_norm:
h = layers.layer_norm(h, activation_fn=tf.nn.relu)
else:
h = tf.nn.relu(h)
if not self.invert_gradients:
actions = layers.fully_connected(h, num_outputs=self.n_actions,
weights_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3),
activation_fn=tf.nn.tanh)
else:
actions = layers.fully_connected(h, num_outputs=self.n_actions, activation_fn=None)
return actions
def train(self, inpt, a_gradients, batch_size, actions):
if self.invert_gradients:
def invert(grad, a):
if grad<0:
return grad*(a+1)/2
return grad*(1-a)/2
for b in range(batch_size):
a_gradients[b] = [invert(grad, a) for (grad, a) in zip(a_gradients[b], actions[b])]
self.sess.run(self.optimize, feed_dict={
self.inpt: inpt,
self.action_gradients: a_gradients,
self.batch_size: batch_size
})
def policy(self, inpt):
return self.sess.run(self.out, feed_dict={
self.inpt: inpt,
})
class CriticNetwork(Network):
def __init__(self, sess, name, n_actions, inpt_shape, inpt_network, learning_rate=1e-3, layer_norm=True, optimism=0):
self.sess = sess
self.n_actions = n_actions
self.learning_rate = learning_rate
self.layer_norm = layer_norm
super().__init__(sess=sess, name=name, inpt_shape=inpt_shape, inpt_network=inpt_network)
with tf.variable_scope(self.name):
self.ys = tf.placeholder(tf.float32, [None, 1])
# self.loss = tf.losses.mean_squared_error(self.ys, self.out)
error = self.out - self.ys
def aloss(a): return tf.pow(error, 2) * tf.pow(tf.sign(error) + a, 2)
self.loss = aloss(-optimism)
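            # Asymmetric squared error: with error = Q - y, the weight
            # (sign(error) - optimism)^2 shrinks the penalty on overestimation
            # (error > 0) when optimism > 0, so the critic is trained
            # "optimistically"; optimism = 0 reduces to a plain squared error.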
self.trainer = tf.train.AdamOptimizer(self.learning_rate)
self.optimize = self.trainer.minimize(self.loss)
self.a_gradients = tf.gradients(self.out, self.actions)
def build_network(self, inpt):
self.actions = tf.placeholder(tf.float32, [None, self.n_actions])
h_a = self.actions
# h_a = layers.fully_connected(h_a, num_outputs=32, activation_fn=None)
# if self.layer_norm:
# h_a = layers.layer_norm(h_a, activation_fn=tf.nn.relu)
# else:
# h_a = tf.nn.relu(h_a)
h = tf.concat([inpt, h_a], 1)
# h = tf.add(inpt, h_a)
h = layers.fully_connected(h, num_outputs=300, activation_fn=None)
if self.layer_norm:
h = layers.layer_norm(h, activation_fn=tf.nn.relu)
else:
h = tf.nn.relu(h)
Q_values = layers.fully_connected(h, num_outputs=1, activation_fn=None)
return Q_values
def train(self, inpt, actions, ys):
return self.sess.run([self.out, self.loss, self.optimize], feed_dict={
self.inpt: inpt,
self.actions: actions,
self.ys: ys
})
def compute_Q(self, inpt, actions):
return self.sess.run(self.out, feed_dict={
self.inpt: inpt,
self.actions: actions
})
def action_gradients(self, inpt, actions):
return self.sess.run(self.a_gradients, feed_dict={
self.inpt: inpt,
self.actions: actions
})
class QNetwork(Network):
def __init__(self, sess, name, n_actions, inpt_shape, inpt_network,
dueling=True, optimism=0):
self.sess = sess
self.n_actions = n_actions
self.dueling=dueling
self.learning_rate = tf.placeholder(tf.float32, shape=[])
super().__init__(sess=sess, name=name, inpt_shape=inpt_shape, inpt_network=inpt_network)
with tf.variable_scope(self.name):
self.ys = tf.placeholder(tf.float32, [None])
self.actions = tf.placeholder(tf.int32, [None])
self.selected_out = tf.reduce_sum(tf.multiply(self.out, tf.one_hot(self.actions, self.n_actions)), 1)
self.error = self.ys - self.selected_out
# def aloss(a): return tf.pow(error, 2) * tf.pow(tf.sign(error) + a, 2)
# self.loss = aloss(-optimism)
self.loss = tf.losses.mean_squared_error(self.ys, self.selected_out)
self.trainer = tf.train.AdamOptimizer(self.learning_rate)
self.optimize = self.trainer.minimize(self.loss)
def build_network(self, inpt):
h = layers.fully_connected(inpt, num_outputs=64, activation_fn=tf.nn.relu)
h = layers.fully_connected(h, num_outputs=64, activation_fn=tf.nn.relu)
hidden_out = layers.fully_connected(h, num_outputs=64, activation_fn=tf.nn.relu)
action_scores = layers.fully_connected(hidden_out, num_outputs=self.n_actions, activation_fn=None)
if self.dueling:
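            # Dueling head: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); centering
            # the advantages keeps V and A identifiable (standard dueling-DQN construction).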
state_score = layers.fully_connected(hidden_out, num_outputs=1, activation_fn=None)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
out = state_score + action_scores_centered
else:
out = action_scores
return out
def train(self, inpt, actions, ys, learning_rate):
return self.sess.run([self.error, self.loss, self.optimize], feed_dict={
self.inpt: inpt,
self.actions: actions,
self.ys: ys,
self.learning_rate: learning_rate
})
def compute_Q(self, inpt):
return self.sess.run(self.out, feed_dict={
self.inpt: inpt
})
def compute_selected_Q(self, inpt, actions):
return self.sess.run(self.selected_out, feed_dict={
self.inpt: inpt,
self.actions: actions
})
class PredictionNetwork(Network):
def __init__(self, sess, name, n_actions, inpt_shape, inpt_network, n_output,
learning_rate=1e-4, layer_norm=True, classifier=False):
self.sess = sess
self.n_actions = n_actions
self.n_output = n_output
self.learning_rate = learning_rate
self.layer_norm = layer_norm
self.classifier = classifier
super().__init__(sess=sess, name=name, inpt_shape=inpt_shape, inpt_network=inpt_network)
with tf.variable_scope(self.name):
self.ys = tf.placeholder(tf.float32, [None, self.n_output])
if self.classifier:
self.loss = tf.losses.sigmoid_cross_entropy(self.ys, self.out)/self.n_output
else:
self.loss = tf.losses.mean_squared_error(self.ys, self.out)/self.n_output
self.trainer = tf.train.AdamOptimizer(self.learning_rate)
self.optimize = self.trainer.minimize(self.loss)
def build_network(self, inpt):
self.actions = tf.placeholder(tf.float32, [None, self.n_actions])
h_a = self.actions
# h_a = layers.fully_connected(h_a, num_outputs=32, activation_fn=None)
# if self.layer_norm:
# h_a = layers.layer_norm(h_a, activation_fn=tf.nn.relu)
# else:
# h_a = tf.nn.relu(h_a)
h = tf.concat([inpt, h_a], 1)
# h = tf.add(inpt, h_a)
h = layers.fully_connected(h, num_outputs=64, activation_fn=None)
if self.layer_norm:
h = layers.layer_norm(h, activation_fn=tf.nn.relu)
else:
h = tf.nn.relu(h)
pred = layers.fully_connected(h, num_outputs=self.n_output, activation_fn=None)
return pred
def train(self, inpt, actions, ys):
return self.sess.run([self.loss, self.optimize], feed_dict={
self.inpt: inpt,
self.actions: actions,
self.ys: ys
})
def predict(self, inpt, actions):
return self.sess.run(self.out, feed_dict={
self.inpt: inpt,
self.actions: actions
})
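
# ----------------------------------------------------------------------
# Minimal wiring sketch (not part of the original module, for illustration
# only): assumes TensorFlow 1.x with tf.contrib available, and shows a
# shared PreprocessingNetwork feeding an ActorNetwork and a CriticNetwork,
# as in a DDPG-style actor-critic setup.
if __name__ == "__main__":
    obs_shape, n_actions = (8,), 2
    with tf.Session() as sess:
        pre = PreprocessingNetwork(sess, "pre", obs_shape, n_features=8)
        actor = ActorNetwork(sess, "actor", n_actions, obs_shape, inpt_network=pre)
        critic = CriticNetwork(sess, "critic", n_actions, obs_shape, inpt_network=pre)
        sess.run(tf.global_variables_initializer())

        obs = np.zeros((1,) + obs_shape, dtype=np.float32)
        act = actor.policy(obs)             # deterministic policy action
        q_val = critic.compute_Q(obs, act)  # Q(s, a) for that action
        print(act, q_val)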
|
the-stack_0_20834 | """
Implement weak defense model for Athena on top of IBM ART.
It wraps a keras model to a weak defense in Athena ensemble.
@author: Ying Meng (y(dot)meng201011(at)gmail(dot)com)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import six
from art.classifiers.classifier import Classifier, ClassifierNeuralNetwork, ClassifierGradients
from models.image_processor import transform
logger = logging.getLogger(__name__)
class WeakDefense(ClassifierNeuralNetwork, ClassifierGradients, Classifier):
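    # Conceptually: a weak defense couples a Keras classifier with an input
    # transformation (trans_configs); predict() first transforms the inputs
    # and then queries the wrapped model, so an ensemble of differently
    # transformed weak defenses realizes the Athena ensemble defense.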
def __init__(self, model, trans_configs, use_logits=False, channel_index=3,
clip_values=(0., 1.), input_layer=0, output_layer=0, ):
super(WeakDefense, self).__init__(clip_values=clip_values,
preprocessing_defences=None,
postprocessing_defences=None,
preprocessing=(0, 1),
channel_index=channel_index, )
self._model = model
self._trans_configs = trans_configs
self._channel_index = channel_index
self._input_layer = input_layer
self._output_layer = output_layer
if "<class 'tensorflow" in str(type(model)):
self.is_tensorflow = True
elif "<class 'keras" in str(type(model)):
self.is_tensorflow = False
else:
raise TypeError("Type of model not recognized:" + type(model))
self._initialize_params(model, use_logits, input_layer, output_layer)
def loss_gradient(self, x, y, **kwargs):
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,).
:type y: `np.ndarray`
:return: Array of gradients of the same shape as `x`.
:rtype: `np.ndarray`
"""
# Check shape of `x` because of custom function for `_loss_gradients`
if self._input_shape != x.shape[1:]:
raise ValueError(
"Error when checking x: expected x to have shape {} but got array with shape {}".format(
self._input_shape, x.shape[1:]
)
)
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=False)
# Adjust the shape of y for loss functions that do not take labels in one-hot encoding
if self._reduce_labels:
y_preprocessed = np.argmax(y_preprocessed, axis=1)
# Compute gradients
gradients = self._loss_gradients([x_preprocessed, y_preprocessed])[0]
gradients = self._apply_preprocessing_gradient(x, gradients)
assert gradients.shape == x_preprocessed.shape
return gradients
def class_gradient(self, x, label=None, **kwargs):
"""
Compute per-class derivatives w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
output is computed for all samples. If multiple values as provided, the first dimension should
match the batch size of `x`, and each value will be used as target for its corresponding sample in
`x`. If `None`, then gradients for all classes will be computed for each sample.
:type label: `int` or `list`
:return: Array of gradients of input features w.r.t. each class in the form
`(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
`(batch_size, 1, input_shape)` when `label` parameter is specified.
:rtype: `np.ndarray`
"""
# Check value of label for computing gradients
if not (
label is None
or (isinstance(label, (int, np.integer)) and label in range(self.nb_classes()))
or (
isinstance(label, np.ndarray)
and len(label.shape) == 1
and (label < self.nb_classes()).all()
and label.shape[0] == x.shape[0]
)
):
raise ValueError("Label %s is out of range." % str(label))
# Check shape of `x` because of custom function for `_loss_gradients`
if self._input_shape != x.shape[1:]:
raise ValueError(
"Error when checking x: expected x to have shape {} but got array with shape {}".format(
self._input_shape, x.shape[1:]
)
)
self._init_class_gradients(label=label)
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
if label is None:
# Compute the gradients w.r.t. all classes
gradients = np.swapaxes(np.array(self._class_gradients([x_preprocessed])), 0, 1)
elif isinstance(label, (int, np.integer)):
# Compute the gradients only w.r.t. the provided label
gradients = np.swapaxes(np.array(self._class_gradients_idx[label]([x_preprocessed])), 0, 1)
assert gradients.shape == (x_preprocessed.shape[0], 1) + self.input_shape
else:
# For each sample, compute the gradients w.r.t. the indicated target class (possibly distinct)
unique_label = list(np.unique(label))
gradients = np.array([self._class_gradients_idx[l]([x_preprocessed]) for l in unique_label])
gradients = np.swapaxes(np.squeeze(gradients, axis=1), 0, 1)
lst = [unique_label.index(i) for i in label]
gradients = np.expand_dims(gradients[np.arange(len(gradients)), lst], axis=1)
gradients = self._apply_preprocessing_gradient(x, gradients)
return gradients
def predict(self, x, batch_size=128, **kwargs):
"""
Perform prediction for a batch of inputs.
:param x: Test set.
:type x: `np.ndarray`
:param batch_size: Size of batches.
:type batch_size: `int`
:return: Array of predictions of shape `(nb_inputs, nb_classes)`.
:rtype: `np.ndarray`
"""
from art.config import ART_NUMPY_DTYPE
# Apply transformation
x_preprocessed = transform(x, self._trans_configs)
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x_preprocessed, y=None, fit=False)
# Run predictions with batching
predictions = np.zeros((x_preprocessed.shape[0], self.nb_classes()), dtype=ART_NUMPY_DTYPE)
for batch_index in range(int(np.ceil(x_preprocessed.shape[0] / float(batch_size)))):
begin, end = batch_index * batch_size, min((batch_index + 1) * batch_size, x_preprocessed.shape[0])
predictions[begin:end] = self._model.predict([x_preprocessed[begin:end]])
# Apply postprocessing
predictions = self._apply_postprocessing(preds=predictions, fit=False)
return predictions
def fit(self, x, y, batch_size=128, nb_epochs=20, **kwargs):
"""
Fit the classifier on the training set `(x, y)`.
:param x: Training data.
:type x: `np.ndarray`
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,).
:type y: `np.ndarray`
:param batch_size: Size of batches.
:type batch_size: `int`
:param nb_epochs: Number of epochs to use for training.
:type nb_epochs: `int`
:param kwargs: Dictionary of framework-specific arguments. These should be parameters supported by the
`fit_generator` function in Keras and will be passed to this function as such. Including the number of
            epochs or the number of steps per epoch as part of this argument will result in an error.
:type kwargs: `dict`
:return: `None`
"""
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)
# Adjust the shape of y for loss functions that do not take labels in one-hot encoding
if self._reduce_labels:
y_preprocessed = np.argmax(y_preprocessed, axis=1)
gen = generator_fit(x_preprocessed, y_preprocessed, batch_size)
steps_per_epoch = max(int(x_preprocessed.shape[0] / batch_size), 1)
self._model.fit_generator(gen, steps_per_epoch=steps_per_epoch, epochs=nb_epochs, **kwargs)
def fit_generator(self, generator, nb_epochs=20, **kwargs):
"""
Fit the classifier using the generator that yields batches as specified.
:param generator: Batch generator providing `(x, y)` for each epoch. If the generator can be used for native
training in Keras, it will.
:type generator: :class:`.DataGenerator`
:param nb_epochs: Number of epochs to use for training.
:type nb_epochs: `int`
:param kwargs: Dictionary of framework-specific arguments. These should be parameters supported by the
`fit_generator` function in Keras and will be passed to this function as such. Including the number of
            epochs as part of this argument will result in an error.
:type kwargs: `dict`
:return: `None`
"""
from art.data_generators import KerasDataGenerator
# Try to use the generator as a Keras native generator, otherwise use it through the `DataGenerator` interface
if isinstance(generator, KerasDataGenerator) and \
(self.preprocessing_defences is None or self.preprocessing_defences == []) and \
self.preprocessing == (0, 1):
try:
self._model.fit_generator(generator.iterator, epochs=nb_epochs, **kwargs)
except ValueError:
logger.info('Unable to use data generator as Keras generator. Now treating as framework-independent.')
super(WeakDefense, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)
else:
super(WeakDefense, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)
@property
def layer_names(self):
"""
Return the hidden layers in the model, if applicable.
:return: The hidden layers in the model, input and output layers excluded.
:rtype: `list`
.. warning:: `layer_names` tries to infer the internal structure of the model.
This feature comes with no guarantees on the correctness of the result.
The intended order of the layers tries to match their order in the model, but this is not
guaranteed either.
"""
return self._layer_names
def get_activations(self, x, layer, batch_size):
"""
Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and
`nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by
calling `layer_names`.
:param x: Input for computing the activations.
:type x: `np.ndarray`
:param layer: Layer for computing the activations
:type layer: `int` or `str`
:param batch_size: Size of batches.
:type batch_size: `int`
:return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.
:rtype: `np.ndarray`
"""
# pylint: disable=E0401
if self.is_tensorflow:
import tensorflow.keras.backend as k
else:
import keras.backend as k
from art.config import ART_NUMPY_DTYPE
if isinstance(layer, six.string_types):
if layer not in self._layer_names:
raise ValueError("Layer name %s is not part of the graph." % layer)
layer_name = layer
elif isinstance(layer, int):
if layer < 0 or layer >= len(self._layer_names):
raise ValueError(
"Layer index %d is outside of range (0 to %d included)." % (layer, len(self._layer_names) - 1)
)
layer_name = self._layer_names[layer]
else:
raise TypeError("Layer must be of type `str` or `int`.")
layer_output = self._model.get_layer(layer_name).output
output_func = k.function([self._input], [layer_output])
if x.shape == self.input_shape:
x_expanded = np.expand_dims(x, 0)
else:
x_expanded = x
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x=x_expanded, y=None, fit=False)
# Determine shape of expected output and prepare array
output_shape = output_func([x_preprocessed[0][None, ...]])[0].shape
activations = np.zeros((x_preprocessed.shape[0],) + output_shape[1:], dtype=ART_NUMPY_DTYPE)
# Get activations with batching
for batch_index in range(int(np.ceil(x_preprocessed.shape[0] / float(batch_size)))):
begin, end = batch_index * batch_size, min((batch_index + 1) * batch_size, x_preprocessed.shape[0])
activations[begin:end] = output_func([x_preprocessed[begin:end]])[0]
return activations
def set_learning_phase(self, train):
"""
Set the learning phase for the backend framework.
:param train: True to set the learning phase to training, False to set it to prediction.
:type train: `bool`
"""
# pylint: disable=E0401
if self.is_tensorflow:
import tensorflow.keras.backend as k
else:
import keras.backend as k
if isinstance(train, bool):
self._learning_phase = train
k.set_learning_phase(int(train))
def nb_classes(self):
"""
Return the number of output classes.
:return: Number of classes in the data.
:rtype: `int`
"""
return self._nb_classes
def save(self, filename, path=None):
"""
Save a model to file in the format specific to the backend framework. For Keras, .h5 format is used.
:param filename: Name of the file where to store the model.
:type filename: `str`
:param path: Path of the folder where to store the model. If no path is specified, the model will be stored in
the default data location of the library `ART_DATA_PATH`.
:type path: `str`
:return: None
"""
import os
if path is None:
from art.config import ART_DATA_PATH
full_path = os.path.join(ART_DATA_PATH, filename)
else:
full_path = os.path.join(path, filename)
folder = os.path.split(full_path)[0]
if not os.path.exists(folder):
os.makedirs(folder)
self._model.save(str(full_path))
logger.info("Model saved in path: %s.", full_path)
def _init_class_gradients(self, label=None):
# pylint: disable=E0401
if self.is_tensorflow:
import tensorflow.keras.backend as k
else:
import keras.backend as k
if len(self._output.shape) == 2:
nb_outputs = self._output.shape[1]
else:
raise ValueError("Unexpected output shape for classification in Keras model.")
if label is None:
logger.debug("Computing class gradients for all %i classes.", self.nb_classes())
if not hasattr(self, "_class_gradients"):
class_gradients = [k.gradients(self._predictions_op[:, i], self._input)[0] for i in range(nb_outputs)]
self._class_gradients = k.function([self._input], class_gradients)
else:
if isinstance(label, int):
unique_labels = [label]
else:
unique_labels = np.unique(label)
logger.debug("Computing class gradients for classes %s.", str(unique_labels))
if not hasattr(self, "_class_gradients_idx"):
self._class_gradients_idx = [None for _ in range(nb_outputs)]
for current_label in unique_labels:
if self._class_gradients_idx[current_label] is None:
class_gradients = [k.gradients(self._predictions_op[:, current_label], self._input)[0]]
self._class_gradients_idx[current_label] = k.function([self._input], class_gradients)
def _initialize_params(self, model, use_logits, input_layer, output_layer, synthesis=True, num_synthesis=10):
"""
Initialize most parameters of the classifier. This is a convenience function called by `__init__` and
`__setstate__` to avoid code duplication.
:param model: Keras model
:type model: `keras.models.Model`
:param use_logits: True if the output of the model are logits.
:type use_logits: `bool`
:param input_layer: Which layer to consider as the Input when the model has multiple input layers.
:type input_layer: `int`
:param output_layer: Which layer to consider as the Output when the model has multiple output layers.
:type output_layer: `int`
"""
# pylint: disable=E0401
if self.is_tensorflow:
import tensorflow as tf
if tf.executing_eagerly():
raise ValueError("TensorFlow is executing eagerly. Please disable eager execution.")
import tensorflow.keras as keras
import tensorflow.keras.backend as k
else:
import keras
import keras.backend as k
if hasattr(model, "inputs"):
self._input_layer = input_layer
self._input = model.inputs[input_layer]
else:
self._input = model.input
self._input_layer = 0
if hasattr(model, "outputs"):
self._output = model.outputs[output_layer]
self._output_layer = output_layer
else:
self._output = model.output
self._output_layer = 0
_, self._nb_classes = k.int_shape(self._output)
self._input_shape = k.int_shape(self._input)[1:]
logger.debug(
"Inferred %i classes and %s as input shape for Keras classifier.", self.nb_classes(), str(self.input_shape)
)
self._use_logits = use_logits
# Get loss function
if not hasattr(self._model, "loss"):
logger.warning("Keras model has no loss set. Classifier tries to use `k.sparse_categorical_crossentropy`.")
loss_function = k.sparse_categorical_crossentropy
else:
if isinstance(self._model.loss, six.string_types):
loss_function = getattr(k, self._model.loss)
elif "__name__" in dir(self._model.loss) and self._model.loss.__name__ in [
"categorical_hinge",
"categorical_crossentropy",
"sparse_categorical_crossentropy",
"binary_crossentropy",
"kullback_leibler_divergence",
]:
if self._model.loss.__name__ in ["categorical_hinge", "kullback_leibler_divergence"]:
loss_function = getattr(keras.losses, self._model.loss.__name__)
else:
loss_function = getattr(keras.backend, self._model.loss.__name__)
elif isinstance(
self._model.loss,
(
keras.losses.CategoricalHinge,
keras.losses.CategoricalCrossentropy,
keras.losses.SparseCategoricalCrossentropy,
keras.losses.BinaryCrossentropy,
keras.losses.KLDivergence,
),
):
loss_function = self._model.loss
else:
loss_function = getattr(k, self._model.loss.__name__)
# Check if loss function is an instance of loss function generator, the try is required because some of the
# modules are not available in older Keras versions
try:
flag_is_instance = isinstance(
loss_function,
(
keras.losses.CategoricalHinge,
keras.losses.CategoricalCrossentropy,
keras.losses.BinaryCrossentropy,
keras.losses.KLDivergence,
),
)
except AttributeError:
flag_is_instance = False
# Check if the labels have to be reduced to index labels and create placeholder for labels
if (
"__name__" in dir(loss_function)
and loss_function.__name__
in ["categorical_hinge", "categorical_crossentropy", "binary_crossentropy", "kullback_leibler_divergence"]
) or (self.is_tensorflow and flag_is_instance):
self._reduce_labels = False
label_ph = k.placeholder(shape=self._output.shape)
elif (
"__name__" in dir(loss_function) and loss_function.__name__ in ["sparse_categorical_crossentropy"]
) or isinstance(loss_function, keras.losses.SparseCategoricalCrossentropy):
self._reduce_labels = True
label_ph = k.placeholder(shape=[None,])
else:
raise ValueError("Loss function not recognised.")
# Define the loss using the loss function
if "__name__" in dir(loss_function,) and loss_function.__name__ in [
"categorical_crossentropy",
"sparse_categorical_crossentropy",
"binary_crossentropy",
]:
loss_ = loss_function(label_ph, self._output, from_logits=self._use_logits)
elif "__name__" in dir(loss_function) and loss_function.__name__ in [
"categorical_hinge",
"kullback_leibler_divergence",
]:
loss_ = loss_function(label_ph, self._output)
elif isinstance(
loss_function,
(
keras.losses.CategoricalHinge,
keras.losses.CategoricalCrossentropy,
keras.losses.SparseCategoricalCrossentropy,
keras.losses.KLDivergence,
keras.losses.BinaryCrossentropy,
),
):
loss_ = loss_function(label_ph, self._output)
# Average the loss for synthesizer
# if synthesis:
# print('>>> SYNTHESIZING ADVERSARIAL EXAMPLES...')
# raw_loss_ = loss_.detach().clone()
# num_samples = int(raw_loss_.shape[0] / num_synthesis)
# loss_ = []
# for i in range(num_samples):
# l_ = 0.
# for j in range(num_synthesis):
# id = i * num_synthesis + j
# l_ += raw_loss_[id]
# loss_.append(l_)
# loss_ = np.asarray(loss_)
# loss_ = k.sum(loss_)
# Define loss gradients
loss_gradients = k.gradients(loss_, self._input)
if k.backend() == "tensorflow":
loss_gradients = loss_gradients[0]
elif k.backend() == "cntk":
raise NotImplementedError("Only TensorFlow and Theano support is provided for Keras.")
# Set loss, gradients and prediction functions
self._predictions_op = self._output
self._loss = loss_
self._loss_gradients = k.function([self._input, label_ph], [loss_gradients])
# Get the internal layer
self._layer_names = self._get_layers()
def _get_layers(self):
"""
Return the hidden layers in the model, if applicable.
:return: The hidden layers in the model, input and output layers excluded.
:rtype: `list`
"""
# pylint: disable=E0401
if self.is_tensorflow:
from tensorflow.keras.layers import InputLayer
else:
from keras.engine.topology import InputLayer
layer_names = [layer.name for layer in self._model.layers[:-1] if not isinstance(layer, InputLayer)]
logger.info("Inferred %i hidden layers on Keras classifier.", len(layer_names))
return layer_names
def __getstate__(self):
"""
Use to ensure `KerasClassifier` can be pickled.
:return: State dictionary with instance parameters.
:rtype: `dict`
"""
import time
state = self.__dict__.copy()
# Remove the unpicklable entries
del state["_model"]
del state["_input"]
del state["_output"]
del state["_predictions_op"]
del state["_loss"]
del state["_loss_gradients"]
del state["_layer_names"]
model_name = str(time.time()) + ".h5"
state["model_name"] = model_name
self.save(model_name)
return state
def __setstate__(self, state):
"""
Use to ensure `KerasClassifier` can be unpickled.
:param state: State dictionary with instance parameters to restore.
:type state: `dict`
"""
self.__dict__.update(state)
# Load and update all functionality related to Keras
# pylint: disable=E0401
import os
from art.config import ART_DATA_PATH
if self.is_tensorflow:
from tensorflow.keras.models import load_model
else:
from keras.models import load_model
full_path = os.path.join(ART_DATA_PATH, state["model_name"])
model = load_model(str(full_path))
self._model = model
self._initialize_params(model, state["_use_logits"], state["_input_layer"], state["_output_layer"])
def __repr__(self):
repr_ = (
"%s(model=%r, use_logits=%r, channel_index=%r, clip_values=%r, preprocessing_defences=%r, "
"postprocessing_defences=%r, preprocessing=%r, input_layer=%r, output_layer=%r)"
% (
self.__module__ + "." + self.__class__.__name__,
self._model,
self._use_logits,
self.channel_index,
self.clip_values,
self.preprocessing_defences,
self.postprocessing_defences,
self.preprocessing,
self._input_layer,
self._output_layer,
)
)
return repr_
def generator_fit(x, y, batch_size=128):
"""
Minimal data generator for randomly batching large datasets.
:param x: The data sample to batch.
:type x: `np.ndarray`
:param y: The labels for `x`. The first dimension has to match the first dimension of `x`.
:type y: `np.ndarray`
:param batch_size: The size of the batches to produce.
:type batch_size: `int`
:return: A batch of size `batch_size` of random samples from `(x, y)`
:rtype: `tuple(np.ndarray, np.ndarray)`
"""
while True:
indices = np.random.randint(x.shape[0], size=batch_size)
yield x[indices], y[indices]
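# Hedged usage sketch (not part of the original module): the generator above can be handed
# straight to a Keras-style training loop; `x_train`, `y_train` and `model` are assumed
# placeholders supplied by the caller.
#
#   batches = generator_fit(x_train, y_train, batch_size=128)
#   model.fit(batches, steps_per_epoch=x_train.shape[0] // 128, epochs=5)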
|
the-stack_0_20835 | import sys
import gym
import pylab
import random
import numpy as np
from SumTree import SumTree
from collections import deque
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import Sequential
EPISODES = 300
# DQN agent for the CartPole example
class DQNAgent:
def __init__(self, state_size, action_size):
self.render = False
self.load_model = False
        # Define the sizes of the state and action spaces
self.state_size = state_size
self.action_size = action_size
        # DQN hyperparameters
self.discount_factor = 0.99
self.learning_rate = 0.001
self.epsilon = 1.0
self.epsilon_decay = 0.999
self.epsilon_min = 0.01
self.batch_size = 64
self.train_start = 2000
self.memory_size = 2000
        # Replay memory, maximum size 2000
self.memory = Memory(self.memory_size)
        # Create the model and the target model
self.model = self.build_model()
self.target_model = self.build_model()
        # Initialize the target model
self.update_target_model()
if self.load_model:
self.model.load_weights("./save_model/cartpole_dqn_trained.h5")
    # Build a neural network with the state as input and the Q-values as output
def build_model(self):
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu',
kernel_initializer='he_uniform'))
model.add(Dense(24, activation='relu',
kernel_initializer='he_uniform'))
model.add(Dense(self.action_size, activation='linear',
kernel_initializer='he_uniform'))
model.summary()
model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
return model
    # Update the target model with the model's weights
def update_target_model(self):
self.target_model.set_weights(self.model.get_weights())
    # Choose an action with the epsilon-greedy policy
def get_action(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
else:
q_value = self.model.predict(state)
return np.argmax(q_value[0])
    # Store the sample <s, a, r, s'> in the replay memory
def append_sample(self, state, action, reward, next_state, done):
if self.epsilon == 1:
done = True
        # Compute the TD-error and store it in memory together with the sample
target = self.model.predict([state])
old_val = target[0][action]
target_val = self.target_model.predict([next_state])
if done:
target[0][action] = reward
else:
target[0][action] = reward + self.discount_factor * (
np.amax(target_val[0]))
error = abs(old_val - target[0][action])
self.memory.add(error, (state, action, reward, next_state, done))
    # Train the model on a batch sampled from the replay memory
def train_model(self):
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
        # Sample a batch of transitions from memory
mini_batch = self.memory.sample(self.batch_size)
errors = np.zeros(self.batch_size)
states = np.zeros((self.batch_size, self.state_size))
next_states = np.zeros((self.batch_size, self.state_size))
actions, rewards, dones = [], [], []
for i in range(self.batch_size):
states[i] = mini_batch[i][1][0]
actions.append(mini_batch[i][1][1])
rewards.append(mini_batch[i][1][2])
next_states[i] = mini_batch[i][1][3]
dones.append(mini_batch[i][1][4])
        # Q-values of the model for the current states
        # Q-values of the target model for the next states
target = self.model.predict(states)
target_val = self.target_model.predict(next_states)
        # Update targets using the Bellman optimality equation
for i in range(self.batch_size):
old_val = target[i][actions[i]]
if dones[i]:
target[i][actions[i]] = rewards[i]
else:
target[i][actions[i]] = rewards[i] + self.discount_factor * (
np.amax(target_val[i]))
            # Store the TD-error
errors[i] = abs(old_val - target[i][actions[i]])
        # Update the priorities with the TD-errors
for i in range(self.batch_size):
idx = mini_batch[i][0]
self.memory.update(idx, errors[i])
self.model.fit(states, target, batch_size=self.batch_size,
epochs=1, verbose=0)
class Memory: # stored as ( s, a, r, s_ ) in SumTree
e = 0.01
a = 0.6
def __init__(self, capacity):
self.tree = SumTree(capacity)
def _getPriority(self, error):
return (error + self.e) ** self.a
def add(self, error, sample):
p = self._getPriority(error)
self.tree.add(p, sample)
def sample(self, n):
batch = []
segment = self.tree.total() / n
for i in range(n):
a = segment * i
b = segment * (i + 1)
s = random.uniform(a, b)
(idx, p, data) = self.tree.get(s)
batch.append((idx, data))
return batch
def update(self, idx, error):
p = self._getPriority(error)
self.tree.update(idx, p)
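# Hedged illustration (not part of the original script): with proportional prioritization
# p_i = (|TD-error| + e) ** a, setting a = 0 reduces to uniform replay while a = 1 samples
# purely by error magnitude; drawing one value per equal segment of the total priority in
# Memory.sample() keeps each batch spread across the whole SumTree.
def _example_priorities(errors, e=0.01, a=0.6):
    """Illustrative helper only; the agent above never calls it."""
    return [(abs(err) + e) ** a for err in errors]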
if __name__ == "__main__":
    # CartPole-v1 environment, maximum of 500 timesteps per episode
env = gym.make('CartPole-v1')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
    # Create the DQN agent
agent = DQNAgent(state_size, action_size)
scores, episodes = [], []
step = 0
for e in range(EPISODES):
done = False
score = 0
        # Reset the env
state = env.reset()
state = np.reshape(state, [1, state_size])
while not done:
if agent.render:
env.render()
step += 1
            # Choose an action for the current state
action = agent.get_action(state)
            # Take one timestep in the environment with the chosen action
next_state, reward, done, info = env.step(action)
next_state = np.reshape(next_state, [1, state_size])
            # Give a -10 penalty if the episode ends before reaching a score of 500
r = reward if not done or score+reward == 500 else -10
            # Store the sample <s, a, r, s'> in the replay memory
agent.append_sample(state, action, r, next_state, done)
            # Train at every timestep once enough samples have been collected
if step >= agent.train_start:
agent.train_model()
score += reward
state = next_state
if done:
                # Update the target model with the model's weights at the end of each episode
agent.update_target_model()
# score = score if score == 500 else score + 100
                # Log the training result every episode
scores.append(score)
episodes.append(e)
pylab.plot(episodes, scores, 'b')
pylab.savefig("./save_graph/cartpole_dqn.png")
print("episode:", e, " score:", score, " memory length:",
step if step <= agent.memory_size else agent.memory_size, " epsilon:", agent.epsilon)
                # Stop training if the mean score of the last 10 episodes is above 490
if np.mean(scores[-min(10, len(scores)):]) > 490:
agent.model.save_weights("./save_model/cartpole_dqn.h5")
sys.exit()
|
the-stack_0_20837 | from librispect.features import spectrogram
import glob
import librispect as lspct
import librosa
import numpy as np
import pytest
def test_stft_to_wav():
"""testing the similarity between the original and reconstructed data"""
hparams = lspct.features.spectrogram.HPARAMS
wavfile = glob.glob((lspct.paths.WAV_DIR / "*.wav").as_posix())[0]
data, hparams["sample_rate"] = librosa.load(wavfile)
stft = spectrogram.wav_to_stft(data, hparams)
reconstructed_data = spectrogram.stft_to_wav(stft, hparams)
# assume aligned, trim end
min_len = min(len(data), len(reconstructed_data))
assert np.allclose(data[:min_len], reconstructed_data[:min_len])
def test_mel_spectrogram():
"""test the mel sepctrogram"""
hparams = lspct.features.spectrogram.HPARAMS
wavfile = glob.glob((lspct.paths.WAV_DIR / "*.wav").as_posix())[0]
data, hparams["sample_rate"] = librosa.load(wavfile)
mel_basis = spectrogram.build_mel_basis(hparams)
for pre in [True, False]:
stft = spectrogram.wav_to_stft(data, hparams, pre=pre)
spect = spectrogram.spectrogram(stft, hparams)
assert len(spect.shape) == 2, spect.shape
mel_spect = spectrogram.linear_to_mel(spect, mel_basis)
assert len(mel_spect.shape) == 2, mel_spect.shape
assert hparams["num_mels"] in mel_spect.shape, mel_spect.shape
assert len(mel_spect) != 0
def test_spect_iter():
"""test the spect_maker class with the diff
between spect created by class and function"""
hparams = lspct.features.spectrogram.HPARAMS
wavfile = glob.glob((lspct.paths.WAV_DIR / "*.wav").as_posix())[0]
data, hparams["sample_rate"] = librosa.load(wavfile)
sm = spectrogram.spect_maker(hparams)
for spect, stft in sm.spect_iter(
glob.glob((lspct.paths.WAV_DIR / "*.wav").as_posix())[:25]
):
assert len(stft.shape) == 3, stft.shape
assert stft.shape[0] == 2, stft.shape
assert len(spect.shape) == 2, spect.shape
new_spect = spectrogram.spectrogram(stft, hparams)
assert len(new_spect.shape) == 2, new_spect.shape
assert np.allclose(spect, new_spect)
def test_mel_spect_iter():
hparams = lspct.features.spectrogram.HPARAMS
wavfile = glob.glob((lspct.paths.WAV_DIR / "*.wav").as_posix())[0]
data, hparams["sample_rate"] = librosa.load(wavfile)
sm = spectrogram.spect_maker(hparams)
mel_basis = spectrogram.build_mel_basis(hparams)
for mspect, stft in sm.mel_spect_iter(
glob.glob((lspct.paths.WAV_DIR / "*.wav").as_posix())[:25]
):
assert len(stft.shape) == 3, stft.shape
assert stft.shape[0] == 2, stft.shape
assert len(mspect.shape) == 2, mspect.shape
new_spect = spectrogram.spectrogram(stft, hparams)
assert len(new_spect.shape) == 2, new_spect.shape
new_mspect = spectrogram.linear_to_mel(new_spect, mel_basis)
assert np.allclose(mspect, new_mspect)
test_params = [
(1, 16, 1, 32),
(5, 16, 1, 32),
(5, 4, 1, 32),
(5, 32, 1, 32),
(5, 16, 4, 32),
(5, 16, 16, 32),
(5, 16, 1, 8),
(5, 16, 1, 64),
(25, 16, 1, 32),
(100, 16, 1, 32),
# (None, 16, 16, 32),
]
@pytest.mark.parametrize("samples,window_size,step_size,batch_size", test_params)
def test_batch_ss_iter(samples, window_size, step_size, batch_size):
hparams = lspct.features.spectrogram.HPARAMS
wavfile = glob.glob((lspct.paths.WAV_DIR / "*.wav").as_posix())[0]
data, hparams["sample_rate"] = librosa.load(wavfile)
sm = spectrogram.spect_maker(hparams, window_size=window_size, step_size=step_size)
path_list = glob.glob((lspct.paths.WAV_DIR / "*.wav").as_posix())
if samples:
path_list = path_list[:samples]
steps_per_epoch = sm.batch_ss_per_epoch(path_list, batch_size)
assert isinstance(steps_per_epoch, int)
assert steps_per_epoch > 0, steps_per_epoch
for i, (spect_batch, stft_batch) in enumerate(
sm.batch_iter(path_list, batch_size, False)
):
assert spect_batch.shape[0] == stft_batch.shape[0], (
spect_batch.shape[0],
stft_batch.shape[0],
)
for batch in [spect_batch, stft_batch]:
assert batch.shape[0] <= batch_size, (batch.shape[0], batch_size)
assert batch.shape[-1] == window_size, (batch.shape[-1], window_size)
if step_size < window_size:
overlap = window_size - step_size
first_sample = batch[0, ...]
second_sample = batch[1, ...]
assert np.allclose(
first_sample[..., -overlap:], second_sample[..., :overlap]
)
if i == 0:
start_spect_batch = spect_batch
start_stft_batch = stft_batch
elif i == steps_per_epoch:
assert np.allclose(
start_spect_batch, spect_batch
), "hasn't looped around yet"
assert np.allclose(start_stft_batch, stft_batch), "hasn't looped around yet"
break
if __name__ == "__main__":
test_stft_to_wav()
test_mel_spectrogram()
test_spect_iter()
test_mel_spect_iter()
|
the-stack_0_20839 | """
Copyright 2018 Skyscanner Ltd
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from cfripper.model.rule_processor import Rule
from cfripper.config.logger import get_logger
logger = get_logger()
class S3BucketPublicReadAclAndListStatementRule(Rule):
REASON = "S3 Bucket {} should not have a public read acl and list bucket statement"
MONITOR_MODE = True
def invoke(self, resources):
# Get all bucket policies and filter to get the ones that allow list actions
bucket_policies = []
for policy in resources.get("AWS::S3::BucketPolicy", []):
if any(policy.policy_document.wildcard_allowed_actions(pattern=r"^s3:L.*$")):
bucket_policies.append(policy.bucket["Ref"])
# Check if bucket policies exist
if bucket_policies:
# Get S3 buckets
buckets = resources.get("AWS::S3::Bucket", [])
for resource in buckets:
try:
if resource.access_control == "PublicRead" and resource.logical_id in bucket_policies:
self.add_failure(
type(self).__name__,
self.REASON.format(resource.logical_id),
)
except AttributeError:
logger.info("No access control on bucket")
|
the-stack_0_20845 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Luascoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import LuascoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_approx,
assert_equal,
)
class WalletGroupTest(LuascoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
self.rpc_timeout = 240
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some coins
self.nodes[0].generate(110)
# Get some addresses from the two nodes
addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
[self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
[self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
self.nodes[0].generate(1)
self.sync_all()
# For each node, send 0.2 coins back to 0;
# - node[1] should pick one 0.5 UTXO and leave the rest
# - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
# given address, and leave the rest
txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx1 = self.nodes[1].getrawtransaction(txid1, True)
# txid1 should have 1 input and 2 outputs
assert_equal(1, len(tx1["vin"]))
assert_equal(2, len(tx1["vout"]))
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 0.3, 0.0001)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
# txid2 should have 2 inputs and 2 outputs
assert_equal(2, len(tx2["vin"]))
assert_equal(2, len(tx2["vout"]))
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 1.3, 0.0001)
# Empty out node2's wallet
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
for i in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
tx.vout = [tx.vout[0]] * 2000
funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
self.nodes[0].sendrawtransaction(signed_tx['hex'])
self.nodes[0].generate(1)
self.sync_all()
# Check that we can create a transaction that only requires ~100 of our
# utxos, without pulling in all outputs and creating a transaction that
# is way too big.
assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
if __name__ == '__main__':
    WalletGroupTest().main()
|
the-stack_0_20849 | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import os
import subprocess
import cudf
import numpy as np
import pandas as pd
import pytest
from sklearn.metrics import roc_auc_score
import nvtabular as nvt
import nvtabular.tools.data_gen as datagen
from nvtabular import ops
from nvtabular.io.dataset import Dataset
tf = pytest.importorskip("tensorflow")
# If tensorflow isn't installed skip these tests. Note that the
# tf_dataloader import needs to happen after this line
tf_dataloader = pytest.importorskip("nvtabular.loader.tensorflow")
def test_nested_list():
num_rows = 100
batch_size = 12
df = pd.DataFrame(
{
"data": [
np.random.rand(np.random.randint(10) + 1, 3).tolist() for i in range(num_rows)
],
"data2": [np.random.rand(np.random.randint(10) + 1).tolist() for i in range(num_rows)],
"label": [np.random.rand() for i in range(num_rows)],
}
)
train_dataset = tf_dataloader.KerasSequenceLoader(
Dataset(df),
cont_names=["data", "data2"],
label_names=["label"],
batch_size=batch_size,
shuffle=False,
)
batch = next(iter(train_dataset))
pad_data_col = tf.RaggedTensor.from_row_lengths(
batch[0]["data"][0][:, 0], batch[0]["data"][1][:, 0]
).to_tensor()
true_data_col = tf.reshape(
tf.ragged.constant(df.iloc[:batch_size, 0].tolist()).to_tensor(), [batch_size, -1]
)
pad_data2_col = tf.RaggedTensor.from_row_lengths(
batch[0]["data2"][0][:, 0], batch[0]["data2"][1][:, 0]
).to_tensor()
true_data2_col = tf.reshape(
tf.ragged.constant(df.iloc[:batch_size, 1].tolist()).to_tensor(), [batch_size, -1]
)
assert np.allclose(pad_data_col.numpy(), true_data_col.numpy())
assert np.allclose(pad_data2_col.numpy(), true_data2_col.numpy())
def test_shuffling():
num_rows = 10000
batch_size = 10000
df = pd.DataFrame({"a": np.asarray(range(num_rows)), "b": np.asarray([0] * num_rows)})
train_dataset = tf_dataloader.KerasSequenceLoader(
Dataset(df), cont_names=["a"], label_names=["b"], batch_size=batch_size, shuffle=True
)
batch = next(iter(train_dataset))
first_batch = tf.reshape(tf.cast(batch[0]["a"].cpu(), tf.int32), (batch_size,))
in_order = tf.range(0, batch_size, dtype=tf.int32)
assert (first_batch != in_order).numpy().any()
assert (tf.sort(first_batch) == in_order).numpy().all()
@pytest.mark.parametrize("batch_size", [10, 9, 8])
@pytest.mark.parametrize("drop_last", [True, False])
@pytest.mark.parametrize("num_rows", [100])
def test_tf_drp_reset(tmpdir, batch_size, drop_last, num_rows):
df = cudf.DataFrame(
{
"cat1": [1] * num_rows,
"cat2": [2] * num_rows,
"cat3": [3] * num_rows,
"label": [0] * num_rows,
"cont3": [3.0] * num_rows,
"cont2": [2.0] * num_rows,
"cont1": [1.0] * num_rows,
}
)
path = os.path.join(tmpdir, "dataset.parquet")
df.to_parquet(path)
cat_names = ["cat3", "cat2", "cat1"]
cont_names = ["cont3", "cont2", "cont1"]
label_name = ["label"]
data_itr = tf_dataloader.KerasSequenceLoader(
[path],
cat_names=cat_names,
cont_names=cont_names,
batch_size=batch_size,
label_names=label_name,
shuffle=False,
drop_last=drop_last,
)
all_len = len(data_itr) if drop_last else len(data_itr) - 1
all_rows = 0
for idx, (X, y) in enumerate(data_itr):
all_rows += len(X["cat1"])
if idx < all_len:
assert list(X["cat1"].numpy()) == [1] * batch_size
assert list(X["cat2"].numpy()) == [2] * batch_size
assert list(X["cat3"].numpy()) == [3] * batch_size
assert list(X["cont1"].numpy()) == [1.0] * batch_size
assert list(X["cont2"].numpy()) == [2.0] * batch_size
assert list(X["cont3"].numpy()) == [3.0] * batch_size
if drop_last and num_rows % batch_size > 0:
assert num_rows > all_rows
else:
assert num_rows == all_rows
def test_tf_catname_ordering(tmpdir):
df = cudf.DataFrame(
{
"cat1": [1] * 100,
"cat2": [2] * 100,
"cat3": [3] * 100,
"label": [0] * 100,
"cont3": [3.0] * 100,
"cont2": [2.0] * 100,
"cont1": [1.0] * 100,
}
)
path = os.path.join(tmpdir, "dataset.parquet")
df.to_parquet(path)
cat_names = ["cat3", "cat2", "cat1"]
cont_names = ["cont3", "cont2", "cont1"]
label_name = ["label"]
data_itr = tf_dataloader.KerasSequenceLoader(
[path],
cat_names=cat_names,
cont_names=cont_names,
batch_size=10,
label_names=label_name,
shuffle=False,
)
for X, y in data_itr:
assert list(X["cat1"].numpy()) == [1] * 10
assert list(X["cat2"].numpy()) == [2] * 10
assert list(X["cat3"].numpy()) == [3] * 10
assert list(X["cont1"].numpy()) == [1.0] * 10
assert list(X["cont2"].numpy()) == [2.0] * 10
assert list(X["cont3"].numpy()) == [3.0] * 10
def test_tf_map(tmpdir):
df = cudf.DataFrame(
{
"cat1": [1] * 100,
"cat2": [2] * 100,
"cat3": [3] * 100,
"label": [0] * 100,
"sample_weight": [1.0] * 100,
"cont2": [2.0] * 100,
"cont1": [1.0] * 100,
}
)
path = os.path.join(tmpdir, "dataset.parquet")
df.to_parquet(path)
cat_names = ["cat3", "cat2", "cat1"]
cont_names = ["sample_weight", "cont2", "cont1"]
label_name = ["label"]
def add_sample_weight(features, labels, sample_weight_col_name="sample_weight"):
sample_weight = tf.cast(features.pop(sample_weight_col_name) > 0, tf.float32)
return features, labels, sample_weight
data_itr = tf_dataloader.KerasSequenceLoader(
[path],
cat_names=cat_names,
cont_names=cont_names,
batch_size=10,
label_names=label_name,
shuffle=False,
).map(add_sample_weight)
for X, y, sample_weight in data_itr:
assert list(X["cat1"].numpy()) == [1] * 10
assert list(X["cat2"].numpy()) == [2] * 10
assert list(X["cat3"].numpy()) == [3] * 10
assert list(X["cont1"].numpy()) == [1.0] * 10
assert list(X["cont2"].numpy()) == [2.0] * 10
assert list(sample_weight.numpy()) == [1.0] * 10
# TODO: include use_columns option
# TODO: include parts_per_chunk test
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.06])
@pytest.mark.parametrize("engine", ["parquet"])
@pytest.mark.parametrize("batch_size", [1, 10, 100])
@pytest.mark.parametrize("use_paths", [True, False])
@pytest.mark.parametrize("cpu_true", [False, True])
@pytest.mark.parametrize("device", ["cpu", 0])
def test_tf_gpu_dl(
tmpdir, paths, use_paths, device, cpu_true, dataset, batch_size, gpu_memory_frac, engine
):
cont_names = ["x", "y", "id"]
cat_names = ["name-string"]
label_name = ["label"]
if engine == "parquet":
cat_names.append("name-cat")
columns = cont_names + cat_names
conts = cont_names >> ops.FillMedian() >> ops.Normalize()
cats = cat_names >> ops.Categorify()
workflow = nvt.Workflow(conts + cats + label_name)
workflow.fit(dataset)
workflow.transform(dataset).to_parquet(tmpdir + "/processed")
data_itr = tf_dataloader.KerasSequenceLoader(
str(tmpdir + "/processed"), # workflow.transform(dataset),
cat_names=cat_names,
cont_names=cont_names,
batch_size=batch_size,
buffer_size=gpu_memory_frac,
label_names=label_name,
engine=engine,
shuffle=False,
device=device,
reader_kwargs={"cpu": cpu_true},
)
_ = tf.random.uniform((1,))
rows = 0
for idx in range(len(data_itr)):
X, y = next(data_itr)
# first elements to check epoch-to-epoch consistency
if idx == 0:
X0, y0 = X, y
# check that we have at most batch_size elements
num_samples = y.shape[0]
if num_samples != batch_size:
try:
next(data_itr)
except StopIteration:
rows += num_samples
continue
else:
raise ValueError("Batch size too small at idx {}".format(idx))
# check that all the features in X have the
# appropriate length and that the set of
# their names is exactly the set of names in
# `columns`
these_cols = columns.copy()
for column, x in X.items():
try:
these_cols.remove(column)
except ValueError as e:
raise AssertionError from e
assert x.shape[0] == num_samples
assert len(these_cols) == 0
rows += num_samples
assert (idx + 1) * batch_size >= rows
assert rows == (60 * 24 * 3 + 1)
# if num_samples is equal to batch size,
# we didn't exhaust the iterator and do
# cleanup. Try that now
if num_samples == batch_size:
try:
next(data_itr)
except StopIteration:
pass
else:
raise ValueError
assert not data_itr._working
assert data_itr._batch_itr is None
# check start of next epoch to ensure consistency
X, y = next(data_itr)
assert (y.numpy() == y0.numpy()).all()
for column, x in X.items():
x0 = X0.pop(column)
assert (x.numpy() == x0.numpy()).all()
assert len(X0) == 0
data_itr.stop()
assert not data_itr._working
assert data_itr._batch_itr is None
@pytest.mark.parametrize("batch_size", [1, 2, 3])
def test_mh_support(tmpdir, batch_size):
data = {
"Authors": [["User_A"], ["User_A", "User_E"], ["User_B", "User_C"], ["User_C"]],
"Reviewers": [
["User_A"],
["User_A", "User_E"],
["User_B", "User_C"],
["User_C"],
],
"Engaging User": ["User_B", "User_B", "User_A", "User_D"],
"Embedding": [
[0.1, 0.2, 0.3],
[0.3, 0.4, 0.5],
[0.6, 0.7, 0.8],
[0.8, 0.4, 0.2],
],
"Post": [1, 2, 3, 4],
}
df = cudf.DataFrame(data)
cat_names = ["Authors", "Reviewers", "Engaging User"]
cont_names = ["Embedding"]
label_name = ["Post"]
cats = cat_names >> ops.HashBucket(num_buckets=10)
workflow = nvt.Workflow(cats + cont_names + label_name)
data_itr = tf_dataloader.KerasSequenceLoader(
workflow.transform(nvt.Dataset(df)),
cat_names=cat_names,
cont_names=cont_names,
label_names=label_name,
batch_size=batch_size,
shuffle=False,
)
nnzs = None
idx = 0
for X, y in data_itr:
assert len(X) == 4
n_samples = y.shape[0]
for mh_name in ["Authors", "Reviewers", "Embedding"]:
# assert (mh_name) in X
array, nnzs = X[mh_name]
nnzs = nnzs.numpy()[:, 0]
array = array.numpy()[:, 0]
if mh_name == "Embedding":
assert (nnzs == 3).all()
else:
lens = [
len(x) for x in data[mh_name][idx * batch_size : idx * batch_size + n_samples]
]
assert (nnzs == np.array(lens)).all()
if mh_name == "Embedding":
assert len(array) == (n_samples * 3)
else:
assert len(array) == sum(lens)
idx += 1
assert idx == (3 // batch_size + 1)
@pytest.mark.parametrize("batch_size", [1, 2, 4])
def test_validater(tmpdir, batch_size):
n_samples = 9
rand = np.random.RandomState(0)
gdf = cudf.DataFrame({"a": rand.randn(n_samples), "label": rand.randint(2, size=n_samples)})
dataloader = tf_dataloader.KerasSequenceLoader(
nvt.Dataset(gdf),
batch_size=batch_size,
cat_names=[],
cont_names=["a"],
label_names=["label"],
shuffle=False,
)
input_ = tf.keras.Input(name="a", dtype=tf.float32, shape=(1,))
x = tf.keras.layers.Dense(128, "relu")(input_)
x = tf.keras.layers.Dense(1, activation="softmax")(x)
model = tf.keras.Model(inputs=input_, outputs=x)
model.compile("sgd", "binary_crossentropy", metrics=["accuracy", tf.keras.metrics.AUC()])
validater = tf_dataloader.KerasSequenceValidater(dataloader)
model.fit(dataloader, epochs=2, verbose=0, callbacks=[validater])
predictions, labels = [], []
for X, y_true in dataloader:
y_pred = model(X)
labels.extend(y_true.numpy()[:, 0])
predictions.extend(y_pred.numpy()[:, 0])
predictions = np.array(predictions)
labels = np.array(labels)
logs = {}
validater.on_epoch_end(0, logs)
auc_key = [i for i in logs if i.startswith("val_auc")][0]
true_accuracy = (labels == (predictions > 0.5)).mean()
estimated_accuracy = logs["val_accuracy"]
assert np.isclose(true_accuracy, estimated_accuracy, rtol=1e-6)
true_auc = roc_auc_score(labels, predictions)
estimated_auc = logs[auc_key]
assert np.isclose(true_auc, estimated_auc, rtol=1e-6)
@pytest.mark.parametrize("engine", ["parquet"])
@pytest.mark.parametrize("batch_size", [1, 10, 100])
@pytest.mark.parametrize("global_rank", [0, 1])
def test_multigpu_partitioning(datasets, engine, batch_size, global_rank):
cont_names = ["x", "y", "id"]
cat_names = ["name-string", "name-cat"]
label_name = ["label"]
data_loader = tf_dataloader.KerasSequenceLoader(
str(datasets["parquet"]),
cat_names=cat_names,
cont_names=cont_names,
batch_size=batch_size,
buffer_size=0.1,
label_names=label_name,
engine=engine,
shuffle=False,
global_size=2,
global_rank=global_rank,
)
indices = data_loader._gather_indices_for_dev(None)
assert indices == [global_rank]
@pytest.mark.parametrize("sparse_dense", [False, True])
def test_sparse_tensors(tmpdir, sparse_dense):
# create small dataset, add values to sparse_list
json_sample = {
"conts": {},
"cats": {
"spar1": {
"dtype": None,
"cardinality": 50,
"min_entry_size": 1,
"max_entry_size": 5,
"multi_min": 2,
"multi_max": 4,
"multi_avg": 3,
},
"spar2": {
"dtype": None,
"cardinality": 50,
"min_entry_size": 1,
"max_entry_size": 5,
"multi_min": 3,
"multi_max": 5,
"multi_avg": 4,
},
# "": {"dtype": None, "cardinality": 500, "min_entry_size": 1, "max_entry_size": 5},
},
"labels": {"rating": {"dtype": None, "cardinality": 2}},
}
cols = datagen._get_cols_from_schema(json_sample)
df_gen = datagen.DatasetGen(datagen.UniformDistro(), gpu_frac=0.0001)
target_path = os.path.join(tmpdir, "input/")
os.mkdir(target_path)
df_files = df_gen.full_df_create(10000, cols, output=target_path)
spa_lst = ["spar1", "spar2"]
spa_mx = {"spar1": 5, "spar2": 6}
batch_size = 10
data_itr = tf_dataloader.KerasSequenceLoader(
df_files,
cat_names=spa_lst,
cont_names=[],
label_names=["rating"],
batch_size=batch_size,
buffer_size=0.1,
sparse_names=spa_lst,
sparse_max=spa_mx,
sparse_as_dense=sparse_dense,
)
for batch in data_itr:
feats, labs = batch
for col in spa_lst:
feature_tensor = feats[f"{col}"]
if not sparse_dense:
assert list(feature_tensor.shape) == [batch_size, spa_mx[col]]
assert isinstance(feature_tensor, tf.sparse.SparseTensor)
else:
assert feature_tensor.shape[1] == spa_mx[col]
assert not isinstance(feature_tensor, tf.sparse.SparseTensor)
@pytest.mark.skip(reason="not working correctly in ci environment")
@pytest.mark.skipif(importlib.util.find_spec("horovod") is None, reason="needs horovod")
def test_horovod_multigpu(tmpdir):
json_sample = {
"conts": {},
"cats": {
"genres": {
"dtype": None,
"cardinality": 50,
"min_entry_size": 1,
"max_entry_size": 5,
"multi_min": 2,
"multi_max": 4,
"multi_avg": 3,
},
"movieId": {
"dtype": None,
"cardinality": 500,
"min_entry_size": 1,
"max_entry_size": 5,
},
"userId": {"dtype": None, "cardinality": 500, "min_entry_size": 1, "max_entry_size": 5},
},
"labels": {"rating": {"dtype": None, "cardinality": 2}},
}
cols = datagen._get_cols_from_schema(json_sample)
df_gen = datagen.DatasetGen(datagen.UniformDistro(), gpu_frac=0.0001)
target_path = os.path.join(tmpdir, "input/")
os.mkdir(target_path)
df_files = df_gen.full_df_create(10000, cols, output=target_path)
# process them
cat_features = nvt.ColumnGroup(["userId", "movieId", "genres"]) >> nvt.ops.Categorify()
ratings = nvt.ColumnGroup(["rating"]) >> (lambda col: (col > 3).astype("int8"))
output = cat_features + ratings
proc = nvt.Workflow(output)
target_path_train = os.path.join(tmpdir, "train/")
os.mkdir(target_path_train)
proc.fit_transform(nvt.Dataset(df_files)).to_parquet(
output_path=target_path_train, out_files_per_proc=5
)
# add new location
target_path = os.path.join(tmpdir, "workflow/")
os.mkdir(target_path)
proc.save(target_path)
curr_path = os.path.abspath(__file__)
repo_root = os.path.relpath(os.path.normpath(os.path.join(curr_path, "../../..")))
hvd_wrap_path = os.path.join(repo_root, "examples/multi-gpu-movielens/hvd_wrapper.sh")
hvd_exam_path = os.path.join(repo_root, "examples/multi-gpu-movielens/tf_trainer.py")
with subprocess.Popen(
[
"horovodrun",
"-np",
"2",
"-H",
"localhost:2",
"sh",
hvd_wrap_path,
"python",
hvd_exam_path,
"--dir_in",
f"{tmpdir}",
"--batch_size",
"1024",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as process:
process.wait()
stdout, stderr = process.communicate()
print(stdout, stderr)
assert "Loss:" in str(stdout)
#
#
|
the-stack_0_20850 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroNNTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.batch_data_index = self._create_variable(name='batch_data_index', src=['guest'], dst=['host'])
self.batch_info = self._create_variable(name='batch_info', src=['guest'], dst=['host', 'arbiter'])
self.decrypted_guest_fowrad = self._create_variable(name='decrypted_guest_fowrad', src=['host'], dst=['guest'])
self.decrypted_guest_weight_gradient = self._create_variable(name='decrypted_guest_weight_gradient', src=['host'], dst=['guest'])
self.encrypted_acc_noise = self._create_variable(name='encrypted_acc_noise', src=['host'], dst=['guest'])
self.encrypted_guest_forward = self._create_variable(name='encrypted_guest_forward', src=['guest'], dst=['host'])
self.encrypted_guest_weight_gradient = self._create_variable(name='encrypted_guest_weight_gradient', src=['guest'], dst=['host'])
self.encrypted_host_forward = self._create_variable(name='encrypted_host_forward', src=['host'], dst=['guest'])
self.host_backward = self._create_variable(name='host_backward', src=['guest'], dst=['host'])
self.is_converge = self._create_variable(name='is_converge', src=['guest'], dst=['host'])
|
the-stack_0_20851 |
import torch
from torch import nn
from torch.autograd import Variable
from examples.Helper import Helper
from examples.Regression_Network import LSTM1
import matplotlib.pyplot as plt
class Train():
def __init__(self):
#define train parameters
self.num_epochs = 1000 # 1000 epochs
self.learning_rate = 0.001 # 0.001 lr
#define netwrok architecture
self.input_size = 512 # number of features
self.hidden_size = 1 # number of features in hidden state
self.num_layers = 1 # number of stacked lstm layers
self.num_classes = 1 # number of output classes
self.sequence_length=20
self.Loss_history=[]
lstm1 = LSTM1(num_classes=self.num_classes, input_size=self.input_size, hidden_size=self.hidden_size, num_layers=self.num_layers,
seq_length=self.sequence_length) # our lstm class
#MSE criteria for regression network
self.criterion = torch.nn.MSELoss(reduction='sum') # mean-squared error for regression
self.optimizer = torch.optim.Adam(lstm1.parameters(), lr= self.learning_rate)
self.model = lstm1
self.help_tool=Helper()
self.train_accuracy_MSE=[]
self.predicted=0
self.GroundTruth=0
self.GroundTruth_history=[]
self.epochs=[]
def train(self,lstm1,event_data,Angular_velocities,epoch):
# for epoch in range(self.num_epochs):
outputs = lstm1.forward(event_data) # forward pass
self.optimizer.zero_grad() # caluclate the gradient, manually setting to 0
# obtain the loss function
loss = self.criterion(outputs[-1].double(), Angular_velocities[-1][2].double())
self.epochs.append(epoch)
self.train_accuracy_MSE.append((outputs[-1]-Angular_velocities[-1][2]))
self.predicted=outputs[-1]
self.GroundTruth=Angular_velocities[-1][2]
self.Loss_history.append(loss)
self.GroundTruth_history.append(self.GroundTruth)
        loss.backward() # backpropagate to compute gradients of the loss
# print('loss',loss)
self.optimizer.step() # improve from loss, i.e backprop
# if epoch % 5 == 0 and epoch>0 :
# print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))
# self.help_tool.SavemyModel(lstm1)
def print_train_data(self):
        plt.plot(self.Loss_history)
plt.title('loss history')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show() |
the-stack_0_20852 | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
results = dict()
def solution(N):
# write your code in Python 3.6
# 1000000
if N in results:
return results[N]
else:
result = f_series(N)
results[N] = result
return result
def f_series(n):
if n < 2:
return n
else:
if n in results:
return results[n]
else:
result = (f_series(n - 1) + f_series(n - 2)) % 1000000
results[n] = result
return result
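# Hedged alternative sketch (not part of the original submission): the recursive memoization
# above can hit Python's recursion limit for large N; an iterative version returns the same
# Fibonacci number modulo 1000000 without deep recursion.
def solution_iterative(N):
    a, b = 0, 1
    for _ in range(N):
        a, b = b, (a + b) % 1000000
    return a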
|
the-stack_0_20854 | """
Script calculates sea ice extent in the Bering Sea from SIC fields
Notes
-----
Author : Zachary Labe
Date : 4 March 2018
"""
### Import modules
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import datetime
import statsmodels.api as sm
### Define directories
directorydata = '/surtsey/zlabe/seaice/SIC_Alaska/'
directorydata2 = '/home/zlabe/Documents/Projects/BeringSeaIce2018/Data/'
directoryfigure = '/home/zlabe/Documents/Projects/BeringSeaIce2018/Figures/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting Bering SIE - %s----' % titletime)
### Define years
years = np.arange(1850,2019+1,1)
yearsat = np.arange(1979,2019+1,1)
### Retrieve data from NSIDC regional extent in Bering Sea
beringold = np.genfromtxt(directorydata2 + 'BeringSeaIce_NSIDC_Feb.txt')
bering = beringold/1e6
### Retrieve data from historical sea ice atlas
filename = directorydata + 'Alaska_SIC_Feb_1850-2019.nc'
data = Dataset(filename)
iceold = data.variables['sic'][:]
lat1 = data.variables['lat'][:]
lon1 = data.variables['lon'][:]
data.close()
print('Completed: Data read!')
### Meshgrid
lon2,lat2 = np.meshgrid(lon1,lat1)
### Bering Sea Ice Mask
latq = lat2.copy()
latq[np.where(latq>67)] = 0.
latq[np.where(latq>0.)] = 1
### Apply the Bering Sea latitude mask to the SIC fields
ice = iceold * latq
### Extent is a binary 0 or 1 for 15% SIC threshold
thresh=15
ice[np.where(ice<thresh)]=np.nan
ice[np.where(ice>=thresh)]=1
### Calculate sea ice extent
ext = np.zeros((years.shape[0]))
valyr = np.zeros((ice.shape))
for yr in range(years.shape[0]):
for i in range(lat2.shape[0]):
for j in range(lon2.shape[1]):
if ice[yr,i,j] == 1.0:
### Area of 0.25 grid cell [769.3 = (111.32/4) * (110.57/4)]
valyr[yr,i,j] = 769.3 * np.cos(np.radians(lat2[i,j]))
ext[yr] = np.nansum(valyr[yr,:,:])/1e6
### Save sea ice extent data (yearly) from sea ice atlas
np.savetxt(directorydata2 + 'Bering_SIE_iceatlas_02_1850-2019.txt',ext,
delimiter=',',header='File contains February SIE from historical' \
'\n ice atlas (University of Alaska) for years' \
'\n 1850-2019 \n')
### Calculate loess
smoothed = sm.nonparametric.lowess(ext,np.arange(years.shape[0]))
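### lowess returns an (n, 2) array sorted by x; column 1 holds the smoothed extent plotted below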
###############################################################################
###############################################################################
###############################################################################
### Plot figures
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='darkgrey')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
fig = plt.figure()
ax = plt.subplot(111)
### Adjust axes in time series plots
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
ax.tick_params('both',length=5.5,width=2,which='major',color='darkgrey')
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
plt.plot(years,ext,linewidth=2,color='deepskyblue',
label=r'\textbf{Historical Sea Ice Atlas, University of Alaska}',
clip_on=False)
plt.plot(yearsat,bering,linewidth=0.9,color='r',
label=r'\textbf{NSIDC Sea Ice Index, Version 3}')
plt.plot(years,smoothed[:,1],linewidth=0.9,linestyle='--',
dashes=(1, 0.2),color='w',label=r'\textbf{Lowess Smoothing}')
xlabels = list(map(str,np.arange(1850,2021,25)))
plt.xticks(np.arange(1850,2021,25),xlabels,rotation=0,color='darkgrey')
plt.xlim([1850,2020])
plt.yticks(np.arange(0,2.5,0.2),list(map(str,np.round(np.arange(0,2.5,0.2),2))),
color='darkgrey')
plt.ylim([0.2,1])
fig.suptitle(r'\textbf{FEBRUARY : BERING SEA ICE}',
fontsize=22,color='darkgrey')
plt.ylabel(r'\textbf{Extent [$\bf{\times 10^{6}}$\ \textbf{km}$\bf{^2}$]}',
fontsize=17,alpha=1,color='darkgrey',rotation=90)
le = plt.legend(shadow=False,fontsize=8,loc='upper center',
bbox_to_anchor=(0.285, 0.17),fancybox=True,frameon=False,ncol=1)
for text in le.get_texts():
text.set_color('darkgrey')
plt.savefig(directoryfigure + 'Bering_SIE_Atlas_February_2019.png',dpi=300)
print('Completed: Figure plotted!')
print('Completed: Script done!') |
the-stack_0_20856 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PagingGetOdataMultiplePagesOptions(Model):
"""Additional parameters for get_odata_multiple_pages operation.
:param maxresults: Sets the maximum number of items to return in the
response.
:type maxresults: int
:param timeout: Sets the maximum time that the server can spend processing
the request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
"""
_attribute_map = {
'maxresults': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
}
def __init__(self, **kwargs):
super(PagingGetOdataMultiplePagesOptions, self).__init__(**kwargs)
self.maxresults = kwargs.get('maxresults', None)
self.timeout = kwargs.get('timeout', 30)
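# Hedged usage sketch (not part of the generated file): the options model is constructed with
# plain keyword arguments; 'timeout' falls back to 30 when omitted.
#
#   options = PagingGetOdataMultiplePagesOptions(maxresults=100, timeout=60)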
|
the-stack_0_20857 | import unittest
from bandwidth.voice.bxml import Response
from lxml.builder import E
class ResponseTests(unittest.TestCase):
def test_to_xml(self):
"""
to_xml() should build XML
"""
estimated_xml = b'<xml><Response><Hangup/></Response></xml>'
xml = Response(E.Hangup())
self.assertEqual(estimated_xml, xml.to_xml())
def test_to_xml_with_several_verbs(self):
"""
to_xml() should build XML (some verbs)
"""
estimated_xml = b'<xml><Response><Pause duration="10"/><Hangup/></Response></xml>'
xml = Response(E.Pause({'duration': '10'}), E.Hangup())
self.assertEqual(estimated_xml, xml.to_xml())
def test__str__(self):
"""
__str__() should return XML as string
"""
estimated_xml = '<xml><Response><Hangup/></Response></xml>'
xml = Response(E.Hangup())
self.assertEqual(estimated_xml, str(xml))
|
the-stack_0_20858 | from .endpoint import Endpoint, api
from .exceptions import MissingRequiredFieldError
from .. import RequestFactory, SubscriptionItem, PaginationItem
import logging
logger = logging.getLogger("tableau.endpoint.subscriptions")
class Subscriptions(Endpoint):
@property
def baseurl(self):
return "{0}/sites/{1}/subscriptions".format(self.parent_srv.baseurl, self.parent_srv.site_id)
@api(version="2.3")
def get(self, req_options=None):
logger.info("Querying all subscriptions for the site")
url = self.baseurl
server_response = self.get_request(url, req_options)
pagination_item = PaginationItem.from_response(server_response.content, self.parent_srv.namespace)
all_subscriptions = SubscriptionItem.from_response(server_response.content, self.parent_srv.namespace)
return all_subscriptions, pagination_item
@api(version="2.3")
def get_by_id(self, subscription_id):
if not subscription_id:
error = "No Subscription ID provided"
raise ValueError(error)
logger.info("Querying a single subscription by id ({})".format(subscription_id))
url = "{}/{}".format(self.baseurl, subscription_id)
server_response = self.get_request(url)
return SubscriptionItem.from_response(server_response.content, self.parent_srv.namespace)[0]
@api(version="2.3")
def create(self, subscription_item):
if not subscription_item:
error = "No Susbcription provided"
raise ValueError(error)
logger.info("Creating a subscription ({})".format(subscription_item))
url = self.baseurl
create_req = RequestFactory.Subscription.create_req(subscription_item)
server_response = self.post_request(url, create_req)
return SubscriptionItem.from_response(server_response.content, self.parent_srv.namespace)[0]
@api(version="2.3")
def delete(self, subscription_id):
if not subscription_id:
error = "Subscription ID undefined."
raise ValueError(error)
url = "{0}/{1}".format(self.baseurl, subscription_id)
self.delete_request(url)
logger.info("Deleted subscription (ID: {0})".format(subscription_id))
@api(version="2.3")
def update(self, subscription_item):
if not subscription_item.id:
error = "Subscription item missing ID. Subscription must be retrieved from server first."
raise MissingRequiredFieldError(error)
url = "{0}/{1}".format(self.baseurl, subscription_item.id)
update_req = RequestFactory.Subscription.update_req(subscription_item)
server_response = self.put_request(url, update_req)
logger.info("Updated subscription item (ID: {0})".format(subscription_item.id))
return SubscriptionItem.from_response(server_response.content, self.parent_srv.namespace)[0]
|
the-stack_0_20861 | ############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
# Python v3.7.5
#
# Michael Würtenberger
# (c) 2019
#
# Licence APL2.0
#
###########################################################
# standard libraries
import logging
# external packages
import numpy as np
import astropy._erfa as erfa
# local imports
from mw4.modeldata.alignstars import generateAlignStars
class Hipparcos(object):
"""
    The Hipparcos class holds all information and handling of Hipparcos catalogue data and
    other attributes. This includes data about the alignment stars defined in
    generateAlignStars, their ra dec coordinates, proper motion, parallax and radial
    velocity, and the calculation of data for display and slew commands
>>> hip = Hipparcos(
>>> app=app
>>> mwGlob=mwglob
>>> )
"""
    __all__ = ['Hipparcos',
               'calculateAlignStarPositionsAltAz',
               'getAlignStarRaDecFromName',
               ]
version = '0.100.0'
logger = logging.getLogger(__name__)
def __init__(self,
app=None,
mwGlob=None,
):
self.app = app
self.mwGlob = mwGlob
self.lat = app.mount.obsSite.location.latitude.degrees
self.name = list()
self.alt = list()
self.az = list()
self.alignStars = generateAlignStars()
self.calculateAlignStarPositionsAltAz()
def calculateAlignStarPositionsAltAz(self):
"""
calculateAlignStarPositionsAltAz does calculate the star coordinates from give data
out of generated star list. calculation routines are from astropy erfa. atco13 does
the results based on proper motion, parallax and radial velocity and need J2000
coordinates. because of using the hipparcos catalogue, which is based on J1991,
25 epoch the pre calculation from J1991,25 to J2000 is done already when generating
the alignstars file. there is no refraction data taken into account, because we need
this only for display purpose and for this, the accuracy is more than sufficient.
:return: lists for alt, az and name of star
"""
location = self.app.mount.obsSite.location
if location is None:
return False
t = self.app.mount.obsSite.timeJD
star = list(self.alignStars.values())
self.name = list(self.alignStars.keys())
aob, zob, hob, dob, rob, eo = erfa.atco13([x[0] for x in star],
[x[1] for x in star],
[x[2] for x in star],
[x[3] for x in star],
[x[4] for x in star],
[x[5] for x in star],
t.ut1,
0.0,
t.dut1,
location.longitude.radians,
location.latitude.radians,
location.elevation.m,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0)
self.az = aob * 360 / 2 / np.pi
self.alt = 90.0 - zob * 360 / 2 / np.pi
return True
def getAlignStarRaDecFromName(self, name):
"""
getAlignStarRaDecFromName does calculate the star coordinates from give data
out of generated star list. calculation routines are from astropy erfa. atco13 does
the results based on proper motion, parallax and radial velocity and need J2000
coordinates. because of using the hipparcos catalogue, which is based on J1991,
25 epoch the pre calculation from J1991,25 to J2000 is done already when generating
the alignstars file. there is no refraction data taken into account, because we need
this only for display purpose and for this, the accuracy is more than sufficient.
the return values are in JNow epoch as the mount only handles this epoch !
:param name: name of star
:return: values for ra, dec in hours / degrees in JNow epoch !
"""
if name not in self.alignStars:
return None, None
t = self.app.mount.obsSite.ts.now()
values = self.alignStars[name]
ra, dec, eo = erfa.atci13(values[0],
values[1],
values[2],
values[3],
values[4],
values[5],
t.ut1,
0.0,
)
ra = erfa.anp(ra - eo) * 24 / 2 / np.pi
dec = dec * 360 / 2 / np.pi
return ra, dec
|
the-stack_0_20864 | import os
import io
import xlsxwriter
import mimetypes
import string
import secrets
from datetime import datetime, timedelta
from .middlewares import login_required
from flask import Flask, json, g, request, send_file, Response
from flask_cors import CORS
from .sendgrid import email_send
from bson.json_util import dumps
from .login import query_member_record,create_new_member,create_password,login_call,get_users,update_user_details,saveSession, get_user_by_id, delete_user_details,get_current_user,delete_user_session
from .pre_processing import pre_processing
from werkzeug.utils import secure_filename
from .dbquery import getOneRequestByID, doUpdateRequest, doInsertRequest, doDeleteRequest, getAnalyticData, getAnalyticDataByWeek
from .prioritize import getOutstandingRequestsByPage
app = Flask(__name__)
CORS(app)
def json_response(payload, status=200):
return (json.dumps(payload), status, {'content-type': 'application/json'})
def id_generator():
generated_value = ''.join(secrets.choice(string.ascii_uppercase + string.digits) for _ in range(16))
return generated_value
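# Illustrative note: each code draws 16 characters from a 36-symbol alphabet, i.e. roughly
# 16 * log2(36) ~ 82.7 bits of entropy per verification code.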
@app.route("/api/verification-email", methods=["POST"])
@login_required
def sendEmail():
request_data = request.get_json()
verification_data = {
'_id': request_data.get('_id'),
'isAdmin': request_data.get('isAdmin'),
'verificationCode':id_generator(),
'verifyBy': (datetime.utcnow() + timedelta(days=1)).isoformat()
}
updated_user = update_user_details(**verification_data)
if updated_user:
message_data = {
'template_data': {'user_name': request_data['user_name'], 'verification_url': request_data.get('url') + '?_id=' + updated_user['verificationCode']},
'to_email': request_data['to_email'],
'template_id': 'd-fd4424ab8c5b4cdd982e1b2ededa7e96'
}
received_response = email_send(**message_data)
if(received_response == 202):
return json_response({'message':'Sent verification email successfully.'}, 200)
else:
return json_response({'error': received_response}, 500)
else:
        return json_response({'error': 'Unable to update the user with a verification code.'}, 500)
@app.route("/api/verify-email", methods=["POST"])
def verifyEmail():
request_data = request.get_json()
query_data = {
'verificationCode': {'$eq':request_data.get('verification_code')},
'verifyBy': {'$gte': datetime.utcnow()}
}
try:
retrieved_user = query_member_record(**query_data)
update_data = {
'_id': str(retrieved_user.get('_id')),
'isActivated': True,
'isAdmin':retrieved_user.get('isAdmin'),
'verificationCode': None,
'verifyBy': None
}
updated_user = update_user_details(**update_data)
return json_response(updated_user, 200)
except Exception as e:
print('e',e)
return json_response({'error':'Unable to process this request'}, 500)
@app.route("/api/password", methods=["PATCH"])
def set_password():
request_data = request.get_json()
if not request_data.get('password'):
return json_response({'error':'Unable to process this request'}, 500)
authorization = request.headers.get("authorization", None)
request_data['authorization'] = authorization
try:
create_password_status = create_password(**request_data)
# Check and respond according to the status returned by the function.
if (create_password_status == 404):
return json_response({'error':'User not found'}, 404)
elif (create_password_status == 401):
return json.dumps({'error': 'No authorization token provided'}), 401, {'Content-type': 'application/json'}
else:
return json_response({'message':'Password created successfully'}, 200)
except Exception as e:
print('e',e)
return json_response({'error':'Unable to process this request'}, 500)
@app.route("/api/nlp", methods=["POST"])
def receive_file():
file_received = request.files['file_data']
received_string = file_received.read().decode("latin-1")
pre_processing(received_string)
return json_response({'message':'Data inserted'}, 200)
@app.route("/api/user", methods=["GET"])
@login_required
def get_user():
search_query = {
'selector': json.loads(request.args.to_dict().pop('where'))
}
for k, v in request.args.items():
search_query[k] = v
result = get_users(**search_query)
if (result):
return json_response({'data': result.get('users'), 'total': result.get('total')}, 200)
else:
return json_response({'data': []}, 200)
@app.route("/api/user", methods=["POST"])
@login_required
def add_user():
# Variable to receive the new user data passed by the sign up page.
request_data = request.get_json()
try:
# Call the create_new_member() in login.py to create a new member
create_member_status = create_new_member(**request_data)
# Check and respond according to the status returned by the function.
if (create_member_status.get('status') == 200):
return json_response(create_member_status.get('user'), 200)
else:
return json_response({'error':'An account with the email already exists.'}, 400)
except Exception as e:
print('e',e)
return json_response({'error':'Account cannot be created.'}, 500)
@app.route("/api/user", methods=["PATCH"])
@login_required
def update_user():
request_data = request.get_json()
try:
updated_user = update_user_details(**request_data)
return json_response(updated_user, 200)
except Exception as e:
print('e',e)
return json_response({'error':'Unable to process this request'}, 500)
@app.route("/api/<string:userID>/user", methods=["GET"])
@login_required
def get_single_user(userID):
request_data = {'_id': userID}
try:
retrieved_user = get_user_by_id(**request_data)
if (retrieved_user == 404):
return json_response({'error':'User not found'}, 404)
else:
return json_response(retrieved_user, 200)
except Exception as e:
print('e',e)
return json_response({'error':'Unable to process this request'}, 500)
@app.route("/api/<string:userID>/user", methods=["DELETE"])
@login_required
def delete_user(userID):
request_data = {'_id': userID}
try:
deleted_user = delete_user_details(**request_data)
if (deleted_user > 0):
return json_response({'_id': userID}, 200)
else:
return json_response({'error':'Unable to process this request'}, 500)
except Exception as e:
print('e',e)
return json_response({'error':'Unable to process this request'}, 500)
@app.route("/api/login", methods=["POST"])
def login():
# Variable to receive the login information from login page.
request_data = request.get_json()
    # Call login_call() in login.py to perform the login check.
login_status = login_call(**request_data)
if (login_status == 401):
return json_response({'error':'Invalid Username or Password. Please try again'}, 401)
elif (login_status == 404):
return json_response({'error':'User not found'}, 404)
else:
try:
savedSession = saveSession(login_status)
return json_response({'access_token': savedSession.get('access_token'), 'expires_at': savedSession.get('expires_at')}, 200)
except Exception as e:
print('e',e)
return json_response({'error':'Unable to process this request'}, 500)
@app.route("/api/session", methods=["GET"])
@login_required
def get_loggedin_user():
authorization = request.headers.get("authorization", None)
if not authorization:
return json.dumps({'error': 'No authorization token provided'}), 401, {'Content-type': 'application/json'}
try:
token = authorization.split(' ')[1]
request_data = {
'access_token': token
}
current_user = get_current_user(**request_data)
if (current_user == 404):
return json_response({'error':'User not found'}, 404)
else:
return json_response(current_user, 200)
except Exception as e:
print('e',e)
return json_response({'error':'Unable to process this request'}, 500)
@app.route("/api/session", methods=["DELETE"])
# @login_required
def delete_session():
authorization = request.headers.get("authorization", None)
if not authorization:
return json.dumps({'error': 'no authorization token provided'}), 401, {'Content-type': 'application/json'}
try:
token = authorization.split(' ')[1]
request_data = {
'access_token': token
}
deleted_session = delete_user_session(**request_data)
if (deleted_session == 200):
return json_response({'message':'User logged out successfully'}, 200)
else:
return json_response({'error':'Unable to process this request'}, 500)
except Exception as e:
print('e',e)
return json_response({'error':'Unable to process this request'}, 500)
#############################################################
#
# Database Calls To Query, Create, Insert, Update and Delete
#
#############################################################
# Endpoint to query for all requests based on req_status.
@app.route("/api/requests", methods=["GET"])
@login_required
def queryRequests():
queryString = {}
for k, v in request.args.items():
queryString[k] = v
queryString["status"] = 1
if "_id" in queryString:
return getOneRequestByID(**queryString)
else:
try:
if queryString["function"] == "CompletedRequestsByPage":
queryString["status"] = 3
elif queryString["function"] == "InProgressByPage":
queryString["status"] = 2
requests_retrieved = getOutstandingRequestsByPage(**queryString)
return requests_retrieved
except Exception as e:
print('e',e)
return json_response({'error':'Unable to process this request'}, 500)
# Endpoint to perform updates on the requests collection.
@app.route("/api/requests", methods=["PATCH"])
@login_required
def updateRequests():
queryString = request.get_json()
return doUpdateRequest(**queryString)
@app.route("/api/requests", methods=["POST"])
@login_required
def insertRequests():
queryString = request.get_json()
return doInsertRequest(**queryString)
# Call this endpoint to delete request from the queue.
@app.route("/api/requests", methods=["DELETE"])
@login_required
def deleteRequests():
queryString = {}
for k, v in request.args.items():
queryString[k] = v
return doDeleteRequest(**queryString)
# Endpoint to get requests based on nurse_id and date parameters for analytic purposes.
@app.route("/api/analyseData", methods=["GET"])
@login_required
def analyseData():
queryString = {}
for k, v in request.args.items():
queryString[k] = v
try:
analytic_data = getAnalyticData(**queryString)
return json_response({"data": analytic_data}, 200)
except Exception as e:
print('e',e)
return json_response({'error':'Unable to process this request'}, 500)
@app.route("/api/analyseData/excel", methods=["GET"])
@login_required
def exportData():
queryString = {}
for k, v in request.args.items():
queryString[k] = v
analytic_data = getAnalyticData(**queryString)
response = Response()
output = io.BytesIO()
workbook = xlsxwriter.Workbook(output, {'in_memory': True})
worksheet = workbook.add_worksheet('Sheet_1')
for i, d in enumerate(analytic_data):
for j, res in enumerate(d):
if (i == 0):
worksheet.write(i, j, 'Date' if res == 'name' else 'No. of requests')
if (res == 'name'):
worksheet.write(i+1, j, datetime.strptime(d[res], '%Y-%m-%d %H:%M:%S').strftime("%m/%d/%Y %I:%M %p"))
else:
worksheet.write(i+1, j, d[res])
workbook.close()
output.seek(0)
response.data = output.read()
file_name = 'my_file_{}.xlsx'.format(
datetime.now().strftime('%d/%m/%Y'))
mimetype_tuple = mimetypes.guess_type(file_name)
response_headers = {
'Pragma': "public", # required,
'Expires': '0',
        # 'private' is required for certain browsers; a dict cannot hold the same
        # header key twice, so the directives are combined into one value.
        'Cache-Control': 'must-revalidate, post-check=0, pre-check=0, private',
'Content-Type': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'Content-Disposition': 'attachment; filename=\"%s\";' % file_name,
'Content-Transfer-Encoding': 'binary',
'Content-Length': len(response.data)
}
response.headers = response_headers
if not mimetype_tuple[1] is None:
response.headers['Content-Encoding'] = mimetype_tuple[1]
return response
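# Editor's sketch (helper added for illustration only; the exportData route above does
# the real work): the same in-memory xlsxwriter pattern, reduced to the essentials.
def build_demo_workbook():
    demo_output = io.BytesIO()
    demo_workbook = xlsxwriter.Workbook(demo_output, {'in_memory': True})
    demo_sheet = demo_workbook.add_worksheet('Sheet_1')
    demo_sheet.write(0, 0, 'Date')
    demo_sheet.write(0, 1, 'No. of requests')
    demo_sheet.write(1, 0, datetime.now().strftime('%m/%d/%Y %I:%M %p'))
    demo_sheet.write(1, 1, 42)
    demo_workbook.close()
    demo_output.seek(0)
    # Returns the raw xlsx bytes, ready to be attached to a Response as above.
    return demo_output.read()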
|
the-stack_0_20866 | import unittest
from msdm.core.utils.funcutils import cached_property, method_cache
class FuncUtilsTests(unittest.TestCase):
def test_cached_property(self):
class X(object):
def __init__(self):
self._call_count = 0
@cached_property
def expensive(self):
self._call_count += 1
return 1
instance = X()
# initial state
assert instance._call_count == 0
assert not hasattr(instance, '_cached_expensive')
# state after one call
assert instance.expensive == 1
assert hasattr(instance, '_cached_expensive')
assert getattr(instance, '_cached_expensive') == 1
assert instance._call_count == 1
# expecting caching to work for second & subsequent calls
instance.expensive
assert instance._call_count == 1
# Ensure it's only usable in the right place
with self.assertRaises(AssertionError) as context:
class X(object):
@cached_property
def expensive(self, argument):
return 1
def test_method_cache(self):
class X(object):
def __init__(self):
self._call_count = 0
@method_cache
def expensive(self, argument):
self._call_count += 1
return argument * 2
instance = X()
instance.expensive(3)
instance.expensive(3)
assert instance._cache_expensive[((3,), None)] == 6
assert instance._cache_info_expensive == dict(hits=2, misses=1)
assert instance._call_count == 1
# Can handle other entries too
instance.expensive(4)
assert instance._cache_expensive[((4,), None)] == 8
assert instance._cache_info_expensive == dict(hits=3, misses=2)
assert instance._call_count == 2
# And we still cache appropriately
instance.expensive(4)
assert instance._cache_info_expensive == dict(hits=4, misses=2)
assert instance._call_count == 2
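# Editor's sketch (an assumption, not the real msdm implementation): a minimal
# cached_property that matches the behaviour exercised above -- it stores the
# computed value on the instance as `_cached_<name>` and refuses methods that
# take any argument besides `self`.
import inspect

class sketch_cached_property:
    def __init__(self, fn):
        assert list(inspect.signature(fn).parameters) == ['self'], \
            "cached_property only supports zero-argument methods"
        self.fn = fn
        self.attr = '_cached_' + fn.__name__
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if not hasattr(obj, self.attr):
            setattr(obj, self.attr, self.fn(obj))
        return getattr(obj, self.attr)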
if __name__ == '__main__':
unittest.main()
|
the-stack_0_20867 | # Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Emoji codepoint definitions.
See https://emojipedia.org for list of available unicode emojis.
"""
from __future__ import unicode_literals
import collections
emoji = collections.namedtuple('emoji', 'code help')
EMOJI_MAP = {
'BUCKET': emoji('🪣', 'Storage bucket'),
'CAMERA': emoji('📷', 'Screenshot activity'),
'FISHING_POLE': emoji('🎣', 'Phishing'),
'GLOBE': emoji('🌏', 'The world'),
'ID_BUTTON': emoji('🆔', 'Account ID'),
'LINK': emoji('🔗', 'Events Linked'),
'LOCK': emoji('🔒', 'Logon activity'),
'LOCOMOTIVE': emoji('🚂', 'Execution activity'),
'MAGNIFYING_GLASS': emoji('🔎', 'Search related activity'),
'PERSON_STANDING': emoji('🧍', 'Person'),
'SATELLITE': emoji('📡', 'Domain activity'),
'SCREEN': emoji('🖵', 'Screensaver activity'),
'SKULL': emoji('💀', 'Threat intel match'),
'SKULL_CROSSBONE': emoji('☠', 'Suspicious entry'),
'SLEEPING_FACE': emoji('😴', 'Activity outside of regular hours'),
'SPARKLES': emoji('✨', 'New entity created'),
'UNLOCK': emoji('🔓', 'Logoff activity'),
'WASTEBASKET': emoji('🗑', 'Deletion activity')
}
def get_emoji(name):
  Returns the Unicode code point for a named emoji, or an empty string if it is not defined.
Args:
name: string with the emoji name.
Returns:
Unicode string for the emoji if it exists or a blank string otherwise.
"""
name_upper = name.upper()
emoji_object = EMOJI_MAP.get(name_upper)
if emoji_object:
return emoji_object.code
return ''
def get_helper_from_unicode(code):
"""Returns a helper string from an emoji Unicode code point.
Args:
code: a Unicode code point for an emoji.
Returns:
Helper text as a string or an empty string if emoji is not configured.
"""
code_upper = code.upper()
for emoji_object in iter(EMOJI_MAP.values()):
if code_upper == emoji_object.code.upper():
return emoji_object.help
return ''
def get_emojis_as_dict():
"""Returns a dictionary with emoji codes and helper texts.
Returns:
Dict with emoji unicode code points as key and helper text as value.
"""
return {e.code: e.help for e in iter(EMOJI_MAP.values())}
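# Editor's usage sketch (added for illustration; the module normally just exposes the
# lookup helpers above).
if __name__ == '__main__':
  print(get_emoji('skull'))              # 💀
  print(get_helper_from_unicode('💀'))   # Threat intel match
  print(len(get_emojis_as_dict()))       # number of configured emoji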
|
the-stack_0_20869 | #!/usr/bin/env python3
# encoding: utf-8
import cv2
import tensorflow as tf
import sys, os, h5py
import numpy as np
import tensorflow.contrib.layers as layers
import random
import pandas as pd
from random import shuffle
from random import randint
from tqdm import tqdm
import time
from input_data_v1 import *
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
class C3dModel(object):
def __init__(self,
num_class = 101,
keep_prob = 0.6,
batch_size = 3,
epoch=40,
lr = 1e-4):
self.IMG_WIDTH = 171
self.IMG_HEIGHT = 128
self.CROP_WIDTH = 112
self.CROP_HEIGHT = 112
self.graph = tf.Graph()
self.num_class = num_class
self.epoch = epoch
self.CLIP_LENGTH = 16
self.keep_prob = keep_prob
self.batch_size = batch_size
        decay_epoch = 10  # adjust the learning rate once every decay_epoch epochs
# train clip: 9537*5 CLIP=5
# test clip: 3783*5 CLIP=5
# train clip: 9537*3 CLIP=3
# test clip: 3783*3 CLIP=3
self.n_step_epoch=int( 457/batch_size)
with self.graph.as_default():
self.inputs = tf.placeholder(tf.float32, [None, self.CLIP_LENGTH, self.CROP_HEIGHT, self.CROP_WIDTH, 3])
self.labels = tf.placeholder(tf.int64, [batch_size,])
self.initializer = layers.xavier_initializer()
self.global_step = tf.Variable(0, trainable = False, name = "global_step")
self.lr = tf.train.exponential_decay(lr, self.global_step, int(decay_epoch*self.n_step_epoch), 1e-1, True)
tf.add_to_collection(tf.GraphKeys.GLOBAL_STEP, self.global_step)
def conv3d(self, inputs, shape, name,w_name,b_name):
with self.graph.as_default():
with tf.variable_scope('var_name') as var_scope:
W = tf.get_variable(name = w_name, shape = shape, initializer = self.initializer, dtype = tf.float32)
b = tf.get_variable(name = b_name, shape = shape[-1], initializer = tf.zeros_initializer(), dtype = tf.float32)
tf.add_to_collection(tf.GraphKeys.WEIGHTS, W)
tf.add_to_collection(tf.GraphKeys.BIASES, b)
return tf.nn.relu(tf.nn.bias_add(tf.nn.conv3d(inputs, W, strides = [1, 1, 1, 1, 1], padding = "SAME"), b))
# filter:
# [filter_depth, filter_height, filter_width, in_channels,out_channels]
def fc(self, inputs, shape, name,w_name,b_name,activation = True):
with self.graph.as_default():
with tf.variable_scope('var_name') as var_scope:
W = tf.get_variable(name = w_name, shape = shape, initializer = self.initializer, dtype = tf.float32)
b = tf.get_variable(name = b_name, shape = shape[-1], initializer = tf.zeros_initializer(), dtype = tf.float32)
tf.add_to_collection(tf.GraphKeys.WEIGHTS, W)
tf.add_to_collection(tf.GraphKeys.BIASES, b)
if activation:
return tf.nn.relu(tf.nn.bias_add(tf.matmul(inputs, W), b))
else:
return tf.nn.bias_add(tf.matmul(inputs, W), b)
    # netstruct is an ordered list of layer specs, each like ["conv", name, shape, w_name, b_name]
def parseNet(self, net, netstruct, istraining = True):
for key in netstruct:
if key[0] == "conv":
net = self.conv3d(net, key[2], key[1],key[3], key[4])
elif key[0] == "fc":
net = self.fc(net, key[2], key[1], key[3], key[4],activation = key[-1])
elif key[0] == "maxpool":
net = tf.nn.max_pool3d(net, ksize = key[2], strides = key[2], padding = "SAME", name = key[1])
elif key[0] == "dropout" and istraining:
net = tf.nn.dropout(net, key[2], name = key[1])
elif key[0] == "reshape":
net = tf.reshape(net, key[-1])
elif key[0] == "softmax":
net = tf.nn.softmax(net)
elif key[0] == "transpose":
net = tf.transpose(net, perm=key[-1])
return net
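    # Editor's illustration (added; mirrors the layer specs built in test() below).
    # Each entry is [layer_type, name, shape_or_ksize, w_name, b_name, ...]; conv shapes
    # are [filter_depth, filter_height, filter_width, in_channels, out_channels].
    EXAMPLE_NETSTRUCT = [
        ["conv", "conv1", [3, 3, 3, 3, 64], 'wc1', 'bc1'],
        ["maxpool", "pool1", [1, 1, 2, 2, 1]],
        ["reshape", [-1, 8192]],
        ["fc", "fc1", [8192, 4096], 'wd1', 'bd1', True],
    ]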
def test(self, test_list, modelpath):
with self.graph.as_default():
c3d_net = [
["conv", "conv1", [3, 3, 3, 3, 64], 'wc1', 'bc1'],
["maxpool", "pool1", [1, 1, 2, 2, 1]],
["conv", "conv2", [3, 3, 3, 64, 128], 'wc2', 'bc2'],
["maxpool", "pool2", [1, 2, 2, 2, 1]],
["conv", "conv3a", [3, 3, 3, 128, 256], 'wc3a', 'bc3a'],
["conv", "conv3b", [3, 3, 3, 256, 256], 'wc3b', 'bc3b'],
["maxpool", "pool3", [1, 2, 2, 2, 1]],
["conv", "conv4a", [3, 3, 3, 256, 512], 'wc4a', 'bc4a'],
["conv", "conv4b", [3, 3, 3, 512, 512], 'wc4b', 'bc4b'],
["maxpool", "pool4", [1, 2, 2, 2, 1]],
["conv", "conv5a", [3, 3, 3, 512, 512], 'wc5a', 'bc5a'],
["conv", "conv5b", [3, 3, 3, 512, 512], 'wc5b', 'bc5b'],
["maxpool", "pool5", [1, 2, 2, 2, 1]],
["transpose", [0, 1, 4, 2, 3]], #only use it if you restore the sports1m_finetuning_ucf101.model, otherwise uncomment it,(e.g use conv3d_deepnetA_sport1m_iter_1900000_TF.model)
["reshape", [-1, 8192]],
["fc", "fc1", [8192, 4096], 'wd1', 'bd1', True],
["dropout", "dropout1", self.keep_prob],
["fc", "fc2", [4096, 4096],'wd2','bd2', True],
["dropout", "dropout2", self.keep_prob],
["fc", "fc3", [4096, self.num_class],'wout','bout',False],
]
# print(tf.trainable_variables())
# print(var_list)
# print(tf.get_collection(tf.GraphKeys.WEIGHTS))
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = 0.5)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.9
with tf.Session(config=config, graph=self.graph) as sess:
logits = self.parseNet(self.inputs, c3d_net)
softmax_logits = tf.nn.softmax(logits)
# int_label = tf.one_hot(self.labels, self.num_class)
int_label = self.labels # [bs,101]-->[bs*4 or 8 or 16,101]
# int_label=tf.concat(
# [int_label,int_label,int_label,int_label,],axis=0)
# int_label=tf.cast(int_label,dtype=tf.int64)
task_loss = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=int_label))
# task_loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = int_label))
# task_loss = -tf.reduce_sum(int_label*tf.log(logits))
acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(softmax_logits, axis=-1), int_label), tf.float32))
right_count = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(softmax_logits, axis=1), int_label), tf.int32))
reg_loss = layers.apply_regularization(layers.l2_regularizer(5e-4),
tf.get_collection(tf.GraphKeys.WEIGHTS))
total_loss = task_loss + reg_loss
# train_var_list = [v for v in tf.trainable_variables() if v.name.find("conv") == -1]
train_op = tf.train.GradientDescentOptimizer(self.lr).minimize(
total_loss, global_step=self.global_step)
# train_op = tf.train.MomentumOptimizer(self.lr,0.9).minimize(
# total_loss, global_step = self.global_step,var_list=train_var_list)
total_para = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
print('total_para:', total_para) # all CDC9 :28613120 #pool5 27655936
# train clip:762960
# test clip:302640
init = tf.global_variables_initializer()
            # var_list = [v for v in tf.trainable_variables() if v.name.find("conv") != -1]  # only load the convolutional layer parameters at initialization
# print(var_list)
# saver = tf.train.Saver(tf.global_variables())
sess.run(init)
saver = tf.train.Saver(tf.trainable_variables())
# saver.restore(sess, tf.train.latest_checkpoint(modelpath))
# saver.restore(sess, modelpath + "sports1m_finetuning_ucf101.model")
saver.restore(sess, modelpath + "c3d_ucf_model-4999")
print("Model Loading Done!")
step = 0
print_freq = 2
next_start_pos = 0
for one_epoch in range(1):
epostarttime = time.time()
starttime = time.time()
total_v = 0.0
test_correct_num = 0
total_correct = 0
total_all = 0
for i in tqdm(range(int(142 / self.batch_size))):
step += 1
total_v += self.batch_size
train_batch, label_batch, next_start_pos, _, _ = read_clip_and_label(
filename=test_list,
batch_size=self.batch_size,
start_pos=next_start_pos,
num_frames_per_clip=self.CLIP_LENGTH,
height=self.IMG_HEIGHT,
width=self.IMG_WIDTH,
shuffle=False)
assert len(train_batch)==self.batch_size
train_batch = train_aug(train_batch, is_train=False, Crop_heith=self.CROP_HEIGHT,
Crop_width=self.CROP_WIDTH,norm=True)
val_feed = {self.inputs: train_batch, self.labels: label_batch}
test_correct_num += sess.run(right_count, val_feed)
total_correct += test_correct_num
total_all += total_v
print('test acc:', test_correct_num / total_v, 'test_correct_num:', test_correct_num,
'total_v:', total_v)
print('Total test acc:', total_correct / total_all)
if __name__ == "__main__":
c3dnet = C3dModel()
c3dnet.test(test_list="./KTH-testSAF.list",
modelpath="./models/kth-saf/") |
the-stack_0_20870 | """
YOLOv5 Model
"""
import numpy as np
import torch
import torch.nn as nn
from torch import jit
import torchvision.transforms as T
from torchvision.ops import nms
def coco80_to_coco91_class(label):
# converts 80-index (val2014) to 91-index (paper)
coco91_classes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91]
x = [coco91_classes[i] for i in label]
x = torch.tensor(x, dtype=torch.long)
return x
def yolo_to_coco_traffic(label):
"""
Converts 0-index yolo data to custom COCO traffic data.
The custom dataset has the same labels as COCO, with the extensions 92,93 and 94.
"""
traffic_classes = np.arange(1, 15)
x = [traffic_classes[i] for i in label]
# Map traffic labels to COCO label.
traffic_to_coco = {1:1 , 2:2 ,3:3 ,4:4 ,5:6 ,6:7 ,7:8 ,8:11 , 9:13 , 10:17 ,11:18 , 12:92 , 13:93 , 14:94}
x = [traffic_to_coco[i] for i in x]
x = torch.tensor(x, dtype=torch.long)
return x
def yolo_model():
"""
Loads the YOLOv5 model from ultralytics
"""
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, force_reload = True)
model.eval()
return model
def yolo_model_traffic():
"""
Loads the custom YOLOv5 model. It has to be placed into /yolo.
"""
weights = 'yolo/yolov5sTraffic.pt'
model = torch.hub.load('ultralytics/yolov5', 'custom', weights, force_reload=True)
model.eval()
return model
def yolo_predict(model, frame, thresh = 0.6):
"""
Predict with yolo model
Args:
frame - OpenCV image in BGR
Return:
boxes -- Torch tensor of coordinates of the top left and bottom right of the bounding box ordered as [(x1, y1, x2, y2)]
labels -- Torch tensor of index labels for each bounding box [<label indices>]
scores -- Torch tensor of class confidence scores for each bounding box [<class scores>]. For COCO, expects 91 different classes
"""
# Predict
output = model(frame)
# Unpack the output
result = output.xyxy[0]
boxes = result[:,:4]
conf = result[:,4]
labels = result[:,5].type(torch.LongTensor)
# Apply threshold
keep = conf > thresh
boxes = boxes[keep]
conf = conf[keep]
labels = labels[keep]
# Convert COCO labels because some classes were removed
labels = coco80_to_coco91_class(labels)
return boxes, labels, conf
def yolo_traffic_predict(model, frame, thresh = 0.6):
"""
Predict with yolo model trained to detect traffic light status and more.
Args:
frame - OpenCV image in BGR
Return:
boxes -- Torch tensor of coordinates of the top left and bottom right of the bounding box ordered as [(x1, y1, x2, y2)]
labels -- Torch tensor of index labels for each bounding box [<label indices>]
scores -- Torch tensor of class confidence scores for each bounding box [<class scores>]. For COCO, expects 91 different classes
"""
# Predict
output = model(frame)
# Unpack the output
result = output.xyxy[0]
boxes = result[:,:4]
conf = result[:,4]
labels = result[:,5].type(torch.LongTensor)
# Apply threshold
keep = conf > thresh
boxes = boxes[keep]
conf = conf[keep]
labels = labels[keep] # In 0-indexed yolo format
# Convert COCO labels because some classes were removed
labels = yolo_to_coco_traffic(labels)
return boxes, labels, conf
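# Editor's usage sketch (hypothetical image path; assumes OpenCV is installed and
# that torch.hub can download the yolov5s weights on first use).
if __name__ == '__main__':
    import cv2
    model = yolo_model()
    frame = cv2.imread('example.jpg')  # placeholder path -- point this at a real image
    if frame is not None:
        boxes, labels, scores = yolo_predict(model, frame, thresh=0.5)
        print(boxes.shape, labels, scores)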
|
the-stack_0_20871 | #!/usr/bin/env python3
import argparse
import json
import os
import sys
from argparse import RawTextHelpFormatter
from configparser import ConfigParser
from datetime import datetime
from glob import glob
from urllib import request
INSTRUCTIONS = '''
This can be used to download the latest episodes from stations belonging to
the Global radio network.
'''
DOWNLOAD_PATH = '~/Downloads'
MAX_PROGRESS_BAR_BLOCKS = 10
class Episode:
def __init__(self, episode):
self.id = episode['id']
self.start_date = episode['startDate']
self.date = datetime.strptime(episode['startDate'][0:10], '%Y-%m-%d')
self.stream_url = episode['streamUrl']
self.title = episode['title']
self.title_with_date = f'{self.title} ({episode["startDate"][0:10]})'
def __str__(self):
return f'Episode{{id:"{self.id}", date:"{self.date}", start_date:"{self.start_date}", ' \
f'title:"{self.title}", title_with_date:"{self.title_with_date}"}}'
class Download:
def __init__(self, download_file):
self.file_name = download_file
self.date = datetime.strptime(download_file[0:8], '%Y%m%d')
def __str__(self):
return f'Download{{file:"{self.file_name}", date:"{self.date}"}}'
def _get_show_catchup_url():
if show_config['station_catchup_url'].endswith("/"):
return show_config['station_catchup_url'] + show_config['show_id']
else:
return show_config['station_catchup_url'] + "/" + show_config['show_id']
def _get_show_catchup_response():
verbose('Calling channel catchup endpoint...')
response = request.urlopen(_get_show_catchup_url()).read().decode('utf-8')
return json.loads(response)
def _read_catchup_response_from_file():
verbose('Reading a fake response from file...')
with open('fake-response.json') as f:
file_data = f.read()
return json.loads(file_data)
def _get_show_response():
if args.with_fake_response:
return _read_catchup_response_from_file()
else:
return _get_show_catchup_response()
def _get_file_format():
if show_config['file_format']:
return show_config['file_format']
else:
return 'm4a'
def _get_episode_downloads_folder():
if show_config['download_folder']:
download_folder = show_config['download_folder']
else:
download_folder = DOWNLOAD_PATH
download_folder = download_folder.strip()
os.makedirs(name=os.path.expanduser(download_folder), exist_ok=True)
if download_folder.endswith("/"):
return download_folder
else:
return download_folder + "/"
def get_downloaded_episodes():
verbose('Getting list of all downloaded episodes...')
download_folder = os.path.expanduser(_get_episode_downloads_folder())
os.chdir(download_folder)
files = glob('*.' + _get_file_format())
verbose(f'Found {len(files)} downloads in the folder...')
downloads = []
for file in files:
downloaded_file = Download(file)
downloads.append(downloaded_file)
return downloads
def _parse_episodes(show):
parsed_episodes = []
for episode in show['episodes']:
parsed = Episode(episode)
parsed_episodes.append(parsed)
return parsed_episodes
def get_latest_episodes():
show = _get_show_response()
verbose(f'Show json response: {json.dumps(show, indent=2)}')
episodes = _parse_episodes(show)
verbose(f'Found {len(episodes)} episodes in show response...')
for e in episodes:
verbose(e)
return episodes
def download(episode):
if args.with_fake_response:
progress_complete(episode.title_with_date, 100)
return
url_request = request.Request(episode.stream_url)
url_connect = request.urlopen(url_request)
file_size = int(url_connect.info()['Content-Length'])
download_block_size = 8192
downloaded_size = 0
out_file_name = datetime.strftime(episode.date, "%Y%m%d") + '.' + _get_file_format()
download_folder = os.path.expanduser(_get_episode_downloads_folder())
tmp_folder = download_folder + 'tmp'
if not os.path.exists(tmp_folder):
os.mkdir(tmp_folder)
os.chdir(tmp_folder)
with open(out_file_name, 'wb') as out_file:
while True:
buffer = url_connect.read(download_block_size)
downloaded_size += len(buffer)
if not buffer:
break
out_file.write(buffer)
progress(episode.title_with_date, file_size, downloaded_size)
os.rename(out_file_name, os.path.join(download_folder, out_file_name))
progress_complete(episode.title_with_date, file_size)
def progress(file_name, total_len, current_size):
progress_blocks = int(total_len / MAX_PROGRESS_BAR_BLOCKS)
current_position = int(current_size / progress_blocks)
sys.stdout.write('\r%s: [%s%s] %s/%s %s'
% (file_name,
'=' * current_position,
' ' * (MAX_PROGRESS_BAR_BLOCKS - current_position),
current_size,
total_len,
''
))
sys.stdout.flush()
def progress_complete(file_name, total_len):
print('\r%s: [%s] %s/%s => ✅ Done' % (file_name, '=' * MAX_PROGRESS_BAR_BLOCKS, total_len, total_len))
def get_episodes_to_download():
episodes = get_latest_episodes()
downloaded = get_downloaded_episodes()
downloaded_dates = {file.date for file in downloaded}
episodes_to_download = []
for episode in episodes:
if episode.date not in downloaded_dates:
episodes_to_download.append(episode)
return episodes_to_download
def download_latest():
verbose('Checking for latest episodes..')
pending_downloads = get_episodes_to_download()
if not pending_downloads:
print(f'Nothing to download, all up to date 🙌')
return
print(f'Found {len(pending_downloads)} new episodes to download.')
for i in range(0, len(pending_downloads)):
print(f'Downloading {i + 1} of {len(pending_downloads)}:')
download(pending_downloads[i])
print(f'Downloads complete 🏁')
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description=INSTRUCTIONS)
parser.add_argument('--with-fake-response', action='store_true', default=False)
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args(sys.argv[1:])
if args.verbose:
verbose = print
else:
def verbose(*arg, **kw):
pass
verbose(f'Called download latest episodes with args: {args}')
config_file = os.path.expanduser('~/.global_radio_downloader.cfg')
if not os.path.exists(config_file):
parser.print_help()
sys.exit(1)
config = ConfigParser(default_section='radio-station')
config.read(config_file)
show_config = config['radio-station']
download_latest()
|
the-stack_0_20872 | #!/usr/bin/env python
import glob
import os
import os.path
import sys
if sys.version_info < (3, 5, 0):
sys.stderr.write("ERROR: You need Python 3.5 or later to use mypy.\n")
exit(1)
# we'll import stuff from the source tree, let's ensure is on the sys path
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
# This requires setuptools when building; setuptools is not needed
# when installing from a wheel file (though it is still needed for
# alternative forms of installing, as suggested by README.md).
from setuptools import setup
from setuptools.command.build_py import build_py
from mypy.version import __version__ as version
from mypy import git
git.verify_git_integrity_or_abort(".")
description = 'Optional static typing for Python'
long_description = '''
Mypy -- Optional Static Typing for Python
=========================================
Add type annotations to your Python programs, and use mypy to type
check them. Mypy is essentially a Python linter on steroids, and it
can catch many programming errors by analyzing your program, without
actually having to run it. Mypy has a powerful type system with
features such as type inference, gradual typing, generics and union
types.
'''.lstrip()
def find_package_data(base, globs, root='mypy'):
"""Find all interesting data files, for setup(package_data=)
Arguments:
      base: The base directory to search in.
      globs: A list of glob patterns to accept files.
      root: The directory that returned paths are made relative to.
"""
rv_dirs = [root for root, dirs, files in os.walk(base)]
rv = []
for rv_dir in rv_dirs:
files = []
for pat in globs:
files += glob.glob(os.path.join(rv_dir, pat))
if not files:
continue
rv.extend([os.path.relpath(f, root) for f in files])
return rv
class CustomPythonBuild(build_py):
def pin_version(self):
path = os.path.join(self.build_lib, 'mypy')
self.mkpath(path)
with open(os.path.join(path, 'version.py'), 'w') as stream:
stream.write('__version__ = "{}"\n'.format(version))
def run(self):
self.execute(self.pin_version, ())
build_py.run(self)
cmdclass = {'build_py': CustomPythonBuild}
package_data = ['py.typed']
package_data += find_package_data(os.path.join('mypy', 'typeshed'), ['*.py', '*.pyi'])
package_data += find_package_data(os.path.join('mypy', 'xml'), ['*.xsd', '*.xslt', '*.css'])
USE_MYPYC = False
# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
if len(sys.argv) > 1 and sys.argv[1] == '--use-mypyc':
sys.argv.pop(1)
USE_MYPYC = True
if os.getenv('MYPY_USE_MYPYC', None) == '1':
USE_MYPYC = True
if USE_MYPYC:
MYPYC_BLACKLIST = tuple(os.path.join('mypy', x) for x in (
# Need to be runnable as scripts
'__main__.py',
'sitepkgs.py',
os.path.join('dmypy', '__main__.py'),
# Uses __getattr__/__setattr__
'split_namespace.py',
# Lies to mypy about code reachability
'bogus_type.py',
# We don't populate __file__ properly at the top level or something?
# Also I think there would be problems with how we generate version.py.
'version.py',
)) + (
# Don't want to grab this accidentally
os.path.join('mypyc', 'lib-rt', 'setup.py'),
)
everything = (
[os.path.join('mypy', x) for x in find_package_data('mypy', ['*.py'])] +
[os.path.join('mypyc', x) for x in find_package_data('mypyc', ['*.py'], root='mypyc')])
# Start with all the .py files
all_real_pys = [x for x in everything
if not x.startswith(os.path.join('mypy', 'typeshed') + os.sep)]
# Strip out anything in our blacklist
mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]
# Strip out any test code
mypyc_targets = [x for x in mypyc_targets
if not x.startswith((os.path.join('mypy', 'test') + os.sep,
os.path.join('mypyc', 'test') + os.sep,
os.path.join('mypyc', 'test-data') + os.sep,
))]
# ... and add back in the one test module we need
mypyc_targets.append(os.path.join('mypy', 'test', 'visitors.py'))
# The targets come out of file system apis in an unspecified
# order. Sort them so that the mypyc output is deterministic.
mypyc_targets.sort()
use_other_mypyc = os.getenv('ALTERNATE_MYPYC_PATH', None)
if use_other_mypyc:
# This bit is super unfortunate: we want to use a different
# mypy/mypyc version, but we've already imported parts, so we
# remove the modules that we've imported already, which will
# let the right versions be imported by mypyc.
del sys.modules['mypy']
del sys.modules['mypy.version']
del sys.modules['mypy.git']
sys.path.insert(0, use_other_mypyc)
from mypyc.build import mypycify
opt_level = os.getenv('MYPYC_OPT_LEVEL', '3')
force_multifile = os.getenv('MYPYC_MULTI_FILE', '') == '1'
ext_modules = mypycify(
mypyc_targets + ['--config-file=mypy_bootstrap.ini'],
opt_level=opt_level,
        # Use multi-file compilation mode on windows because without it
# our Appveyor builds run out of memory sometimes.
multi_file=sys.platform == 'win32' or force_multifile,
)
else:
ext_modules = []
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development',
]
setup(name='mypy',
version=version,
description=description,
long_description=long_description,
author='Jukka Lehtosalo',
author_email='[email protected]',
url='http://www.mypy-lang.org/',
license='MIT License',
py_modules=[],
ext_modules=ext_modules,
packages=[
'mypy', 'mypy.test', 'mypy.server', 'mypy.plugins', 'mypy.dmypy',
'mypyc', 'mypyc.test',
],
package_data={'mypy': package_data},
scripts=['scripts/mypyc'],
entry_points={'console_scripts': ['mypy=mypy.__main__:console_entry',
'stubgen=mypy.stubgen:main',
'dmypy=mypy.dmypy.client:console_entry',
]},
classifiers=classifiers,
cmdclass=cmdclass,
# When changing this, also update mypy-requirements.txt.
install_requires=['typed_ast >= 1.4.0, < 1.5.0',
'typing_extensions>=3.7.4',
'mypy_extensions >= 0.4.3, < 0.5.0',
],
# Same here.
extras_require={'dmypy': 'psutil >= 4.0'},
python_requires=">=3.5",
include_package_data=True,
)
|
the-stack_0_20873 | import collections
import csv
import glob
import os
import numpy as np
Prices = collections.namedtuple("Prices", field_names=["open", "high", "low", "close", "volume"])
def read_csv(file_name, sep=",", filter_data=True, fix_open_price=False):
print("Reading", file_name)
with open(file_name, "rt", encoding="utf-8") as fd:
reader = csv.reader(fd, delimiter=sep)
h = next(reader)
if "<OPEN>" not in h and sep == ",":
return read_csv(file_name, ";")
indices = [h.index(s) for s in ("<OPEN>", "<HIGH>", "<LOW>", "<CLOSE>", "<VOL>")]
o, h, l, c, v = [], [], [], [], []
count_out = 0
count_filter = 0
count_fixed = 0
prev_vals = None
for row in reader:
vals = list(map(float, [row[idx] for idx in indices]))
if filter_data and all(map(lambda v: abs(v - vals[0]) < 1e-8, vals[:-1])):
count_filter += 1
continue
po, ph, pl, pc, pv = vals
# fix open price for current bar to match close price for the previous bar
if fix_open_price and prev_vals is not None:
ppo, pph, ppl, ppc, ppv = prev_vals
if abs(po - ppc) > 1e-8:
count_fixed += 1
po = ppc
pl = min(pl, po)
ph = max(ph, po)
count_out += 1
o.append(po)
c.append(pc)
h.append(ph)
l.append(pl)
v.append(pv)
prev_vals = vals
print(
"Read done, got %d rows, %d filtered, %d open prices adjusted"
% (count_filter + count_out, count_filter, count_fixed)
)
return Prices(
open=np.array(o, dtype=np.float32),
high=np.array(h, dtype=np.float32),
low=np.array(l, dtype=np.float32),
close=np.array(c, dtype=np.float32),
volume=np.array(v, dtype=np.float32),
)
def prices_to_relative(prices):
"""
    Convert prices to values relative to the open price
    :param prices: Prices tuple with open, high, low, close, volume
    :return: Prices tuple with the original open and volume, and high, low, close relative to open
"""
assert isinstance(prices, Prices)
rh = (prices.high - prices.open) / prices.open
rl = (prices.low - prices.open) / prices.open
rc = (prices.close - prices.open) / prices.open
return Prices(open=prices.open, high=rh, low=rl, close=rc, volume=prices.volume)
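# Editor's worked example (hypothetical numbers, added for illustration): with
# open=10, high=11, low=9, close=10.5 the relative values are 0.1, -0.1 and 0.05.
def _relative_prices_example():
    bar = Prices(open=np.array([10.0], dtype=np.float32),
                 high=np.array([11.0], dtype=np.float32),
                 low=np.array([9.0], dtype=np.float32),
                 close=np.array([10.5], dtype=np.float32),
                 volume=np.array([100.0], dtype=np.float32))
    return prices_to_relative(bar)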
def load_relative(csv_file):
return prices_to_relative(read_csv(csv_file))
def price_files(dir_name):
result = []
for path in glob.glob(os.path.join(dir_name, "*.csv")):
result.append(path)
return result
def load_year_data(year, basedir="data"):
y = str(year)[-2:]
result = {}
for path in glob.glob(os.path.join(basedir, "*_%s*.csv" % y)):
result[path] = load_relative(path)
return result
|
the-stack_0_20875 | """
<Module Name>
util.py
<Author>
Santiago Torres-Arias <[email protected]>
<Started>
Nov 15, 2017
<Copyright>
See LICENSE for licensing information.
<Purpose>
general-purpose utilities for binary data handling and pgp data parsing
"""
import struct
import binascii
import re
import logging
from distutils.version import StrictVersion # pylint: disable=no-name-in-module,import-error
CRYPTO = True
NO_CRYPTO_MSG = 'gpg.utils requires the cryptography library'
try:
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes as hashing
except ImportError:
CRYPTO = False
from securesystemslib import exceptions
from securesystemslib import process
from securesystemslib.gpg import constants
from securesystemslib.gpg.exceptions import PacketParsingError
log = logging.getLogger(__name__)
def get_mpi_length(data):
"""
<Purpose>
parses an MPI (Multi-Precision Integer) buffer and returns the appropriate
length. This is mostly done to perform bitwise to byte-wise conversion.
See RFC4880 section 3.2. Multiprecision Integers for details.
<Arguments>
data: The MPI data
<Exceptions>
None
<Side Effects>
None
<Returns>
The length of the MPI contained at the beginning of this data buffer.
"""
bitlength = int(struct.unpack(">H", data)[0])
# Notice the /8 at the end, this length is the bitlength, not the length of
# the data in bytes (as len reports it)
return int((bitlength - 1)/8) + 1
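# Editor's worked example (synthetic header, added for illustration): a 1023-bit MPI
# starts with the two length octets 0x03 0xFF; (1023 - 1) // 8 + 1 == 128 bytes follow.
def _mpi_length_example():
  two_octet_length_header = struct.pack(">H", 1023)
  return get_mpi_length(two_octet_length_header)  # -> 128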
def hash_object(headers, algorithm, content):
"""
<Purpose>
Hash data prior to signature verification in conformance of the RFC4880
openPGP standard.
<Arguments>
headers: the additional OpenPGP headers as populated from
gpg_generate_signature
algorithm: The hash algorithm object defined by the cryptography.io hashes
module
content: the signed content
<Exceptions>
securesystemslib.exceptions.UnsupportedLibraryError if:
the cryptography module is unavailable
<Side Effects>
None
<Returns>
The RFC4880-compliant hashed buffer
"""
if not CRYPTO: # pragma: no cover
raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)
# As per RFC4880 Section 5.2.4., we need to hash the content,
# signature headers and add a very opinionated trailing header
hasher = hashing.Hash(algorithm, backend=backends.default_backend())
hasher.update(content)
hasher.update(headers)
hasher.update(b'\x04\xff')
hasher.update(struct.pack(">I", len(headers)))
return hasher.finalize()
def parse_packet_header(data, expected_type=None):
"""
<Purpose>
Parse out packet type and header and body lengths from an RFC4880 packet.
<Arguments>
data:
An RFC4880 packet as described in section 4.2 of the rfc.
expected_type: (optional)
Used to error out if the packet does not have the expected
type. See securesystemslib.gpg.constants.PACKET_TYPE_* for
available types.
<Exceptions>
securesystemslib.gpg.exceptions.PacketParsingError
If the new format packet length encodes a partial body length
If the old format packet length encodes an indeterminate length
If header or body length could not be determined
If the expected_type was passed and does not match the packet type
IndexError
If the passed data is incomplete
<Side Effects>
None.
<Returns>
A tuple of packet type, header length, body length and packet length.
(see RFC4880 4.3. for the list of available packet types)
"""
data = bytearray(data)
header_len = None
body_len = None
# If Bit 6 of 1st octet is set we parse a New Format Packet Length, and
# an Old Format Packet Lengths otherwise
if data[0] & 0b01000000:
# In new format packet lengths the packet type is encoded in Bits 5-0 of
# the 1st octet of the packet
packet_type = data[0] & 0b00111111
# The rest of the packet header is the body length header, which may
# consist of one, two or five octets. To disambiguate the RFC, the first
# octet of the body length header is the second octet of the packet.
if data[1] < 192:
header_len = 2
body_len = data[1]
elif data[1] >= 192 and data[1] <= 223:
header_len = 3
body_len = (data[1] - 192 << 8) + data[2] + 192
elif data[1] >= 224 and data[1] < 255:
raise PacketParsingError("New length "
"format packets of partial body lengths are not supported")
elif data[1] == 255:
header_len = 6
body_len = data[2] << 24 | data[3] << 16 | data[4] << 8 | data[5]
else: # pragma: no cover
# Unreachable: octet must be between 0 and 255
raise PacketParsingError("Invalid new length")
else:
# In old format packet lengths the packet type is encoded in Bits 5-2 of
# the 1st octet and the length type in Bits 1-0
packet_type = (data[0] & 0b00111100) >> 2
length_type = data[0] & 0b00000011
# The body length is encoded using one, two, or four octets, starting
# with the second octet of the packet
if length_type == 0:
body_len = data[1]
header_len = 2
elif length_type == 1:
header_len = 3
body_len = struct.unpack(">H", data[1:header_len])[0]
elif length_type == 2:
header_len = 5
body_len = struct.unpack(">I", data[1:header_len])[0]
elif length_type == 3:
raise PacketParsingError("Old length "
"format packets of indeterminate length are not supported")
else: # pragma: no cover (unreachable)
# Unreachable: bits 1-0 must be one of 0 to 3
raise PacketParsingError("Invalid old length")
if header_len is None or body_len is None: # pragma: no cover
# Unreachable: One of above must have assigned lengths or raised error
raise PacketParsingError("Could not determine packet length")
if expected_type is not None and packet_type != expected_type:
raise PacketParsingError("Expected packet "
"{}, but got {} instead!".format(expected_type, packet_type))
return packet_type, header_len, body_len, header_len + body_len
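# Editor's worked example (synthetic bytes, not from a real key): a new-format packet
# whose first octet 0b11000010 encodes packet type 2 (signature) and whose second
# octet encodes a one-octet body length of 5, giving (type, header, body, total).
def _packet_header_example():
  data = bytes([0b11000010, 5]) + bytes(5)
  return parse_packet_header(data)  # -> (2, 2, 5, 7)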
def compute_keyid(pubkey_packet_data):
"""
<Purpose>
compute a keyid from an RFC4880 public-key buffer
<Arguments>
pubkey_packet_data: the public-key packet buffer
<Exceptions>
securesystemslib.exceptions.UnsupportedLibraryError if:
the cryptography module is unavailable
<Side Effects>
None
<Returns>
The RFC4880-compliant hashed buffer
"""
if not CRYPTO: # pragma: no cover
raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)
hasher = hashing.Hash(hashing.SHA1(), backend=backends.default_backend())
hasher.update(b'\x99')
hasher.update(struct.pack(">H", len(pubkey_packet_data)))
hasher.update(bytes(pubkey_packet_data))
return binascii.hexlify(hasher.finalize()).decode("ascii")
def parse_subpacket_header(data):
""" Parse out subpacket header as per RFC4880 5.2.3.1. Signature Subpacket
Specification. """
# NOTE: Although the RFC does not state it explicitly, the length encoded
# in the header must be greater equal 1, as it includes the mandatory
# subpacket type octet.
# Hence, passed bytearrays like [0] or [255, 0, 0, 0, 0], which encode a
# subpacket length 0 are invalid.
# The caller has to deal with the resulting IndexError.
if data[0] < 192:
length_len = 1
length = data[0]
elif data[0] >= 192 and data[0] < 255:
length_len = 2
length = ((data[0] - 192 << 8) + (data[1] + 192))
elif data[0] == 255:
length_len = 5
length = struct.unpack(">I", data[1:length_len])[0]
else: # pragma: no cover (unreachable)
raise PacketParsingError("Invalid subpacket header")
return data[length_len], length_len + 1, length - 1, length_len + length
def parse_subpackets(data):
"""
<Purpose>
parse the subpackets fields
<Arguments>
    data: the unparsed subpacket octets
  <Exceptions>
    IndexError if the subpacket octets are incomplete or malformed
<Side Effects>
None
<Returns>
A list of tuples with like:
[ (packet_type, data),
(packet_type, data),
...
]
"""
parsed_subpackets = []
position = 0
while position < len(data):
subpacket_type, header_len, _, subpacket_len = \
parse_subpacket_header(data[position:])
payload = data[position+header_len:position+subpacket_len]
parsed_subpackets.append((subpacket_type, payload))
position += subpacket_len
return parsed_subpackets
def get_version():
"""
<Purpose>
Uses `gpg2 --version` to get the version info of the installed gpg2
and extracts and returns the version number.
The executed base command is defined in constants.GPG_VERSION_COMMAND.
<Exceptions>
securesystemslib.exceptions.UnsupportedLibraryError:
If the gpg command is not available
<Returns>
Version number string, e.g. "2.1.22"
"""
if not constants.HAVE_GPG: # pragma: no cover
raise exceptions.UnsupportedLibraryError(constants.NO_GPG_MSG)
command = constants.GPG_VERSION_COMMAND
gpg_process = process.run(command, stdout=process.PIPE,
stderr=process.PIPE, universal_newlines=True)
full_version_info = gpg_process.stdout
version_string = re.search(r'(\d\.\d\.\d+)', full_version_info).group(1)
return version_string
def is_version_fully_supported():
"""
<Purpose>
Compares the version of installed gpg2 with the minimal fully supported
gpg2 version (2.1.0).
<Returns>
True if the version returned by `get_version` is greater-equal
constants.FULLY_SUPPORTED_MIN_VERSION, False otherwise.
"""
installed_version = get_version()
# Excluded so that coverage does not vary in different test environments
if (StrictVersion(installed_version) >=
StrictVersion(constants.FULLY_SUPPORTED_MIN_VERSION)): # pragma: no cover
return True
else: # pragma: no cover
return False
def get_hashing_class(hash_algorithm_id):
"""
<Purpose>
Return a pyca/cryptography hashing class reference for the passed RFC4880
hash algorithm ID.
<Arguments>
hash_algorithm_id:
one of SHA1, SHA256, SHA512 (see securesystemslib.gpg.constants)
<Exceptions>
ValueError
if the passed hash_algorithm_id is not supported.
<Returns>
A pyca/cryptography hashing class
"""
supported_hashing_algorithms = [constants.SHA1, constants.SHA256,
constants.SHA512]
corresponding_hashing_classes = [hashing.SHA1, hashing.SHA256,
hashing.SHA512]
# Map supported hash algorithm ids to corresponding hashing classes
hashing_class = dict(zip(supported_hashing_algorithms,
corresponding_hashing_classes))
try:
return hashing_class[hash_algorithm_id]
except KeyError:
raise ValueError("Hash algorithm '{}' not supported, must be one of '{}' "
"(see RFC4880 9.4. Hash Algorithms).".format(hash_algorithm_id,
supported_hashing_algorithms))
|
the-stack_0_20878 | import csv
from book import BookExporter
class CategoryExporter:
"""
Write a csv formatted file containing one entry per book.
"""
def __init__(self, category):
self.category = category
def exec(self, file):
writer = csv.writer(file)
writer.writerow([
'product_page_url',
'universal_ product_code (upc)',
'title',
'price_including_tax',
'price_excluding_tax',
'number_available',
'product_description',
'category',
'review_rating',
'image_url'
])
for book in self.category.books:
exporter = BookExporter(book)
exporter.exec(file)
|
the-stack_0_20879 | import copy
from typing import Tuple
import numpy as np
from odyssey.distribution import Distribution
from iliad.integrators.info import EuclideanLeapfrogInfo
from iliad.integrators.states import EuclideanLeapfrogState
def single_step(
distr: Distribution,
state: EuclideanLeapfrogState,
info: EuclideanLeapfrogInfo,
step_size: float
) -> Tuple[EuclideanLeapfrogState, EuclideanLeapfrogInfo]:
"""Implements a single step of the leapfrog integrator, which is symmetric,
symplectic, and second-order accurate for separable Hamiltonian systems.
Args:
distr: The distribution that guides the time evolution of the Euclidean
Hamiltonian trajectory.
state: An object containing the position and momentum variables of the
state in phase space, and possibly previously computed log-posterior
and gradients.
info: An object that keeps track of the number of fixed point iterations
and whether or not integration has been successful.
step_size: Integration step_size.
Returns:
state: An augmented state object with the updated position and momentum
and values for the log-posterior and its gradient.
info: An information object with the indicator of successful integration.
"""
half_step = 0.5*step_size
state.momentum += half_step * state.force
state.velocity = state.inv_metric.dot(state.momentum)
state.position += step_size * state.velocity
state.update(distr)
state.momentum += half_step * state.force
return state, info
def euclidean_leapfrog(
state: EuclideanLeapfrogState,
step_size: float,
num_steps: int,
distr: Distribution
) -> Tuple[EuclideanLeapfrogState, EuclideanLeapfrogInfo]:
"""Implements the leapfrog integrator for a separable Hamiltonian.
Args:
state: An object containing the position and momentum variables of the
state in phase space, and possibly previously computed log-posterior
and gradients.
step_size: Integration step_size.
num_steps: Number of integration steps.
distr: The distribution that guides the time evolution of the Euclidean
Hamiltonian trajectory.
Returns:
state: An augmented state object with the updated position and momentum
and values for the log-posterior and its gradient.
info: An information object with the indicator of successful integration.
"""
state = copy.copy(state)
info = EuclideanLeapfrogInfo()
for i in range(num_steps):
state, info = single_step(distr, state, info, step_size)
state.velocity = state.inv_metric.dot(state.momentum)
return state, info
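# Editor's standalone sketch (added for illustration; it does not use the odyssey /
# iliad state objects above): the same half-kick / drift / half-kick update for a
# one-dimensional standard Gaussian with a unit metric, so velocity equals momentum.
def _leapfrog_gaussian_demo(q=1.0, p=0.5, step_size=0.1, num_steps=10):
    def force(q):
        return -q  # gradient of log N(0, 1)
    for _ in range(num_steps):
        p = p + 0.5 * step_size * force(q)
        q = q + step_size * p
        p = p + 0.5 * step_size * force(q)
    return q, p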
|
the-stack_0_20881 | """
Context managers, i.e. things you can use with the 'with' statement.
"""
import fcntl
from contextlib import contextmanager
from typing import Iterator, IO, Any, Union
@contextmanager
def flock(lockfile: Union[int, IO[Any]], shared: bool=False) -> Iterator[None]:
"""Lock a file object using flock(2) for the duration of a 'with' statement.
If shared is True, use a LOCK_SH lock, otherwise LOCK_EX."""
fcntl.flock(lockfile, fcntl.LOCK_SH if shared else fcntl.LOCK_EX)
try:
yield
finally:
fcntl.flock(lockfile, fcntl.LOCK_UN)
@contextmanager
def lockfile(filename: str, shared: bool=False) -> Iterator[None]:
"""Lock a file using flock(2) for the duration of a 'with' statement.
If shared is True, use a LOCK_SH lock, otherwise LOCK_EX.
The file is given by name and will be created if it does not exist."""
with open(filename, 'w') as lock:
with flock(lock, shared=shared):
yield
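# Editor's usage sketch (hypothetical lock path; fcntl makes this POSIX-only).
if __name__ == '__main__':
    with lockfile('/tmp/example.lock'):
        print('holding an exclusive lock on /tmp/example.lock')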
|
the-stack_0_20882 |
class SidebarPartField:
"""
Performs introspection into document part fields.
"""
def __init__(
self,
document,
model,
field_name,
options={}
):
"""
`document` = papermerge.core.models.document instance
`model` = document part model class as provided by 3rd party app
`field_name` name of the field we are interested in
"""
self.document = document
self.model = model
self.field_name = field_name
self.options = options
fields = [
field
for field in self.model._meta.fields
if field.name == field_name
]
if fields:
# django field matching given field_name
self.field = fields[0]
def to_json(self):
ret = {}
internal_type = self.get_internal_type()
ret['class'] = internal_type
if internal_type == 'ForeignKey':
r_obj = self.get_value()
if self.options and self.options[self.field_name]:
opts = self.options[self.field_name]
# choice_fields: ['id', 'name'] option instructs
# that this foreign key must be displayed as
# choices. Thus, returned keys are:
# * value
# * choices
if 'choice_fields' in opts:
value, choices = self._choice_fields_opts(
opts=opts,
r_obj=r_obj
)
ret['value'] = value
ret['choices'] = choices
ret['field_name'] = self.field_name
elif 'fields' in opts:
value = self._fields_opts(
opts=opts,
r_obj=r_obj
)
ret['field_name'] = self.field_name
ret['value'] = value
else:
_f = self.field_name
msg = f"Field {_f} is foreignkey. You provide field_options"
raise ValueError(
msg
)
else:
ret['value'] = self.get_value()
ret['field_name'] = self.field_name
return ret
def get_internal_type(self):
return self.field.get_internal_type()
def get_value(self):
parts = getattr(self.document, 'parts')
value = getattr(parts, self.field_name)
return value
def _fields_opts(
self,
opts,
r_obj
):
ret_dict = {}
fields = opts['fields']
for field in fields:
ret_dict[field] = getattr(r_obj, field)
return ret_dict
def _choice_fields_opts(
self,
opts,
r_obj
):
ret_value = None
ret_choices = []
choice_fields = opts['choice_fields']
if choice_fields and r_obj:
ret_value = (
getattr(r_obj, choice_fields[0]),
getattr(r_obj, choice_fields[1])
)
remote_model_objects = getattr(
self.field.remote_field.model, 'objects'
)
for r_model_inst in remote_model_objects.all():
if choice_fields and r_model_inst:
ret_choices.append(
(
getattr(r_model_inst, choice_fields[0]),
getattr(r_model_inst, choice_fields[1]),
)
)
return ret_value, ret_choices
class SidebarPart:
"""
Wrapper class for managing/rendering document parts
on sidebar.
"""
fields = None
exclude = None
readonly_fields = ()
# model class of the document part
model = None
field_options = {}
def __init__(self, document):
# papermerge.core.models.document instance
self.document = document
if not self.model:
raise ValueError("SidebarPart: missing model attribute")
self.opts = self.model._meta
def to_json(self):
fields = [
SidebarPartField(
document=self.document,
field_name=field,
model=self.model,
options=self.field_options
).to_json()
for field in self.fields
]
ret = {
'label': self.get_label(),
'verbose_name': self.get_verbose_name(),
'js_widget': self.get_js_widget(),
'fields': fields
}
return ret
def get_label(self):
if hasattr(self, 'label'):
return getattr(self, 'label')
return self.opts.app_config.label
def get_verbose_name(self):
if hasattr(self, 'verbose_name'):
return getattr(self, 'verbose_name')
return self.opts.app_config.verbose_name
def get_js_widget(self):
if hasattr(self, 'js_widget'):
return getattr(self, 'js_widget')
return 'DefaultWidget'
|
the-stack_0_20887 | #Class to calculate precision and recall
import random
class precision_recall_calculator():
def __init__(self, test_data, train_data, pm, is_model):
self.test_data = test_data
self.train_data = train_data
self.user_test_sample = None
self.model1 = pm
self.model2 = is_model
self.ism_training_dict = dict()
self.pm_training_dict = dict()
self.test_dict = dict()
#Method to return random percentage of values from a list
def remove_percentage(self, list_a, percentage):
k = int(len(list_a) * percentage)
random.seed(0)
        indices = random.sample(range(len(list_a)), k)
        new_list = [list_a[i] for i in indices]
return new_list
#Create a test sample of users for use in calculating precision
#and recall
def create_user_test_sample(self, percentage):
#Find users common between training and test set
users_test_and_training = list(set(self.test_data['user_id'].unique()).intersection(set(self.train_data['user_id'].unique())))
print("Length of user_test_and_training:%d" % len(users_test_and_training))
#Take only random user_sample of users for evaluations
self.users_test_sample = self.remove_percentage(users_test_and_training, percentage)
print("Length of user sample:%d" % len(self.users_test_sample))
#Method to generate recommendations for users in the user test sample
def get_test_sample_recommendations(self):
#For these test_sample users, get top 10 recommendations from training set
#self.ism_training_dict = {}
#self.pm_training_dict = {}
#self.test_dict = {}
for user_id in self.users_test_sample:
#Get items for user_id from item similarity model
print("Getting recommendations for user:%s" % user_id)
user_sim_items = self.model2.recommend(user_id)
self.ism_training_dict[user_id] = list(user_sim_items["song"])
#Get items for user_id from popularity model
user_sim_items = self.model1.recommend(user_id)
self.pm_training_dict[user_id] = list(user_sim_items["song"])
#Get items for user_id from test_data
test_data_user = self.test_data[self.test_data['user_id'] == user_id]
self.test_dict[user_id] = set(test_data_user['song'].unique() )
#Method to calculate the precision and recall measures
def calculate_precision_recall(self):
#Create cutoff list for precision and recall calculation
cutoff_list = list(range(1,11))
#For each distinct cutoff:
# 1. For each distinct user, calculate precision and recall.
# 2. Calculate average precision and recall.
ism_avg_precision_list = []
ism_avg_recall_list = []
pm_avg_precision_list = []
pm_avg_recall_list = []
num_users_sample = len(self.users_test_sample)
for N in cutoff_list:
ism_sum_precision = 0
ism_sum_recall = 0
pm_sum_precision = 0
pm_sum_recall = 0
ism_avg_precision = 0
ism_avg_recall = 0
pm_avg_precision = 0
pm_avg_recall = 0
for user_id in self.users_test_sample:
ism_hitset = self.test_dict[user_id].intersection(set(self.ism_training_dict[user_id][0:N]))
pm_hitset = self.test_dict[user_id].intersection(set(self.pm_training_dict[user_id][0:N]))
testset = self.test_dict[user_id]
pm_sum_precision += float(len(pm_hitset))/float(N)
pm_sum_recall += float(len(pm_hitset))/float(len(testset))
                ism_sum_precision += float(len(ism_hitset))/float(N)
                ism_sum_recall += float(len(ism_hitset))/float(len(testset))
pm_avg_precision = pm_sum_precision/float(num_users_sample)
pm_avg_recall = pm_sum_recall/float(num_users_sample)
ism_avg_precision = ism_sum_precision/float(num_users_sample)
ism_avg_recall = ism_sum_recall/float(num_users_sample)
ism_avg_precision_list.append(ism_avg_precision)
ism_avg_recall_list.append(ism_avg_recall)
pm_avg_precision_list.append(pm_avg_precision)
pm_avg_recall_list.append(pm_avg_recall)
return (pm_avg_precision_list, pm_avg_recall_list, ism_avg_precision_list, ism_avg_recall_list)
#A wrapper method to calculate all the evaluation measures
def calculate_measures(self, percentage):
#Create a test sample of users
self.create_user_test_sample(percentage)
#Generate recommendations for the test sample users
self.get_test_sample_recommendations()
#Calculate precision and recall at different cutoff values
#for popularity mode (pm) as well as item similarity model (ism)
return self.calculate_precision_recall()
#return (pm_avg_precision_list, pm_avg_recall_list, ism_avg_precision_list, ism_avg_recall_list) |
the-stack_0_20888 | import os, time, yaml
from datetime import datetime
from pycti import OpenCTIConnectorHelper, get_config_variable
from cape.cape import cuckoo, cuckooReport
from cape.telemetry import openCTIInterface
class capeConnector:
"""Connector object"""
def __init__(self):
"""Read in config variables"""
config_file_path = os.path.dirname(os.path.abspath(__file__))
config_file_path += "/config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.FullLoader)
if os.path.isfile(config_file_path)
else {}
)
self.helper = OpenCTIConnectorHelper(config)
self.cape_api_url = get_config_variable(
"CAPE_API_URL", ["cape", "api_url"], config
)
self.cape_url = get_config_variable(
"CAPE_BASE_URL", ["cape", "base_url"], config
)
self.EnableNetTraffic = get_config_variable(
"CAPE_ENABLE_NETWORK_TRAFFIC",
["cape", "enable_network_traffic"],
config,
default=False,
)
self.EnableRegKeys = get_config_variable(
"CAPE_ENABLE_REGISTRY_KEYS",
["cape", "enable_registry_keys"],
config,
default=False,
)
self.verify_ssl = get_config_variable(
"VERIFY_SSL", ["cape", "verify_ssl"], config, default=True
)
self.interval = get_config_variable(
"CAPE_INTERVAL", ["cape", "interval"], config, True, 30
)
self.start_id = get_config_variable(
"CAPE_START_TASK_ID", ["cape", "start_task_id"], config, True, 0
)
self.report_score = get_config_variable(
"CAPE_REPORT_SCORE", ["cape", "report_score"], config, True, 0
)
self.create_indicators = get_config_variable(
"CAPE_CREATE_INDICATORS", ["cape", "create_indicators"], config
)
self.update_existing_data = get_config_variable(
"CONNECTOR_UPDATE_EXISTING_DATA",
["connector", "update_existing_data"],
config,
)
self.cape_api: cuckoo = cuckoo(self.helper, self.cape_api_url, self.verify_ssl)
def get_interval(self):
"""Converts interval hours to seconds"""
return int(self.interval) * 60
@property
def first_run(self):
"""Checks if connector has run before"""
current_state = self.helper.get_state()
return current_state is None or "last_run" not in current_state
def run(self):
"""Run connector on a schedule"""
while True:
if self.first_run:
state = self.helper.get_state()
self.helper.log_info("Connector has never run")
self.helper.log_info(str(state))
# Get Last Cape Task Pulled
if not state:
current_task = 0
else:
if "task" in state:
current_task = self.helper.get_state()["task"]
else:
current_task = 0
# Check If starting Task > last task
if self.start_id > current_task:
current_task = self.start_id
self.helper.set_state({"task": self.start_id})
else:
last_run = datetime.utcfromtimestamp(
self.helper.get_state()["last_run"]
).strftime("%Y-%m-%d %H:%M:%S")
self.helper.log_info("Connector last run: " + last_run)
# Get Last Cape Task Pulled
state = self.helper.get_state()
self.helper.log_info(str(state))
if not state:
current_task = 0
self.helper.log_info("Last Task ID (STATE): " + str(current_task))
if "task" in state:
current_task = state["task"]
self.helper.log_info("Last Task ID (STATE): " + str(current_task))
else:
current_task = 0
# Check If starting Task > last task
if self.start_id > current_task:
current_task = self.start_id
self.helper.set_state({"task": self.start_id})
try:
CapeTasks = (
self.cape_api.getCuckooTasks()
) # Pull List of tasks from the Cape API
except Exception as err:
self.helper.log_error("Error connecting to Cape API")
self.helper.log_error(str(err))
raise (err)
for task in reversed(CapeTasks):
if not task["status"] == "reported":
continue # If task Has not reported Skip
if not task["completed_on"]:
continue # If task Has not completed Skip
try:
if task["id"] > current_task:
taskSummary = cuckooReport(
self.cape_api.getTaskReport(task["id"])
) # Pull Cape Report and Searilize
if not taskSummary:
continue # If no report continue
if not taskSummary.info:
continue # If no report.info continue - we really need this :)
self.helper.log_info(f"Processing Task {taskSummary.info.id}")
# Process and submit cape task as stix bundle
openCTIInterface(
taskSummary,
self.helper,
self.update_existing_data,
[],
self.create_indicators,
self.cape_url,
self.EnableNetTraffic,
self.EnableRegKeys,
self.report_score,
)
# Update last task pulled
self.helper.set_state({"task": taskSummary.info.id})
self.helper.log_info(f"Synced task {task['id']}")
except Exception as e:
self.helper.log_error(
f"An error Occured fetching task {task['id']}; {str(e)}"
)
self.helper.log_info("Finished grabbing Cape Reports")
self.helper.log_info(
f"Run Complete. Sleeping until next run in " f"{self.interval} Minutes"
)
time.sleep(self.get_interval())
if __name__ == "__main__":
try:
CONNECTOR = capeConnector()
CONNECTOR.run()
except Exception as e:
raise e
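# Hypothetical config.yml sketch (values are placeholders, not shipped with this
# file); the keys mirror the get_config_variable() lookups in capeConnector.__init__.
# OpenCTIConnectorHelper additionally expects its own `opencti:`/`connector:`
# settings (URL, token, connector id, ...), omitted here.
#
# connector:
#   update_existing_data: false
# cape:
#   api_url: 'http://cape.local:8000/apiv2/'
#   base_url: 'http://cape.local:8000/'
#   enable_network_traffic: false
#   enable_registry_keys: false
#   verify_ssl: true
#   interval: 30            # minutes between polls
#   start_task_id: 0
#   report_score: 0
#   create_indicators: true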
|
the-stack_0_20889 | """testauto214_dev_23570 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "TestAuto214"
admin.site.site_title = "TestAuto214 Admin Portal"
admin.site.index_title = "TestAuto214 Admin"
# swagger
api_info = openapi.Info(
title="TestAuto214 API",
default_version="v1",
description="API documentation for TestAuto214 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
the-stack_0_20891 | # Sebastian Raschka 2017
#
# screenlamp is a Python toolkit
# for hypothesis-driven virtual screening.
#
# Copyright (C) 2017 Michigan State University
# License: Apache v2
#
# Software author: Sebastian Raschka <http://sebastianraschka.com>
# Software author email: [email protected]
#
# Software source repository: https://github.com/rasbt/screenlamp
# Documentation: https://psa-lab.github.io/screenlamp
#
# screenlamp was developed in the
# Protein Structural Analysis & Design Laboratory
# (http://www.kuhnlab.bmb.msu.edu)
#
# If you are using screenlamp in your research, please cite
# the following journal article:
#
# Raschka, Sebastian, Anne M. Scott, Nan Liu,
# Santosh Gunturu, Mar Huertas, Weiming Li,
# and Leslie A. Kuhn. 2017
#
# Enabling the hypothesis-driven prioritization of
# ligand candidates in big databases:
# Screenlamp and its application to GPCR inhibitor
# discovery for invasive species control.
#
import os
import argparse
import sys
import pandas as pd
import time
from mputil import lazy_imap
from multiprocessing import cpu_count
from biopandas.mol2 import split_multimol2
from biopandas.mol2 import PandasMol2
def parse_distance_string(s):
dist = [int(p.strip()) for p in s.split('-')]
return dist
def get_mol2_files(dir_path):
files = []
if os.path.isdir(dir_path):
for f in os.listdir(dir_path):
if f.endswith(('.mol2', 'mol2.gz')):
file_path = os.path.join(dir_path, f)
files.append(file_path)
elif (os.path.isfile(dir_path) and
dir_path.endswith(('.mol2', 'mol2.gz'))):
files.append(dir_path)
return files
def parse_selection_string(s, df_name='pdmol.df'):
columns = ['(atom_id', '(atom_name', '(atom_type',
'(subst_id', '(subst_name', '(charge']
lst = [subs.strip() for subs in s.split('-->')]
parsed = []
for subs in lst:
for c in columns:
subs = subs.replace(c, '(%s.%s' % (df_name, c[1:]))
parsed.append(subs)
return parsed
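# Worked example (illustrative): with the default df_name, the selection string
#   "((atom_type == 'S.3') | (atom_type == 'S.o2')) --> (atom_type == 'O.2')"
# is split on '-->' and each column token is prefixed with the DataFrame name,
# yielding:
#   ["((pdmol.df.atom_type == 'S.3') | (pdmol.df.atom_type == 'S.o2'))",
#    "(pdmol.df.atom_type == 'O.2')"]
# These two strings are later evaluated with pd.eval() against pdmol.df.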
def data_processor(mol2):
pdmol = PandasMol2().read_mol2_from_list(mol2_lines=mol2[1],
mol2_code=mol2[0])
coordinates = pdmol.df.loc[pd.eval(SELECTION[0]), ['x', 'y', 'z']].values
pdmol._df = pdmol._df[pd.eval(SELECTION[1])]
for xyz in coordinates:
distances = pdmol.distance(xyz)
        match = ((distances.values >= DISTANCE[0]) &
                 (distances.values <= DISTANCE[1])).any()
if match:
return mol2[0]
return ''
def data_processor_gz(mol2_gz):
pdmol = PandasMol2().read_mol2_from_list(mol2_lines=mol2_gz[1],
mol2_code=mol2_gz[0])
coordinates = pdmol.df.loc[pd.eval(SELECTION[0]), ['x', 'y', 'z']].values
pdmol._df = pdmol._df[pd.eval(SELECTION[1])]
for xyz in coordinates:
distances = pdmol.distance(xyz)
        match = ((distances.values >= DISTANCE[0]) &
                 (distances.values <= DISTANCE[1])).any()
if match:
return mol2_gz[0].decode('utf-8')
return ''
def read_and_write(mol2_files, id_file_path, verbose, n_cpus):
if verbose:
sys.stdout.write('Using selection: %s\n' % SELECTION)
sys.stdout.flush()
with open(id_file_path, 'w') as f:
for mol2_file in mol2_files:
if verbose:
start = time.time()
sys.stdout.write('Processing %s' % os.path.basename(mol2_file))
sys.stdout.flush()
cnt = 0
if mol2_file.endswith('.gz'):
data_processor_fn = data_processor_gz
else:
data_processor_fn = data_processor
for chunk in lazy_imap(data_processor=data_processor_fn,
data_generator=split_multimol2(mol2_file),
n_cpus=n_cpus):
                _ = [f.write('%s\n' % mol2_id) for mol2_id in chunk if mol2_id]
cnt += len(chunk)
if verbose:
elapsed = time.time() - start
sys.stdout.write(' | %d mol/sec\n' % (cnt / elapsed))
sys.stdout.flush()
def get_num_cpus(n_cpus):
if not n_cpus:
n_cpus = cpu_count()
    elif n_cpus < 0:
        # interpret a negative value as "all available CPUs except that many"
        n_cpus = cpu_count() + n_cpus
return n_cpus
def main(input_dir, output_file, verbose, n_cpus):
n_cpus = get_num_cpus(n_cpus)
dirpath = os.path.dirname(output_file)
    if dirpath and not os.path.exists(dirpath):
os.mkdir(dirpath)
mol2_files = get_mol2_files(dir_path=input_dir)
read_and_write(mol2_files=mol2_files,
id_file_path=output_file,
verbose=verbose,
n_cpus=n_cpus)
if verbose:
print('Finished')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='A command line tool for filtering mol2 files'
'\nby the distance of two atoms or functional groups.',
epilog="""Example:
python funcgroup_distance_to_id.py\\
--input mol2_dir/\\
--output ids.txt\\
--selection "((atom_type == \'S.3\') | (atom_type == \'S.o2\')) --> (atom_type == \'O.2\')"\\
--distance 13-20\\
--processes 0
\# The example above selects those molecules
\# that contain S.2 or S.o2 atom that is within
\# a 13-20 angstroms distance to an 'O.2' (sp2/keto oxygen) atom
""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--input',
type=str,
required=True,
help='(Required.) Path to a `.mol2` or `.mol2.gz` file,'
'\nor a directory containing `.mol2`/`.mol2.gz`'
'files.')
parser.add_argument('-o', '--output',
type=str,
required=True,
help='(Required.) Directory for writing the output files.')
parser.add_argument('-s', '--selection',
type=str,
required=True,
help='(Required.) Selection condition for the atom distance'
' checks.'
'\n1) Selection example to compare 2 atom types:'
'\n `"(atom_type == \'S.o2\') -->'
' (atom_type == \'O.2\')"`.'
'\n2) Selection example to consider either'
' an S.o2 or S.3 atom to an O.2 atom:'
'\n `"((atom_type == \'S.3\') |'
' (atom_type == \'S.o2\')) -->'
' (atom_type == \'O.2\')"`.'
'\n3) Selection example using logical ORs on '
'both sides:\n'
' `"((atom_type == \'S.3\') | (atom_type == '
'\'S.o2\'))'
' --> ((atom_type == \'O.2\') |'
' (atom_type == \'O.3\'))"`.')
parser.add_argument('-d', '--distance',
type=str,
required=True,
help='(Required.) A distance range formatted'
'\n as "lowerbound-upperbound".'
'\nFor example, if 13-20 is provided as an'
'\nargument, two atoms are considered a match'
'\nif they are not closer than 13 angstroms and'
'\n not farther than 20 angstroms.')
parser.add_argument('--processes',
type=int,
default=1,
help='(Optional, default: `1`.) Number of processes to run in parallel.'
'\nIf processes > 0, the specified number of CPUs'
'\nwill be used.'
'\nIf processes = 0, all available CPUs will'
'\nbe used.'
'\nIf processes = -1, all available CPUs'
'\nminus `processes` will be used.')
parser.add_argument('-v', '--verbose',
type=int,
default=1,
help='(Optional, default: `1`.) Verbosity level. If 0, does not print any'
' output.'
'\nIf 1 (default), prints the file currently'
' processing.')
parser.add_argument('--version', action='version', version='v. 1.0')
args = parser.parse_args()
DISTANCE = parse_distance_string(args.distance)
if len(DISTANCE) != 2:
raise ValueError("Make sure you only have a lower and upper bound"
" for --distance"
"\nFor example 13-20")
SELECTION = parse_selection_string(args.selection)
if len(SELECTION) != 2:
raise ValueError("Make sure you have 2 --selection criteria"
" separated via '-->', for example,"
"\n\"((atom_type == 'S.3') |"
" (atom_type == 'S.o2'))\"")
main(input_dir=args.input,
output_file=args.output,
verbose=args.verbose,
n_cpus=args.processes)
|
the-stack_0_20894 | import pandas
df = pandas.read_csv('data.csv')
print(df.head())
df = df.drop('Name', axis=1)
df['Sex'] = df['Sex'].replace('female', '0').replace('male', '1')
df['Embarked_num'] = df.Embarked.map({'S': 0, 'C': 1, 'Q': 2})
df.dropna(subset=['Age', 'Embarked', 'Cabin'], inplace=True)
print(df.head())
df.to_csv('res_data.csv', index=False)
|
the-stack_0_20895 | from tornado.platform.asyncio import AnyThreadEventLoopPolicy
import json
import asyncio
import aiohttp
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
APIKEY = '259ddcb11156c1648597938984b52919f458ec88e45a6364276e863b3289aadd'
class Holidays():
def __init__(self):
pass
async def main(self, year, country):
async with aiohttp.ClientSession() as session:
async with session.get(
f'https://calendarific.com/api/v2/holidays?&api_key={APIKEY}&country={country}&year={year}') as req:
text = await req.text()
return json.loads(text)
def createHolidays(self, year, country):
loop = asyncio.get_event_loop()
holidays = loop.run_until_complete(self.main(year, country))
return holidays
    def getHoliday(self, day, month, year, holidays):
        events = []
        iso = f'{year}-{month:02d}-{day:02d}'
        for event in holidays["response"]["holidays"]:
            if event["date"]["iso"] == iso:
                events.append(event)
        return events
if __name__ == '__main__':
print(Holidays().getHoliday(10, 5, 2021, Holidays().createHolidays(2021, 'US'))[1])
|
the-stack_0_20896 | from ingenico.direct.sdk.data_object import DataObject
class ShoppingCartExtension(DataObject):
def __init__(self, creator, name, version, extension_id=None):
if not creator:
raise ValueError("creator is required")
if not name:
raise ValueError("name is required")
if not version:
raise ValueError("version is required")
self.__creator = creator
self.__name = name
self.__version = version
self.__extension_id = extension_id
def to_dictionary(self):
dictionary = super(ShoppingCartExtension, self).to_dictionary()
if self.__creator is not None:
dictionary['creator'] = self.__creator
if self.__name is not None:
dictionary['name'] = self.__name
if self.__version is not None:
dictionary['version'] = self.__version
if self.__extension_id is not None:
dictionary['extensionId'] = self.__extension_id
return dictionary
def from_dictionary(self, dictionary):
super(ShoppingCartExtension, self).from_dictionary(dictionary)
if 'creator' in dictionary:
self.__creator = dictionary['creator']
if 'name' in dictionary:
self.__name = dictionary['name']
if 'version' in dictionary:
self.__version = dictionary['version']
if 'extensionId' in dictionary:
self.__extension_id = dictionary['extensionId']
return self
@staticmethod
def create_from_dictionary(dictionary):
if 'creator' in dictionary:
creator = dictionary['creator']
else:
raise ValueError("creator is required")
if 'name' in dictionary:
name = dictionary['name']
else:
raise ValueError("name is required")
if 'version' in dictionary:
version = dictionary['version']
else:
raise ValueError("version is required")
if 'extensionId' in dictionary:
extension_id = dictionary['extensionId']
else:
extension_id = None
return ShoppingCartExtension(creator, name, version, extension_id)
@property
def creator(self):
return self.__creator
@property
def name(self):
return self.__name
@property
def version(self):
return self.__version
@property
def extension_id(self):
return self.__extension_id
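# Minimal usage sketch (illustrative values, an addition for clarity): build an
# extension, serialize it, and rebuild an equivalent object from the dictionary.
if __name__ == '__main__':
    ext = ShoppingCartExtension('ExampleCreator', 'ExampleCart', '1.0', 'ext-001')
    as_dict = ext.to_dictionary()
    rebuilt = ShoppingCartExtension.create_from_dictionary(as_dict)
    print(as_dict)
    print(rebuilt.creator, rebuilt.name, rebuilt.version, rebuilt.extension_id)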
|
the-stack_0_20897 | import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly
from dash.dependencies import Input, Output
import pymongo
import plotly.graph_objects as go
import time
print("------- BIENVENIDO A iHashTag --------")
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(
html.Div([
html.H4('Hashtag Live Sentiment'),
html.Div(id='live-update-text'),
dcc.Graph(id='live-update-graph'),
dcc.Interval(
id='interval-component',
interval=1*1000, # in milliseconds
n_intervals=0
)
])
)
@app.callback(Output('live-update-text', 'children'),
[Input('interval-component', 'n_intervals')])
def update_metrics(n):
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["dashdata"]
mycol = mydb["#Impeachment"]
x = mycol.find_one()
style = {'padding': '5px', 'fontSize': '16px'}
file = open("resultados.txt", "a")
    string = time.strftime("%d/%m/%y %H:%M:%S") + "," + x["muypositivo"] + "," + x["positivo"] + "," + \
             x["neutro"] + "," + x["negativo"] + "," + x["average"] + "\n"
file.write(string)
file.close()
print("hola")
return [
html.Span('Muy Positivos: {0:.2f}'.format(float(x["muypositivo"])), style=style),
html.Span('Positivos: {0:.2f}'.format(float(x["positivo"])), style=style),
html.Span('Neutros: {0:.2f}'.format(float(x["neutro"])), style=style),
html.Span('Negativos: {0:0.2f}'.format(float(x["negativo"])), style=style),
html.Span('Muy negativos: {0:0.2f}'.format(float(x["muynegativo"])), style=style),
html.Span('Media Total: {0:0.2f}'.format(float(x["average"])), style=style)
]
# Multiple components can update everytime interval gets fired.
@app.callback(Output('live-update-graph', 'figure'),
[Input('interval-component', 'n_intervals')])
def update_graph_live(n):
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["dashdata"]
mycol = mydb["#Impeachment"]
data = {
'positivo': [],
'negativo': [],
'neutro': []
}
x = mycol.find_one()
data['positivo'].append(float(x["positivo"]))
data['negativo'].append(float(x["negativo"]))
data['neutro'].append(float(x["neutro"]))
# Create the graph with subplots
fig = go.Figure(data=go.Bar(name = 'Tweet Sentiment',x=["Muy Positivo", "Positivo", "Neutral", "Negativo", "Muy Negativo"],
y=[float(x['muypositivo']),float(x["positivo"]), float(x["neutro"]), float(x["negativo"]), float(x["muynegativo"])],
marker_color=['cyan','green','gray','red','pink'], marker_line_color='rgb(8,48,107)',
marker_line_width=1.5, opacity=0.6, text=[float(x['muypositivo']),float(x["positivo"]), float(x["neutro"]), float(x["negativo"]), float(x["muynegativo"])],
textposition='auto'
))
fig.add_trace(
go.Scatter(name="Average",
x=["Muy Positivo", "Positivo", "Neutral", "Negativo", "Muy Negativo"],
y=[float(x['average']), float(x['average']), float(x['average']), float(x['average']), float(x['average'])]
))
return fig
if __name__ == '__main__':
app.run_server(debug=True)
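# Expected shape of the MongoDB document read by the callbacks above
# (illustrative values only; the collection name mirrors the tracked hashtag):
#   db: "dashdata", collection: "#Impeachment"
#   {"muypositivo": "0.10", "positivo": "0.25", "neutro": "0.40",
#    "negativo": "0.15", "muynegativo": "0.10", "average": "0.12"}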
|
the-stack_0_20898 | from typing import List
import logging
import os
import time
import sentry_sdk
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from model import init_model
sentry_sdk.init(os.getenv("SENTRY_DSN"))
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class_dict, counters, label_to_name = init_model()
def predict(label_name):
try:
class_id = class_dict[label_name]
except KeyError:
return {}
sorted_classes = sorted(enumerate(counters[class_id]), reverse=False, key=lambda x: x[1])
sorted_classes = [x for x in sorted_classes if x[1] > 0]
return [{"prediction": label_to_name[label], "confidence": probability} for label, probability in sorted_classes]
try:
predict("Reply.Acknowledge")
logger.info("model loaded, test query processed")
except Exception as e:
logger.exception("model not loaded")
sentry_sdk.capture_exception(e)
raise e
async def handler(payload: List[str]):
responses = [{}] * len(payload)
try:
responses = [predict(speech_function) for speech_function in payload]
except Exception as e:
sentry_sdk.capture_exception(e)
logger.exception(e)
return responses
@app.post("/model")
async def answer(payload: List[str]):
st_time = time.time()
responses = await handler(payload)
total_time = time.time() - st_time
logger.info(f"speech_function_predictor model exec time: {total_time:.3f}s")
return responses
@app.post("/annotation")
async def annotation(payload: List[str]):
st_time = time.time()
responses = await handler(payload)
total_time = time.time() - st_time
logger.info(f"speech_function_predictor batch exec time: {total_time:.3f}s")
return [{"batch": responses}]
|
the-stack_0_20900 | import math
import numpy as np
import cv2
from gym.envs.box2d.car_racing import PLAYFIELD, FPS, STATE_H, STATE_W, VIDEO_H, VIDEO_W, WINDOW_H, WINDOW_W, SCALE, \
ZOOM
from gym.envs.box2d.car_racing import CarRacing as GymCarRacing
from gym.envs.classic_control import rendering
from gym import spaces
import pyglet
from pyglet import gl
from environments.srl_env import SRLGymEnv
from state_representation.episode_saver import EpisodeSaver
MAX_STEPS = 10000
RENDER_HEIGHT = 224
RENDER_WIDTH = 224
N_DISCRETE_ACTIONS = 4
RELATIVE_POS = True # Use relative position for ground truth
def getGlobals():
"""
:return: (dict)
"""
return globals()
class CarRacingEnv(GymCarRacing, SRLGymEnv):
def __init__(self, name="car_racing", renders=False, record_data=False, is_discrete=True, state_dim=-1,
learn_states=False, save_path='srl_zoo/data/', srl_model="raw_pixels", env_rank=0, srl_pipe=None,
                 lookahead=5, img_shape=None, **_):
"""
Gym wrapper for Racing car environment
WARNING: to be compatible with kuka scripts, additional keyword arguments are discarded
:param name: (str) name of the folder where recorded data will be stored
:param renders: (bool) Whether to display the GUI or not
:param record_data: (bool) Set to true, record frames with the rewards.
:param is_discrete: (bool) Whether to use discrete or continuous actions
:param state_dim: (int) When learning states
:param learn_states: (bool)
:param save_path: (str) location where the saved data should go
:param srl_model: (str) The SRL_model used
:param env_rank: (int) the number ID of the environment
:param srl_pipe: (Queue, [Queue]) contains the input and output of the SRL model
:param lookahead: (int) How many segments ahead of the current position of the track should the target be
"""
        SRLGymEnv.__init__(self, srl_model=srl_model, relative_pos=RELATIVE_POS, env_rank=env_rank, srl_pipe=srl_pipe, img_shape=img_shape)
GymCarRacing.__init__(self)
self._renders = renders
self.img_shape = img_shape
if self.img_shape is None:
self._width = RENDER_WIDTH
self._height = RENDER_HEIGHT
else:
self._height, self._width = self.img_shape[1:]
self._is_discrete = is_discrete
self.lookahead = lookahead
self.relative_pos = RELATIVE_POS
self._env_step_counter = 0
self._observation = None
self.saver = None
if record_data:
self.saver = EpisodeSaver(name, None, state_dim, globals_=getGlobals(), relative_pos=RELATIVE_POS,
learn_states=learn_states, path=save_path)
# Accelerate, brake, stear left, stear right
if self._is_discrete:
self.action_space = spaces.Discrete(N_DISCRETE_ACTIONS)
else:
self.action_space = spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32)
if self.srl_model == "ground_truth":
self.state_dim = self.getGroundTruthDim()
if self.srl_model == "raw_pixels":
self.observation_space = spaces.Box(low=0, high=255, shape=(self._height, self._width, 3), dtype=np.uint8)
else:
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(self.state_dim,), dtype=np.float32)
def getTargetPos(self):
# get the nearest track segment to the current position
# then return the track segment position that is ahead of the nearest track segement
nearest_idx = np.argmin(list(map(
lambda a: np.sqrt(np.sum(((list(a[2:4]) + [0, 0, 0]) - self.getGroundTruth()) ** 2)),
self.track)))
return np.array(list(self.track[(nearest_idx + self.lookahead) % len(self.track)][2:4]) + [0, 0, 0])
@staticmethod
def getGroundTruthDim():
return 5
def getGroundTruth(self):
# the car's current x,y position, angle, speed and angular speed
return np.array(list(self.car.__dict__["hull"].position) +
[self.car.__dict__["hull"].angle,
self.car.__dict__["hull"].inertia,
self.car.__dict__["hull"].angularVelocity])
def getObservation(self):
"""
:return: (numpy array)
"""
self._observation = self.render("rgb_array")
self._observation = cv2.resize(self._observation, (self._width, self._height))
return self._observation
def reset(self):
super().reset()
self._env_step_counter = 0
self.getObservation()
if self.saver is not None:
self.saver.reset(self._observation, self.getTargetPos(), self.getGroundTruth())
if self.srl_model != "raw_pixels":
return self.getSRLState(self._observation)
return self._observation
def step(self, action):
if action is not None:
if self._is_discrete:
self.car.steer([-1, 1, 0, 0][action])
self.car.gas([0, 0, 1, 0][action])
self.car.brake([0, 0, 0, 1][action])
else:
self.car.steer(-action[0])
self.car.gas(action[1])
self.car.brake(action[2])
self.car.step(1.0 / FPS)
self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
self.t += 1.0 / FPS
self.getObservation()
step_reward = 0
done = False
if action is not None: # First step without action, called from reset()
self.reward -= 0.1
self._env_step_counter += 1
# We actually don't want to count fuel spent, we want car to be faster.
self.car.fuel_spent = 0.0
step_reward = self.reward - self.prev_reward
self.prev_reward = self.reward
if self.tile_visited_count == len(self.track) or self._env_step_counter >= MAX_STEPS:
done = True
x, y = self.car.hull.position
if abs(x) > PLAYFIELD or abs(y) > PLAYFIELD:
done = True
step_reward = -100
if self.saver is not None:
self.saver.step(self._observation, action, step_reward, done, self.getGroundTruth())
if self.srl_model != "raw_pixels":
return self.getSRLState(self._observation), step_reward, done, {}
return np.array(self._observation), step_reward, done, {}
# Copied for the original Gym Racing Car env, it is modified to be able to remove the render window.
def render(self, mode='human'):
if self.viewer is None:
self.viewer = rendering.Viewer(WINDOW_W, WINDOW_H)
self.viewer.window.set_visible(self._renders)
self.score_label = pyglet.text.Label('0000', font_size=36, x=20, y=WINDOW_H * 2.5 / 40.00, anchor_x='left',
anchor_y='center', color=(255, 255, 255, 255))
self.transform = rendering.Transform()
if "t" not in self.__dict__:
return # reset() not called yet
zoom = 0.1 * SCALE * max(1 - self.t, 0) + ZOOM * SCALE * min(self.t, 1) # Animate zoom first second
scroll_x = self.car.hull.position[0]
scroll_y = self.car.hull.position[1]
angle = -self.car.hull.angle
vel = self.car.hull.linearVelocity
if np.linalg.norm(vel) > 0.5:
angle = math.atan2(vel[0], vel[1])
self.transform.set_scale(zoom, zoom)
self.transform.set_translation(
WINDOW_W / 2 - (scroll_x * zoom * math.cos(angle) - scroll_y * zoom * math.sin(angle)),
WINDOW_H / 4 - (scroll_x * zoom * math.sin(angle) + scroll_y * zoom * math.cos(angle)))
self.transform.set_rotation(angle)
self.car.draw(self.viewer, mode != "state_pixels")
arr = None
win = self.viewer.window
if mode != 'state_pixels':
win.switch_to()
win.dispatch_events()
if mode == "rgb_array" or mode == "state_pixels":
win.clear()
t = self.transform
if mode == 'rgb_array':
VP_W = VIDEO_W
VP_H = VIDEO_H
else:
VP_W = STATE_W
VP_H = STATE_H
gl.glViewport(0, 0, VP_W, VP_H)
t.enable()
self.render_road()
for geom in self.viewer.onetime_geoms:
geom.render()
t.disable()
            self.render_indicators(WINDOW_W, WINDOW_H)  # TODO: find out why the 2x factor is needed
image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
            arr = np.frombuffer(image_data.data, dtype=np.uint8).copy()
arr = arr.reshape(VP_H, VP_W, 4)
arr = arr[::-1, :, 0:3]
# agent can call or not call env.render() itself when recording video.
if mode == "rgb_array" and not self.human_render:
win.flip()
if mode == 'human':
self.human_render = True
win.clear()
t = self.transform
gl.glViewport(0, 0, WINDOW_W, WINDOW_H)
t.enable()
self.render_road()
for geom in self.viewer.onetime_geoms:
geom.render()
t.disable()
self.render_indicators(WINDOW_W, WINDOW_H)
win.flip()
self.viewer.onetime_geoms = []
return arr
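# Rough smoke-test sketch (an assumption, not part of the original file); it
# requires Box2D/pyglet and a display or virtual framebuffer for rendering.
if __name__ == '__main__':
    env = CarRacingEnv(renders=False, is_discrete=True)
    obs = env.reset()
    for _ in range(10):
        obs, reward, done, _ = env.step(env.action_space.sample())
        if done:
            break
    env.close()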
|
the-stack_0_20901 | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import BaseModule, ModuleList, Sequential
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from .resnet import BasicBlock, Bottleneck
class HRModule(BaseModule):
"""High-Resolution Module for HRNet.
In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
is in this module.
"""
def __init__(self,
num_branches,
blocks,
num_blocks,
in_channels,
num_channels,
multiscale_output=True,
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
block_init_cfg=None,
init_cfg=None):
super(HRModule, self).__init__(init_cfg)
self.block_init_cfg = block_init_cfg
self._check_branches(num_branches, num_blocks, in_channels,
num_channels)
self.in_channels = in_channels
self.num_branches = num_branches
self.multiscale_output = multiscale_output
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
self.with_cp = with_cp
self.branches = self._make_branches(num_branches, blocks, num_blocks,
num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(inplace=False)
def _check_branches(self, num_branches, num_blocks, in_channels,
num_channels):
if num_branches != len(num_blocks):
error_msg = f'NUM_BRANCHES({num_branches}) ' \
f'!= NUM_BLOCKS({len(num_blocks)})'
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = f'NUM_BRANCHES({num_branches}) ' \
f'!= NUM_CHANNELS({len(num_channels)})'
raise ValueError(error_msg)
if num_branches != len(in_channels):
error_msg = f'NUM_BRANCHES({num_branches}) ' \
f'!= NUM_INCHANNELS({len(in_channels)})'
raise ValueError(error_msg)
def _make_one_branch(self,
branch_index,
block,
num_blocks,
num_channels,
stride=1):
downsample = None
if stride != 1 or \
self.in_channels[branch_index] != \
num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
build_conv_layer(
self.conv_cfg,
self.in_channels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(self.norm_cfg, num_channels[branch_index] *
block.expansion)[1])
layers = []
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=self.block_init_cfg))
self.in_channels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=self.block_init_cfg))
return Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
in_channels = self.in_channels
fuse_layers = []
num_out_branches = num_branches if self.multiscale_output else 1
for i in range(num_out_branches):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[i],
kernel_size=1,
stride=1,
padding=0,
bias=False),
build_norm_layer(self.norm_cfg, in_channels[i])[1],
nn.Upsample(
scale_factor=2**(j - i), mode='nearest')))
elif j == i:
fuse_layer.append(None)
else:
conv_downsamples = []
for k in range(i - j):
if k == i - j - 1:
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[i],
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
in_channels[i])[1]))
else:
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[j],
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
in_channels[j])[1],
nn.ReLU(inplace=False)))
fuse_layer.append(nn.Sequential(*conv_downsamples))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def forward(self, x):
"""Forward function."""
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = 0
for j in range(self.num_branches):
if i == j:
y += x[j]
else:
y += self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
@BACKBONES.register_module()
class PoseHighResolutionNet(BaseModule):
"""HRNet backbone.
`High-Resolution Representations for Labeling Pixels and Regions
arXiv: <https://arxiv.org/abs/1904.04514>`_.
Args:
extra (dict): Detailed configuration for each stage of HRNet.
There must be 4 stages, the configuration for each stage must have
5 keys:
- num_modules(int): The number of HRModule in this stage.
- num_branches(int): The number of branches in the HRModule.
- block(str): The type of convolution block.
- num_blocks(tuple): The number of blocks in each branch.
The length must be equal to num_branches.
- num_channels(tuple): The number of channels in each branch.
The length must be equal to num_branches.
in_channels (int): Number of input image channels. Default: 3.
conv_cfg (dict): Dictionary to construct and config conv layer.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: True.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: False.
multiscale_output (bool): Whether to output multi-level features
produced by multiple branches. If False, only the first level
feature will be output. Default: True.
num_joints(int): the number of output for the final layer. Default: 24.
pretrained (str, optional): Model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
def __init__(self,
extra,
in_channels=3,
conv_cfg=None,
norm_cfg=dict(type='BN'),
norm_eval=True,
with_cp=False,
num_joints=24,
zero_init_residual=False,
multiscale_output=True,
pretrained=None,
init_cfg=None):
super(PoseHighResolutionNet, self).__init__(init_cfg)
self.pretrained = pretrained
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
# Assert configurations of 4 stages are in extra
assert 'stage1' in extra and 'stage2' in extra \
and 'stage3' in extra and 'stage4' in extra
# Assert whether the length of `num_blocks` and `num_channels` are
# equal to `num_branches`
for i in range(4):
cfg = extra[f'stage{i + 1}']
assert len(cfg['num_blocks']) == cfg['num_branches'] and \
len(cfg['num_channels']) == cfg['num_branches']
self.extra = extra
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
# stem net
self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
64,
kernel_size=3,
stride=2,
padding=1,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
self.conv_cfg,
64,
64,
kernel_size=3,
stride=2,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
# stage 1
self.stage1_cfg = self.extra['stage1']
num_channels = self.stage1_cfg['num_channels'][0]
block_type = self.stage1_cfg['block']
num_blocks = self.stage1_cfg['num_blocks'][0]
block = self.blocks_dict[block_type]
stage1_out_channels = num_channels * block.expansion
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
# stage 2
self.stage2_cfg = self.extra['stage2']
num_channels = self.stage2_cfg['num_channels']
block_type = self.stage2_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition1 = self._make_transition_layer([stage1_out_channels],
num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
# stage 3
self.stage3_cfg = self.extra['stage3']
num_channels = self.stage3_cfg['num_channels']
block_type = self.stage3_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition2 = self._make_transition_layer(pre_stage_channels,
num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
# stage 4
self.stage4_cfg = self.extra['stage4']
num_channels = self.stage4_cfg['num_channels']
block_type = self.stage4_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition3 = self._make_transition_layer(pre_stage_channels,
num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multiscale_output=multiscale_output)
# self.pretrained_layers = extra['pretrained_layers']
self.final_layer = build_conv_layer(
cfg=self.conv_cfg,
in_channels=pre_stage_channels[0],
out_channels=num_joints,
kernel_size=extra['final_conv_kernel'],
stride=1,
padding=1 if extra['final_conv_kernel'] == 3 else 0)
if extra['downsample'] and extra['use_conv']:
self.downsample_stage_1 = self._make_downsample_layer(
3, num_channel=self.stage2_cfg['num_channels'][0])
self.downsample_stage_2 = self._make_downsample_layer(
2, num_channel=self.stage2_cfg['num_channels'][-1])
self.downsample_stage_3 = self._make_downsample_layer(
1, num_channel=self.stage3_cfg['num_channels'][-1])
elif not extra['downsample'] and extra['use_conv']:
self.upsample_stage_2 = self._make_upsample_layer(
1, num_channel=self.stage2_cfg['num_channels'][-1])
self.upsample_stage_3 = self._make_upsample_layer(
2, num_channel=self.stage3_cfg['num_channels'][-1])
self.upsample_stage_4 = self._make_upsample_layer(
3, num_channel=self.stage4_cfg['num_channels'][-1])
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: the normalization layer named "norm2" """
return getattr(self, self.norm2_name)
def _make_transition_layer(self, num_channels_pre_layer,
num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
num_channels_pre_layer[i],
num_channels_cur_layer[i],
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
num_channels_cur_layer[i])[1],
nn.ReLU(inplace=True)))
else:
transition_layers.append(None)
else:
conv_downsamples = []
for j in range(i + 1 - num_branches_pre):
in_channels = num_channels_pre_layer[-1]
out_channels = num_channels_cur_layer[i] \
if j == i - num_branches_pre else in_channels
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels,
out_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, out_channels)[1],
nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv_downsamples))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
build_conv_layer(
self.conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(self.norm_cfg, planes * block.expansion)[1])
layers = []
block_init_cfg = None
if self.pretrained is None and not hasattr(
self, 'init_cfg') and self.zero_init_residual:
if block is BasicBlock:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm2'))
elif block is Bottleneck:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm3'))
layers.append(
block(
inplanes,
planes,
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=block_init_cfg,
))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=block_init_cfg))
return Sequential(*layers)
def _make_stage(self, layer_config, in_channels, multiscale_output=True):
num_modules = layer_config['num_modules']
num_branches = layer_config['num_branches']
num_blocks = layer_config['num_blocks']
num_channels = layer_config['num_channels']
block = self.blocks_dict[layer_config['block']]
hr_modules = []
block_init_cfg = None
if self.pretrained is None and not hasattr(
self, 'init_cfg') and self.zero_init_residual:
if block is BasicBlock:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm2'))
elif block is Bottleneck:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm3'))
for i in range(num_modules):
# multi_scale_output is only used for the last module
if not multiscale_output and i == num_modules - 1:
reset_multiscale_output = False
else:
reset_multiscale_output = True
hr_modules.append(
HRModule(
num_branches,
block,
num_blocks,
in_channels,
num_channels,
reset_multiscale_output,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
block_init_cfg=block_init_cfg))
return Sequential(*hr_modules), in_channels
def _make_upsample_layer(self, num_layers, num_channel, kernel_size=3):
layers = []
for i in range(num_layers):
layers.append(
nn.Upsample(
scale_factor=2, mode='bilinear', align_corners=True))
layers.append(
build_conv_layer(
cfg=self.conv_cfg,
in_channels=num_channel,
out_channels=num_channel,
kernel_size=kernel_size,
stride=1,
padding=1,
bias=False,
))
layers.append(build_norm_layer(self.norm_cfg, num_channel)[1])
layers.append(nn.ReLU(inplace=True))
return nn.Sequential(*layers)
def _make_downsample_layer(self, num_layers, num_channel, kernel_size=3):
layers = []
for i in range(num_layers):
layers.append(
build_conv_layer(
cfg=self.conv_cfg,
in_channels=num_channel,
out_channels=num_channel,
kernel_size=kernel_size,
stride=2,
padding=1,
bias=False,
))
layers.append(build_norm_layer(self.norm_cfg, num_channel)[1])
layers.append(nn.ReLU(inplace=True))
return nn.Sequential(*layers)
def forward(self, x):
"""Forward function."""
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['num_branches']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['num_branches']):
if self.transition2[i] is not None:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['num_branches']):
if self.transition3[i] is not None:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage4(x_list)
if self.extra['return_list']:
return y_list
elif self.extra['downsample']:
if self.extra['use_conv']:
# Downsampling with strided convolutions
x1 = self.downsample_stage_1(y_list[0])
x2 = self.downsample_stage_2(y_list[1])
x3 = self.downsample_stage_3(y_list[2])
x = torch.cat([x1, x2, x3, y_list[3]], 1)
else:
# Downsampling with interpolation
x0_h, x0_w = y_list[3].size(2), y_list[3].size(3)
x1 = F.interpolate(
y_list[0],
size=(x0_h, x0_w),
mode='bilinear',
align_corners=True)
x2 = F.interpolate(
y_list[1],
size=(x0_h, x0_w),
mode='bilinear',
align_corners=True)
x3 = F.interpolate(
y_list[2],
size=(x0_h, x0_w),
mode='bilinear',
align_corners=True)
x = torch.cat([x1, x2, x3, y_list[3]], 1)
else:
if self.extra['use_conv']:
# Upsampling with interpolations + convolutions
x1 = self.upsample_stage_2(y_list[1])
x2 = self.upsample_stage_3(y_list[2])
x3 = self.upsample_stage_4(y_list[3])
x = torch.cat([y_list[0], x1, x2, x3], 1)
else:
# Upsampling with interpolation
x0_h, x0_w = y_list[0].size(2), y_list[0].size(3)
x1 = F.interpolate(
y_list[1],
size=(x0_h, x0_w),
mode='bilinear',
align_corners=True)
x2 = F.interpolate(
y_list[2],
size=(x0_h, x0_w),
mode='bilinear',
align_corners=True)
x3 = F.interpolate(
y_list[3],
size=(x0_h, x0_w),
mode='bilinear',
align_corners=True)
x = torch.cat([y_list[0], x1, x2, x3], 1)
return x
def train(self, mode=True):
"""Convert the model into training mode will keeping the normalization
layer freezed."""
super(PoseHighResolutionNet, self).train(mode)
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
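# Illustrative `extra` configuration sketch (HRNet-W32-like numbers are an
# assumption, not taken from this file); it covers every key this backbone
# reads: the four stage dicts plus final_conv_kernel/downsample/use_conv/return_list.
#
#     extra = dict(
#         stage1=dict(num_modules=1, num_branches=1, block='BOTTLENECK',
#                     num_blocks=(4,), num_channels=(64,)),
#         stage2=dict(num_modules=1, num_branches=2, block='BASIC',
#                     num_blocks=(4, 4), num_channels=(32, 64)),
#         stage3=dict(num_modules=4, num_branches=3, block='BASIC',
#                     num_blocks=(4, 4, 4), num_channels=(32, 64, 128)),
#         stage4=dict(num_modules=3, num_branches=4, block='BASIC',
#                     num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256)),
#         final_conv_kernel=1,
#         downsample=False,
#         use_conv=False,
#         return_list=False,
#     )
#     model = PoseHighResolutionNet(extra=extra, num_joints=24)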
|
the-stack_0_20902 | #!/usr/bin/python
# -*- coding:utf8 -*-
import numpy as np
import math
import Control_Exp1001 as CE
import os
import json
from Control_Exp1001.demo.thickener_chinese.thickener_chinese import Thickener
from Control_Exp1001.common.replay.replay_buffer import ReplayBuffer
from Control_Exp1001.common.action_noise.no_exploration import No_Exploration
from Control_Exp1001.demo.thickener_chinese.controllers.value_iterate import VI
from Control_Exp1001.demo.thickener_chinese.controllers.hdp import HDP
import torch
import random
from Control_Exp1001.common.penaltys.quadratic import Quadratic
import matplotlib.pyplot as plt
from Control_Exp1001.demo.thickener_chinese.common.one_round_exp import OneRoundExp
from Control_Exp1001.demo.thickener_chinese.common.one_round_evaluation import OneRoundEvaluation
penalty_para = {
#"weight_matrix": [0, 0.002],
"weight_matrix": [0, 0.004],
"S": [0.0001, 0.0008],
#"S": [0.0003, 0.0024],
#"S": [0.0000, 0.000],
}
thickner_para = {
"dt":1,
"noise_in": False,
"noise_p": 0.002,
"noise_type": 3,
    'time_length': 20,  # the thickener simulates 20 seconds per control step
}
from Control_Exp1001.demo.thickener_chinese.common import exp_name
exp_name.set_exp_name('HDP_Replay')
EXP_NAME = exp_name.get_exp_name()
img_path = os.path.join('../images',EXP_NAME)
if not os.path.exists(img_path):
os.mkdir(img_path)
def new_hdp(capacity=2, batch_size=2):
predict_round=3000
gamma=0.6
replay_hdp = ReplayBuffer(capacity=capacity)
env_HDP = Thickener(
noise_p=0.03,
noise_in=True,
)
exploration = No_Exploration()
print('make new hdp controller')
hdp = HDP(
replay_buffer = replay_hdp,
u_bounds = env_HDP.u_bounds,
#exploration = None,
exploration = exploration,
env=env_HDP,
predict_training_rounds=predict_round,
gamma=gamma,
batch_size = batch_size,
predict_batch_size=32,
model_nn_error_limit = 0.0008,
critic_nn_error_limit = 0.001,
actor_nn_error_limit = 0.001,
# 0.005
actor_nn_lr = 0.003,
critic_nn_lr = 0.02,
model_nn_lr = 0.01,
indice_y = None,
indice_y_star = None,
indice_c=None,
hidden_model = 10,
hidden_critic = 14,
hidden_actor = 14,
predict_epoch= 30,
Na=220,
Nc = 500,
img_path=EXP_NAME
)
env_HDP.reset()
hdp.train_identification_model()
return hdp
def run_hdp(rounds=1000,seed=random.randint(0,1000000),name='VI',capacity=2,batch_size=2,
predict_round=3000,u_optim='adam',):
print('seed :',seed)
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
vi = new_hdp(capacity=capacity, batch_size=batch_size)
penalty = Quadratic(**penalty_para)
env_vi = Thickener(
penalty_calculator=penalty,
**thickner_para,
)
res1 = OneRoundExp(controller=vi, env=env_vi,max_step=rounds, exp_name=name).run()
return res1
if __name__ == '__main__':
round = 1600
predict_round=800
res_list = []
rand_seed = np.random.randint(0,10000000)
rand_seed = 9726164
res_list.append(
        run_hdp(rounds=round, seed=rand_seed, name='HDP - no experience replay', predict_round=predict_round, capacity=1, batch_size=1))
res_list.append(
        run_hdp(rounds=round, seed=rand_seed, name='HDP - experience replay of size 2', predict_round=predict_round, capacity=2, batch_size=2))
eval_res = OneRoundEvaluation(res_list=res_list)
eval_res.plot_all()
|
the-stack_0_20903 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2019-2021 PythonistaGuild
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import pathlib
import os
import re
from setuptools import setup
ROOT = pathlib.Path(__file__).parent
on_rtd = os.getenv("READTHEDOCS") == "True"
with open("requirements.txt") as f:
requirements = f.read().splitlines()
if on_rtd:
with open("docs/requirements_rtd.txt") as f:
requirements.extend(f.read().splitlines())
with open(ROOT / "wavelink" / "__init__.py", encoding="utf-8") as f:
VERSION = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
readme = ""
with open("README.rst") as f:
readme = f.read()
setup(
name="wavelink",
author="PythonistaGuild, EvieePy",
url="https://github.com/PythonistaGuild/Wavelink",
version=VERSION,
packages=["wavelink", "wavelink.ext.spotify", "wavelink.types"],
license="MIT",
description="A robust and powerful Lavalink wrapper for disnake and derivatives.",
long_description=readme,
include_package_data=True,
install_requires=requirements,
classifiers=[
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Internet",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
],
)
|
the-stack_0_20904 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model trainer.
This file provides a generic training method that can be used to train a
DetectionModel.
"""
import functools
import tensorflow as tf
from builders import optimizer_builder
from builders import preprocessor_builder
from core import batcher
from core import preprocessor
from core import standard_fields as fields
from utils import ops as util_ops
from utils import variables_helper
from deployment import model_deploy
slim = tf.contrib.slim
def create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
batch_queue_capacity, num_batch_queue_threads,
prefetch_queue_capacity, data_augmentation_options):
"""Sets up reader, prefetcher and returns input queue.
Args:
batch_size_per_clone: batch size to use per clone.
create_tensor_dict_fn: function to create tensor dictionary.
batch_queue_capacity: maximum number of elements to store within a queue.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: maximum capacity of the queue used to prefetch
assembled batches.
data_augmentation_options: a list of tuples, where each tuple contains a
data augmentation function and a dictionary containing arguments and their
values (see preprocessor.py).
Returns:
input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
(which hold images, boxes and targets). To get a batch of tensor_dicts,
call input_queue.Dequeue().
"""
tensor_dict = create_tensor_dict_fn()
tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
tensor_dict[fields.InputDataFields.image], 0)
images = tensor_dict[fields.InputDataFields.image]
float_images = tf.to_float(images)
tensor_dict[fields.InputDataFields.image] = float_images
include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
in tensor_dict)
include_keypoints = (fields.InputDataFields.groundtruth_keypoints
in tensor_dict)
include_multiclass_scores = (fields.InputDataFields.multiclass_scores
in tensor_dict)
if data_augmentation_options:
tensor_dict = preprocessor.preprocess(
tensor_dict, data_augmentation_options,
func_arg_map=preprocessor.get_default_func_arg_map(
include_label_weights=True,
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints))
input_queue = batcher.BatchQueue(
tensor_dict,
batch_size=batch_size_per_clone,
batch_queue_capacity=batch_queue_capacity,
num_batch_queue_threads=num_batch_queue_threads,
prefetch_queue_capacity=prefetch_queue_capacity)
return input_queue
def get_inputs(input_queue,
num_classes,
merge_multiple_label_boxes=False,
use_multiclass_scores=False):
"""Dequeues batch and constructs inputs to object detection model.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
num_classes: Number of classes.
merge_multiple_label_boxes: Whether to merge boxes with multiple labels
or not. Defaults to false. Merged boxes are represented with a single
box and a k-hot encoding of the multiple labels associated with the
boxes.
use_multiclass_scores: Whether to use multiclass scores instead of
groundtruth_classes.
Returns:
images: a list of 3-D float tensor of images.
image_keys: a list of string keys for the images.
locations_list: a list of tensors of shape [num_boxes, 4]
containing the corners of the groundtruth boxes.
classes_list: a list of padded one-hot (or K-hot) float32 tensors containing
target classes.
masks_list: a list of 3-D float tensors of shape [num_boxes, image_height,
image_width] containing instance masks for objects if present in the
input_queue. Else returns None.
keypoints_list: a list of 3-D float tensors of shape [num_boxes,
num_keypoints, 2] containing keypoints for objects if present in the
input queue. Else returns None.
weights_lists: a list of 1-D float32 tensors of shape [num_boxes]
containing groundtruth weight for each box.
"""
read_data_list = input_queue.dequeue()
label_id_offset = 1
def extract_images_and_targets(read_data):
"""Extract images and targets from the input dict."""
image = read_data[fields.InputDataFields.image]
key = ''
if fields.InputDataFields.source_id in read_data:
key = read_data[fields.InputDataFields.source_id]
location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
tf.int32)
classes_gt -= label_id_offset
if merge_multiple_label_boxes and use_multiclass_scores:
raise ValueError(
'Using both merge_multiple_label_boxes and use_multiclass_scores is'
'not supported'
)
if merge_multiple_label_boxes:
location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
location_gt, classes_gt, num_classes)
classes_gt = tf.cast(classes_gt, tf.float32)
elif use_multiclass_scores:
classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores],
tf.float32)
else:
classes_gt = util_ops.padded_one_hot_encoding(
indices=classes_gt, depth=num_classes, left_pad=0)
masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints)
if (merge_multiple_label_boxes and (
masks_gt is not None or keypoints_gt is not None)):
raise NotImplementedError('Multi-label support is only for boxes.')
weights_gt = read_data.get(
fields.InputDataFields.groundtruth_weights)
return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt,
weights_gt)
return zip(*map(extract_images_and_targets, read_data_list))
def _create_losses(input_queue, create_model_fn, train_config):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
train_config: a train_pb2.TrainConfig protobuf.
"""
detection_model = create_model_fn()
(images, _, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_masks_list, groundtruth_keypoints_list,
groundtruth_weights_list) = get_inputs(
input_queue,
detection_model.num_classes,
train_config.merge_multiple_label_boxes,
train_config.use_multiclass_scores)
preprocessed_images = []
true_image_shapes = []
for image in images:
resized_image, true_image_shape = detection_model.preprocess(image)
preprocessed_images.append(resized_image)
true_image_shapes.append(true_image_shape)
images = tf.concat(preprocessed_images, 0)
true_image_shapes = tf.concat(true_image_shapes, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
if any(keypoints is None for keypoints in groundtruth_keypoints_list):
groundtruth_keypoints_list = None
detection_model.provide_groundtruth(
groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list,
groundtruth_keypoints_list,
groundtruth_weights_list=groundtruth_weights_list)
prediction_dict = detection_model.predict(images, true_image_shapes)
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)
for loss_tensor in losses_dict.values():
tf.losses.add_loss(loss_tensor)
def train(create_tensor_dict_fn,
create_model_fn,
train_config,
master,
task,
num_clones,
worker_replicas,
clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
train_dir,
graph_hook_fn=None):
"""Training function for detection models.
Args:
create_tensor_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel and generates
losses.
train_config: a train_pb2.TrainConfig protobuf.
master: BNS name of the TensorFlow master to use.
task: The task id of this training instance.
num_clones: The number of clones to run per machine.
worker_replicas: The number of work replicas to train with.
clone_on_cpu: True if clones should be forced to run on CPU.
ps_tasks: Number of parameter server tasks.
worker_job_name: Name of the worker job.
is_chief: Whether this replica is the chief replica.
train_dir: Directory to write checkpoints and training summaries to.
graph_hook_fn: Optional function that is called after the inference graph is
built (before optimization). This is helpful to perform additional changes
to the training graph such as adding FakeQuant ops. The function should
modify the default graph.
Raises:
ValueError: If both num_clones > 1 and train_config.sync_replicas is true.
"""
detection_model = create_model_fn()
data_augmentation_options = [
preprocessor_builder.build(step)
for step in train_config.data_augmentation_options]
with tf.Graph().as_default():
# Build a configuration specifying multi-GPU and multi-replicas.
deploy_config = model_deploy.DeploymentConfig(
num_clones=num_clones,
clone_on_cpu=clone_on_cpu,
replica_id=task,
num_replicas=worker_replicas,
num_ps_tasks=ps_tasks,
worker_job_name=worker_job_name)
# Place the global step on the device storing the variables.
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
if num_clones != 1 and train_config.sync_replicas:
raise ValueError('In Synchronous SGD mode num_clones must ',
'be 1. Found num_clones: {}'.format(num_clones))
batch_size = train_config.batch_size // num_clones
if train_config.sync_replicas:
batch_size //= train_config.replicas_to_aggregate
with tf.device(deploy_config.inputs_device()):
input_queue = create_input_queue(
batch_size, create_tensor_dict_fn,
train_config.batch_queue_capacity,
train_config.num_batch_queue_threads,
train_config.prefetch_queue_capacity, data_augmentation_options)
# Gather initial summaries.
# TODO(rathodv): See if summaries can be added/extracted from global tf
# collections so that they don't have to be passed around.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
global_summaries = set([])
model_fn = functools.partial(_create_losses,
create_model_fn=create_model_fn,
train_config=train_config)
clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue])
first_clone_scope = clones[0].scope
if graph_hook_fn:
with tf.device(deploy_config.variables_device()):
graph_hook_fn()
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
with tf.device(deploy_config.optimizer_device()):
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var, family='LearningRate')
sync_optimizer = None
if train_config.sync_replicas:
training_optimizer = tf.train.SyncReplicasOptimizer(
training_optimizer,
replicas_to_aggregate=train_config.replicas_to_aggregate,
total_num_replicas=worker_replicas)
sync_optimizer = training_optimizer
with tf.device(deploy_config.optimizer_device()):
regularization_losses = (None if train_config.add_regularization_loss
else [])
total_loss, grads_and_vars = model_deploy.optimize_clones(
clones, training_optimizer,
regularization_losses=regularization_losses)
total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.')
# Optionally multiply bias gradients by train_config.bias_grad_multiplier.
if train_config.bias_grad_multiplier:
biases_regex_list = ['.*/biases']
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars,
biases_regex_list,
multiplier=train_config.bias_grad_multiplier)
# Optionally freeze some layers by setting their gradients to be zero.
if train_config.freeze_variables:
grads_and_vars = variables_helper.freeze_gradients_matching_regex(
grads_and_vars, train_config.freeze_variables)
# Optionally clip gradients
if train_config.gradient_clipping_by_norm > 0:
with tf.name_scope('clip_grads'):
grads_and_vars = slim.learning.clip_gradient_norms(
grads_and_vars, train_config.gradient_clipping_by_norm)
# Create gradient updates.
grad_updates = training_optimizer.apply_gradients(grads_and_vars,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops, name='update_barrier')
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# Add summaries.
for model_var in slim.get_model_variables():
global_summaries.add(tf.summary.histogram('ModelVars/' +
model_var.op.name, model_var))
for loss_tensor in tf.losses.get_losses():
global_summaries.add(tf.summary.scalar('Losses/' + loss_tensor.op.name,
loss_tensor))
global_summaries.add(
tf.summary.scalar('Losses/TotalLoss', tf.losses.get_total_loss()))
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
summaries |= global_summaries
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
# Soft placement allows placing on CPU ops without GPU implementation.
session_config = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
# Save checkpoints regularly.
keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours
saver = tf.train.Saver(
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
# Create ops required to initialize the model from a given checkpoint.
init_fn = None
if train_config.fine_tune_checkpoint:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, fine_tune_checkpoint_type is set based on
# from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
var_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (variables_helper.
get_variables_available_in_checkpoint(
var_map, train_config.fine_tune_checkpoint,
include_global_step=False))
init_saver = tf.train.Saver(available_var_map)
def initializer_fn(sess):
init_saver.restore(sess, train_config.fine_tune_checkpoint)
init_fn = initializer_fn
slim.learning.train(
train_tensor,
logdir=train_dir,
master=master,
is_chief=is_chief,
session_config=session_config,
startup_delay_steps=train_config.startup_delay_steps,
init_fn=init_fn,
summary_op=summary_op,
number_of_steps=(
train_config.num_steps if train_config.num_steps else None),
save_summaries_secs=120,
sync_optimizer=sync_optimizer,
saver=saver)
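# Illustrative usage sketch (not part of the original module). In the Object
# Detection API this function is normally invoked from a thin train.py wrapper,
# roughly as below, assuming `input_fn` builds the input tensor dict, `model_fn`
# builds the DetectionModel, and `train_config` is a parsed train_pb2.TrainConfig:
#
#   train(create_tensor_dict_fn=input_fn,
#         create_model_fn=model_fn,
#         train_config=train_config,
#         master='',
#         task=0,
#         num_clones=1,
#         worker_replicas=1,
#         clone_on_cpu=False,
#         ps_tasks=0,
#         worker_job_name='worker',
#         is_chief=True,
#         train_dir='/tmp/train_dir')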
|
the-stack_0_20905 | #!/usr/bin/env python2.7
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run a pinned gsutil."""
import argparse
import base64
import contextlib
import hashlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import time
import urllib2
import zipfile
GSUTIL_URL = 'https://storage.googleapis.com/pub/'
API_URL = 'https://www.googleapis.com/storage/v1/b/pub/o/'
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_BIN_DIR = os.path.join(THIS_DIR, 'external_bin', 'gsutil')
DEFAULT_FALLBACK_GSUTIL = os.path.join(
THIS_DIR, 'third_party', 'gsutil', 'gsutil')
IS_WINDOWS = os.name == 'nt'
class InvalidGsutilError(Exception):
pass
def download_gsutil(version, target_dir):
"""Downloads gsutil into the target_dir."""
filename = 'gsutil_%s.zip' % version
target_filename = os.path.join(target_dir, filename)
# Check if the target exists already.
if os.path.exists(target_filename):
md5_calc = hashlib.md5()
with open(target_filename, 'rb') as f:
while True:
buf = f.read(4096)
if not buf:
break
md5_calc.update(buf)
local_md5 = md5_calc.hexdigest()
metadata_url = '%s%s' % (API_URL, filename)
metadata = json.load(urllib2.urlopen(metadata_url))
    # md5Hash is the base64 of the raw digest; hex-encode it so it compares
    # against the local hexdigest.
    remote_md5 = base64.b64decode(metadata['md5Hash']).encode('hex')
if local_md5 == remote_md5:
return target_filename
os.remove(target_filename)
# Do the download.
url = '%s%s' % (GSUTIL_URL, filename)
u = urllib2.urlopen(url)
with open(target_filename, 'wb') as f:
while True:
buf = u.read(4096)
if not buf:
break
f.write(buf)
return target_filename
@contextlib.contextmanager
def temporary_directory(base):
tmpdir = tempfile.mkdtemp(prefix='gsutil_py', dir=base)
try:
yield tmpdir
finally:
if os.path.isdir(tmpdir):
shutil.rmtree(tmpdir)
def ensure_gsutil(version, target, clean):
bin_dir = os.path.join(target, 'gsutil_%s' % version)
gsutil_bin = os.path.join(bin_dir, 'gsutil', 'gsutil')
gsutil_flag = os.path.join(bin_dir, 'gsutil', 'install.flag')
# We assume that if gsutil_flag exists, then we have a good version
# of the gsutil package.
if not clean and os.path.isfile(gsutil_flag):
# Everything is awesome! we're all done here.
return gsutil_bin
if not os.path.exists(target):
os.makedirs(target)
with temporary_directory(target) as instance_dir:
# Clean up if we're redownloading a corrupted gsutil.
cleanup_path = os.path.join(instance_dir, 'clean')
try:
os.rename(bin_dir, cleanup_path)
except (OSError, IOError):
cleanup_path = None
if cleanup_path:
shutil.rmtree(cleanup_path)
download_dir = os.path.join(instance_dir, 'download')
target_zip_filename = download_gsutil(version, instance_dir)
with zipfile.ZipFile(target_zip_filename, 'r') as target_zip:
target_zip.extractall(download_dir)
try:
os.rename(download_dir, bin_dir)
except (OSError, IOError):
# Something else did this in parallel.
pass
# Final check that the gsutil bin exists. This should never fail.
if not os.path.isfile(gsutil_bin):
raise InvalidGsutilError()
# Drop a flag file.
with open(gsutil_flag, 'w') as f:
f.write('This flag file is dropped by gsutil.py')
return gsutil_bin
def run_gsutil(force_version, fallback, target, args, clean=False):
if force_version:
gsutil_bin = ensure_gsutil(force_version, target, clean)
else:
gsutil_bin = fallback
disable_update = ['-o', 'GSUtil:software_update_check_period=0']
if sys.platform == 'cygwin':
# This script requires Windows Python, so invoke with depot_tools'
# Python.
def winpath(path):
return subprocess.check_output(['cygpath', '-w', path]).strip()
cmd = ['python.bat', winpath(__file__)]
cmd.extend(args)
sys.exit(subprocess.call(cmd))
assert sys.platform != 'cygwin'
# Run "gsutil" through "vpython". We need to do this because on GCE instances,
# expectations are made about Python having access to "google-compute-engine"
# and "boto" packages that are not met with non-system Python (e.g., bundles).
cmd = [
'vpython',
'-vpython-spec', os.path.join(THIS_DIR, 'gsutil.vpython'),
'--',
gsutil_bin
] + disable_update + args
return subprocess.call(cmd, shell=IS_WINDOWS)
def parse_args():
bin_dir = os.environ.get('DEPOT_TOOLS_GSUTIL_BIN_DIR', DEFAULT_BIN_DIR)
parser = argparse.ArgumentParser()
parser.add_argument('--force-version', default='4.28')
parser.add_argument('--clean', action='store_true',
help='Clear any existing gsutil package, forcing a new download.')
parser.add_argument('--fallback', default=DEFAULT_FALLBACK_GSUTIL)
parser.add_argument('--target', default=bin_dir,
help='The target directory to download/store a gsutil version in. '
'(default is %(default)s).')
parser.add_argument('args', nargs=argparse.REMAINDER)
args, extras = parser.parse_known_args()
if args.args and args.args[0] == '--':
args.args.pop(0)
if extras:
args.args = extras + args.args
return args
def main():
args = parse_args()
return run_gsutil(args.force_version, args.fallback, args.target, args.args,
clean=args.clean)
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_20906 | """
Petrophysically guided inversion (PGI): Linear example
======================================================
We do a comparison between the classic Tikhonov inversion
and our formulation of a petrophysically constrained inversion.
We explore it through the UBC linear example.
"""
#####################
# Tikhonov Inversion#
#####################
import discretize as Mesh
from SimPEG import (
simulation,
maps,
data_misfit,
directives,
optimization,
regularization,
inverse_problem,
inversion,
utils,
)
import numpy as np
import matplotlib.pyplot as plt
# Random seed for reproducibility
np.random.seed(1)
# Mesh
N = 100
mesh = Mesh.TensorMesh([N])
# Survey design parameters
nk = 20
jk = np.linspace(1.0, 60.0, nk)
p = -0.25
q = 0.25
# Physics
def g(k):
return np.exp(p * jk[k] * mesh.cell_centers_x) * np.cos(
np.pi * q * jk[k] * mesh.cell_centers_x
)
G = np.empty((nk, mesh.nC))
for i in range(nk):
G[i, :] = g(i)
# True model
mtrue = np.zeros(mesh.nC)
mtrue[mesh.cell_centers_x > 0.2] = 1.0
mtrue[mesh.cell_centers_x > 0.35] = 0.0
t = (mesh.cell_centers_x - 0.65) / 0.25
indx = np.abs(t) < 1
mtrue[indx] = -(((1 - t**2.0) ** 2.0)[indx])
mtrue = np.zeros(mesh.nC)
mtrue[mesh.cell_centers_x > 0.3] = 1.0
mtrue[mesh.cell_centers_x > 0.45] = -0.5
mtrue[mesh.cell_centers_x > 0.6] = 0
# SimPEG problem and survey
prob = simulation.LinearSimulation(mesh, G=G, model_map=maps.IdentityMap())
std = 0.01
survey = prob.make_synthetic_data(mtrue, relative_error=std, add_noise=True)
# Setup the inverse problem
reg = regularization.Tikhonov(mesh, alpha_s=1.0, alpha_x=1.0)
dmis = data_misfit.L2DataMisfit(data=survey, simulation=prob)
opt = optimization.ProjectedGNCG(maxIter=10, maxIterCG=50, tolCG=1e-4)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
directiveslist = [
directives.BetaEstimate_ByEig(beta0_ratio=1e-5),
directives.BetaSchedule(coolingFactor=10.0, coolingRate=2),
directives.TargetMisfit(),
]
inv = inversion.BaseInversion(invProb, directiveList=directiveslist)
m0 = np.zeros_like(mtrue)
mnormal = inv.run(m0)
#########################################
# Petrophysically constrained inversion #
#########################################
# fit a Gaussian Mixture Model with n components
# on the true model to simulate the laboratory
# petrophysical measurements
n = 3
clf = utils.WeightedGaussianMixture(
mesh=mesh,
n_components=n,
covariance_type="full",
max_iter=100,
n_init=3,
reg_covar=5e-4,
)
clf.fit(mtrue.reshape(-1, 1))
# Petrophyically constrained regularization
reg = utils.make_PGI_regularization(
gmmref=clf,
mesh=mesh,
alpha_s=1.0,
alpha_x=1.0,
)
# Optimization
opt = optimization.ProjectedGNCG(maxIter=10, maxIterCG=50, tolCG=1e-4)
opt.remember("xc")
# Setup new inverse problem
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
# directives
Alphas = directives.AlphasSmoothEstimate_ByEig(alpha0_ratio=10.0, verbose=True)
beta = directives.BetaEstimate_ByEig(beta0_ratio=1e-6)
betaIt = directives.PGI_BetaAlphaSchedule(
verbose=True,
coolingFactor=2.0,
warmingFactor=1.0,
tolerance=0.1,
update_rate=1,
progress=0.2,
)
targets = directives.MultiTargetMisfits(verbose=True)
petrodir = directives.PGI_UpdateParameters()
addmref = directives.PGI_AddMrefInSmooth(verbose=True)
# Setup Inversion
inv = inversion.BaseInversion(
invProb, directiveList=[Alphas, beta, petrodir, targets, addmref, betaIt]
)
# Initial model same as for Tikhonov
mcluster = inv.run(m0)
# Final Plot
fig, axes = plt.subplots(1, 3, figsize=(12 * 1.2, 4 * 1.2))
for i in range(prob.G.shape[0]):
axes[0].plot(prob.G[i, :])
axes[0].set_title("Columns of matrix G")
axes[1].hist(mtrue, bins=20, linewidth=3.0, density=True, color="k")
axes[1].set_xlabel("Model value")
axes[1].set_xlabel("Occurence")
axes[1].hist(mnormal, bins=20, density=True, color="b")
axes[1].hist(mcluster, bins=20, density=True, color="r")
axes[1].legend(["Mtrue Hist.", "L2 Model Hist.", "PGI Model Hist."])
axes[2].plot(mesh.cell_centers_x, mtrue, color="black", linewidth=3)
axes[2].plot(mesh.cell_centers_x, mnormal, color="blue")
axes[2].plot(mesh.cell_centers_x, mcluster, "r-")
axes[2].plot(mesh.cell_centers_x, invProb.reg.objfcts[0].mref, "r--")
axes[2].legend(("True Model", "L2 Model", "PGI Model", "Learned Mref"))
axes[2].set_ylim([-2, 2])
plt.show()
|
the-stack_0_20910 | from typing import Optional
from enum import Enum, unique
@unique
class BinaryTreeChildSelect(Enum):
    left = 0
right = 1
class BinaryTreeNode(object):
"""
树结点 继承于 树
"""
def __init__(self, left_node=None, right_node=None, value=None):
super().__init__()
self.left_node = left_node
self.right_node = right_node
self.value = value
def __repr__(self):
return f"<BinaryTreeNode value='{self.value}'>"
@property
def depth(self):
if self.left_node is None:
left_child_depth = 0
else:
left_child_depth = self.left_node.depth
if self.right_node is None:
right_child_depth = 0
else:
right_child_depth = self.right_node.depth
return max(left_child_depth, right_child_depth) + 1
    def insert_child(self, select: BinaryTreeChildSelect, node):
        # The inserted node must not already have a right subtree
        if node.right_node is not None:
            raise ValueError("The right child of the inserted node is not empty!")
        if select == BinaryTreeChildSelect.left:
            src_node = self.left_node
        elif select == BinaryTreeChildSelect.right:
            src_node = self.right_node
        else:
            raise TypeError
        # The inserted node's right child becomes the node previously at the
        # insertion position
        node.right_node = src_node
        if select == BinaryTreeChildSelect.left:
            self.left_node = node
        else:
            self.right_node = node
def pre_order_traverse(self, visit_callback, *extra_info):
"""
使用指定的遍历回调函数前序遍历当前结点
:param visit_callback:
:param extra_info:
:return:
"""
result = visit_callback(self, *extra_info)
if result:
return result
if self.left_node is not None:
result = self.left_node.pre_order_traverse(visit_callback, *extra_info)
if result:
return result
if self.right_node is not None:
result = self.right_node.pre_order_traverse(visit_callback, *extra_info)
if result:
return result
def in_order_traverse(self, visit_callback, *extra_info):
"""
使用指定的遍历回调函数中序遍历当前结点
:param visit_callback:
:param extra_info:
:return:
"""
if self.left_node is not None:
result = self.left_node.in_order_traverse(visit_callback, *extra_info)
if result:
return result
result = visit_callback(self, *extra_info)
if result:
return result
if self.right_node is not None:
result = self.right_node.in_order_traverse(visit_callback, *extra_info)
if result:
return result
def post_order_traverse(self, visit_callback, *extra_info):
"""
使用指定的遍历回调函数后序遍历当前结点
:param visit_callback:
:param extra_info:
:return:
"""
if self.left_node is not None:
result = self.left_node.post_order_traverse(visit_callback, *extra_info)
if result:
return result
if self.right_node is not None:
result = self.right_node.post_order_traverse(visit_callback, *extra_info)
if result:
return result
result = visit_callback(self, *extra_info)
if result:
return result
@property
def end_node_count(self):
if self.left_node is None:
left_child_end_node = 0
else:
left_child_end_node = self.left_node.end_node_count
if self.right_node is None:
right_child_end_node = 0
else:
right_child_end_node = self.right_node.end_node_count
        # If neither child contains a leaf node, this node itself is a leaf
        # (return 1); otherwise return the sum of the children's leaf counts
if not any([left_child_end_node, right_child_end_node]):
return 1
else:
return left_child_end_node + right_child_end_node
class BinaryTree:
"""
二叉树
"""
# 空结点占位符
VOID_NODE_PLACEHOLDER = "$"
def __init__(self, root=None):
"""实例化新的树实例"""
self.root = root
@staticmethod
def create(definition):
"""根据前序字符串二叉树定义创建二叉树"""
char_list = list(definition)
def recursion():
if not char_list:
return None
ch = char_list.pop(0)
if ch == BinaryTree.VOID_NODE_PLACEHOLDER:
return None
else:
new_node = BinaryTreeNode()
new_node.left_node = recursion()
new_node.right_node = recursion()
new_node.value = ch
return new_node
return BinaryTree(recursion())
@property
def is_empty(self):
"""判断当前树是否为空树"""
return self.root is None
@property
def depth(self):
"""获取当前树的深度"""
if self.root is None:
return 0
return self.root.depth
@property
def end_node_count(self):
"""
获取当前树的终端结点总数
:return:
"""
if self.root is None:
return 0
return self.root.end_node_count
def get_node_parent(self, node_select_callback):
"""
使用先序遍历获取选择回调函数指定结点的父结点
:return:
"""
if self.root is None:
return None
def traverse_callback(node):
if (node.left_node and node_select_callback(node.left_node)) or \
(node.right_node and node_select_callback(node.right_node)):
return node
return self.root.pre_order_traverse(traverse_callback)
def get_node_sibling(self, node_select_callback):
"""
使用先序遍历获取选择回调函数指定结点的兄弟结点
:return:
"""
if self.root is None:
return None
def traverse_callback(node):
            # If the left child exists and matches the callback, return the right child
if node.left_node and node_select_callback(node.left_node):
return node.right_node
            # If the right child exists and matches the callback, return the left child
if node.right_node and node_select_callback(node.right_node):
return node.left_node
return self.root.pre_order_traverse(traverse_callback)
def clear(self):
"""清空树"""
self.root = None
def is_exist(self, node_select_callback) -> bool:
"""
判断当前树中是否存在符合指定结点选择回调函数的结点
:param node: 欲查询的指定结点
:return: 存在返回 True,否则返回 False
"""
if self.root is None:
return False
def exist_callback(node):
if node_select_callback(node):
return True
return bool(self.root.pre_order_traverse(exist_callback))
def pre_order_traverse(self, visit_callback, *extra_info):
"""
使用指定访问回调函数对当前树的结点进行先序遍历访问
:param visit_callback: 访问器
:param extra_info: 给访问函数的额外参数
:return: None
"""
if self.root is not None:
self.root.pre_order_traverse(visit_callback, *extra_info)
def in_order_traverse(self, visit_callback, *extra_info):
"""
使用指定访问回调函数对当前树的结点进行中序遍历访问
:param visit_callback:
:param extra_info: 给访问函数的额外参数
:return:
"""
if self.root is not None:
self.root.in_order_traverse(visit_callback, *extra_info)
def post_order_traverse(self, visit_callback, *extra_info):
"""
使用指定访问回调函数对当前树的结点进行后序遍历访问
:param visit_callback:
:param extra_info: 给访问函数的额外参数
:return:
"""
if self.root is not None:
self.root.post_order_traverse(visit_callback, *extra_info)
class HuffmanTreeNode(BinaryTreeNode):
"""
哈夫曼树结点
与一般的二叉树结点不同的是:
1. 这个结点可以进行小于比较,值只能为整数
2. 可以进行加法操作,得到一个新的哈夫曼树结点,其左孩子为原加法中值较小的一方,右孩子为值较大的一方,新结点的值为原两个结点值之和
"""
def __init__(self, name=None, left_node=None, right_node=None, value=None):
super().__init__(left_node, right_node)
if not isinstance(value, int):
raise TypeError
self.name = name
self.value = value
self.child_status = -1
def __repr__(self):
return f"<HuffmanTreeNode name='{self.name}' value='{self.value}'>"
def __lt__(self, other):
return self.value < other.value
def __add__(self, other):
if isinstance(other, HuffmanTreeNode):
min_node = min(self, other)
sum_val = self.value + other.value
if min_node is self:
self.child_status = 0
other.child_status = 1
return HuffmanTreeNode(left_node=self, right_node=other, value=sum_val)
else:
self.child_status = 1
other.child_status = 0
return HuffmanTreeNode(left_node=other, right_node=self, value=sum_val)
raise NotImplementedError
def pre_order_traverse(self, visit_callback, *extra_info):
"""
使用指定的遍历回调函数前序遍历当前结点
:param visit_callback:
:param extra_info:
:return:
"""
# 拷贝当前递归层次的编码信息,这样,即使下一层某个分支对编码信息做了修改,也不会将影响传入到另一分支
extra_info = extra_info[0].copy()
super().pre_order_traverse(visit_callback, extra_info)
class HuffmanTree(BinaryTree):
"""
哈夫曼树
"""
@staticmethod
def create(definition_dict):
"""根据前序字符串二叉树定义创建哈夫曼树"""
        node_list = [HuffmanTreeNode(name=name, value=val) for name, val in definition_dict.items() if
                     name != HuffmanTree.VOID_NODE_PLACEHOLDER]  # ignore the empty-tree placeholder
while len(node_list) >= 2:
min_node_1 = node_list.pop() if len(node_list) == 1 else node_list.pop(node_list.index(min(*node_list)))
min_node_2 = node_list.pop() if len(node_list) == 1 else node_list.pop(node_list.index(min(*node_list)))
node_list.append(min_node_1 + min_node_2)
        # In the normal case exactly one node remains at this point
if node_list:
return HuffmanTree(node_list.pop())
        # Otherwise the original list contained no nodes at all; return an empty tree
return HuffmanTree()
def dump_code_dict(self):
"""获取当前哈夫曼树的哈夫曼编码"""
code_dict = {}
def visit_callback(node, *extra_info):
            if node.child_status == -1:  # root node
return
extra_info[0].append(node.child_status)
            if node.name is not None:  # a non-empty name marks a leaf node
code_dict[node.name] = extra_info[0].copy()
self.pre_order_traverse(visit_callback, [])
return code_dict
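if __name__ == "__main__":
    # Illustrative demo (not part of the original module; the literals below are
    # made-up examples): build a small binary tree from a pre-order definition
    # and dump Huffman codes for a simple symbol/weight table.
    demo_tree = BinaryTree.create("AB$$C$$")
    print(demo_tree.depth)           # -> 2
    print(demo_tree.end_node_count)  # -> 2
    demo_huffman = HuffmanTree.create({"a": 5, "b": 9, "c": 12, "d": 13})
    print(demo_huffman.dump_code_dict())  # -> {'a': [0, 0], 'b': [0, 1], 'c': [1, 0], 'd': [1, 1]}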
|
the-stack_0_20911 | import os
import Database
import encode
from flask import Flask, flash, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = 'faces'
ALLOWED_EXTENSIONS = set(['jpg', 'png'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
database = "faceStudent.db"
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# Collect Data
name = request.form.get('firstName')
surname = request.form.get('surname')
student_number = request.form.get('number')
# Check if the post request has a file part
if 'file' not in request.files:
flash('No file Part')
return render_template("index.html")
file = request.files['file']
# if user does not select a file, browser also
# submit an empty part without a filename
if file.filename == '':
flash('No file Selected')
return render_template("index.html", message="No file Selected")
if file and allowed_file(file.filename):
# create a database connection
conn = Database.create_connection(database)
the_file_name = student_number + ".jpg"
with conn:
student_data = (student_number, name, surname, the_file_name)
Database.create_student(conn, student_data)
Database.select_all_students(conn)
conn.close()
filename = secure_filename(the_file_name)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# Train Face Data
encode.get_encoded_faces()
return redirect(url_for('upload_file', filename=filename))
return render_template("index.html")
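if __name__ == '__main__':
    # Assumed entry point (not present in the original snippet): run the Flask
    # development server locally so the upload form can be exercised.
    app.run(debug=True)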
|
the-stack_0_20914 | """
Test the multi-objective optimization algorithm.
"""
import matplotlib
matplotlib.use('PS')
import seaborn as sns
sns.set_style("white")
sns.set_context("paper")
import sys
import os
import GPy
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import pydes
import numpy as np
from pyDOE import *
import design  # assumed: the py-design package, which provides design.latin_center used below
from scipy.optimize import minimize
from scipy.optimize import check_grad
from scipy.optimize import approx_fprime
from example_objective_functions import ObjFunc2
import shutil
if __name__ == '__main__':
assert len(sys.argv)==3
sigma = sys.argv[1]
n = int(sys.argv[2])
out_dir = 'ex2_results_n={0:d}_sigma={1:s}'.format(n,sys.argv[1])
if os.path.isdir(out_dir):
shutil.rmtree(out_dir)
os.makedirs(out_dir)
dim = 6
max_it = 2
obj_funcs = ObjFunc2(sigma=sigma, n_samp=1)
obj_funcs_true = ObjFunc2(sigma=sigma, n_samp=100)
X_init = design.latin_center(n, dim, seed=123455)
Y_init = np.array([obj_funcs(x) for x in X_init])
X_d_true = design.latin_center(10000, 6, seed=12345)
Y_true = np.array([obj_funcs_true(x) for x in X_d_true])
ehvi_opt_bounds = ((0, 1), ) * dim
trans_function = lambda y: y
p = pydes.ParetoFront(X_init, Y_init, obj_funcs, obj_funcs_true,
Y_true=Y_true,
ehvi_opt_bounds=ehvi_opt_bounds,
X_design=1000,
max_it=max_it,
gp_fixed_noise=None,
verbosity=1,
kernel_type=GPy.kern.Matern32,
do_posterior_samples=True,
how='max',
trans_function=trans_function,
lim=None,
pareto_how='max',
figname=os.path.join(out_dir,'ex2'))
p.optimize(plot=True) |
the-stack_0_20917 | # -*- coding: utf-8 -*-
'''
Module for handling openstack keystone calls.
:optdepends: - keystoneclient Python adapter
:configuration: This module is not usable until the following are specified
either in a pillar or in the minion's config file:
.. code-block:: yaml
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
OR (for token based authentication)
.. code-block:: yaml
keystone.token: 'ADMIN'
keystone.endpoint: 'http://127.0.0.1:35357/v2.0'
If configuration for multiple openstack accounts is required, they can be
set up as different configuration profiles. For example:
.. code-block:: yaml
openstack1:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
openstack2:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.2:5000/v2.0/'
With this configuration in place, any of the keystone functions can make use
of a configuration profile by declaring it explicitly.
For example:
.. code-block:: bash
salt '*' keystone.tenant_list profile=openstack1
'''
# Import third party libs
HAS_KEYSTONE = False
try:
from keystoneclient.v2_0 import client
import keystoneclient.exceptions
HAS_KEYSTONE = True
except ImportError:
pass
def __virtual__():
'''
Only load this module if keystone
is installed on this minion.
'''
if HAS_KEYSTONE:
return 'keystone'
return False
__opts__ = {}
def auth(profile=None, **connection_args):
'''
Set up keystone credentials
Only intended to be used within Keystone-enabled modules
'''
if profile:
prefix = profile + ":keystone."
else:
prefix = "keystone."
# look in connection_args first, then default to config file
def get(key, default=None):
return connection_args.get('connection_' + key,
__salt__['config.get'](prefix + key, default))
user = get('user', 'admin')
password = get('password', 'ADMIN')
tenant = get('tenant', 'admin')
tenant_id = get('tenant_id')
auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0/')
insecure = get('insecure', False)
token = get('token')
endpoint = get('endpoint', 'http://127.0.0.1:35357/v2.0')
if token:
kwargs = {'token': token,
'endpoint': endpoint}
else:
kwargs = {'username': user,
'password': password,
'tenant_name': tenant,
'tenant_id': tenant_id,
'auth_url': auth_url}
# 'insecure' keyword not supported by all v2.0 keystone clients
# this ensures it's only passed in when defined
if insecure:
kwargs['insecure'] = True
return client.Client(**kwargs)
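# Illustrative usage of auth() (not part of the original module): credentials can
# come from a named config profile or be passed ad hoc as ``connection_*``
# keyword overrides, e.g.
#
#   kstone = auth('openstack1')
#   kstone = auth(connection_user='admin',
#                 connection_password='verybadpass',
#                 connection_tenant='admin',
#                 connection_auth_url='http://127.0.0.1:5000/v2.0/')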
def ec2_credentials_create(user_id=None, name=None,
tenant_id=None, tenant=None,
profile=None, **connection_args):
'''
Create EC2-compatible credentials for user per tenant
CLI Examples:
.. code-block:: bash
salt '*' keystone.ec2_credentials_create name=admin tenant=admin
salt '*' keystone.ec2_credentials_create \
user_id=c965f79c4f864eaaa9c3b41904e67082 \
tenant_id=722787eb540849158668370dc627ec5f
'''
kstone = auth(profile, **connection_args)
if name:
user_id = user_get(name=name, profile=profile,
**connection_args)[name]['id']
if not user_id:
return {'Error': 'Could not resolve User ID'}
if tenant:
tenant_id = tenant_get(name=tenant, profile=profile,
**connection_args)[tenant]['id']
if not tenant_id:
return {'Error': 'Could not resolve Tenant ID'}
newec2 = kstone.ec2.create(user_id, tenant_id)
return {'access': newec2.access,
'secret': newec2.secret,
'tenant_id': newec2.tenant_id,
'user_id': newec2.user_id}
def ec2_credentials_delete(user_id=None, name=None, access_key=None,
profile=None, **connection_args):
'''
Delete EC2-compatible credentials
CLI Examples:
.. code-block:: bash
salt '*' keystone.ec2_credentials_delete \
860f8c2c38ca4fab989f9bc56a061a64 access_key=5f66d2f24f604b8bb9cd28886106f442
salt '*' keystone.ec2_credentials_delete name=admin \
access_key=5f66d2f24f604b8bb9cd28886106f442
'''
kstone = auth(profile, **connection_args)
if name:
user_id = user_get(name=name, profile=None, **connection_args)[name]['id']
if not user_id:
return {'Error': 'Could not resolve User ID'}
kstone.ec2.delete(user_id, access_key)
return 'ec2 key "{0}" deleted under user id "{1}"'.format(access_key,
user_id)
def ec2_credentials_get(user_id=None, name=None, access=None,
profile=None, **connection_args):
'''
Return ec2_credentials for a user (keystone ec2-credentials-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.ec2_credentials_get c965f79c4f864eaaa9c3b41904e67082 access=722787eb540849158668370dc627ec5f
salt '*' keystone.ec2_credentials_get user_id=c965f79c4f864eaaa9c3b41904e67082 access=722787eb540849158668370dc627ec5f
salt '*' keystone.ec2_credentials_get name=nova access=722787eb540849158668370dc627ec5f
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
if not access:
return {'Error': 'Access key is required'}
    ec2_credentials = kstone.ec2.get(user_id=user_id, access=access)
ret[ec2_credentials.user_id] = {'user_id': ec2_credentials.user_id,
'tenant': ec2_credentials.tenant_id,
'access': ec2_credentials.access,
'secret': ec2_credentials.secret}
return ret
def ec2_credentials_list(user_id=None, name=None, profile=None,
**connection_args):
'''
Return a list of ec2_credentials for a specific user (keystone ec2-credentials-list)
CLI Examples:
.. code-block:: bash
salt '*' keystone.ec2_credentials_list 298ce377245c4ec9b70e1c639c89e654
salt '*' keystone.ec2_credentials_list user_id=298ce377245c4ec9b70e1c639c89e654
salt '*' keystone.ec2_credentials_list name=jack
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
for ec2_credential in kstone.ec2.list(user_id):
ret[ec2_credential.user_id] = {'user_id': ec2_credential.user_id,
'tenant_id': ec2_credential.tenant_id,
'access': ec2_credential.access,
'secret': ec2_credential.secret}
return ret
def endpoint_get(service, profile=None, **connection_args):
'''
Return a specific endpoint (keystone endpoint-get)
CLI Example:
.. code-block:: bash
salt '*' keystone.endpoint_get nova
'''
kstone = auth(profile, **connection_args)
services = service_list(profile, **connection_args)
if service not in services:
return {'Error': 'Could not find the specified service'}
service_id = services[service]['id']
endpoints = endpoint_list(profile, **connection_args)
for endpoint in endpoints:
if endpoints[endpoint]['service_id'] == service_id:
return endpoints[endpoint]
return {'Error': 'Could not find endpoint for the specified service'}
def endpoint_list(profile=None, **connection_args):
'''
Return a list of available endpoints (keystone endpoints-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.endpoint_list
'''
kstone = auth(profile, **connection_args)
ret = {}
for endpoint in kstone.endpoints.list():
ret[endpoint.id] = {'id': endpoint.id,
'region': endpoint.region,
'adminurl': endpoint.adminurl,
'internalurl': endpoint.internalurl,
'publicurl': endpoint.publicurl,
'service_id': endpoint.service_id}
return ret
def endpoint_create(service, publicurl=None, internalurl=None, adminurl=None,
region=None, profile=None, **connection_args):
'''
Create an endpoint for an Openstack service
CLI Examples:
.. code-block:: bash
salt '*' keystone.endpoint_create nova 'http://public/url'
'http://internal/url' 'http://adminurl/url' region
'''
kstone = auth(profile, **connection_args)
keystone_service = service_get(name=service, **connection_args)
if not keystone_service or 'Error' in keystone_service:
return {'Error': 'Could not find the specified service'}
kstone.endpoints.create(region=region,
service_id=keystone_service[service]['id'],
publicurl=publicurl,
adminurl=adminurl,
internalurl=internalurl)
return endpoint_get(service, **connection_args)
def endpoint_delete(service, profile=None, **connection_args):
'''
Delete endpoints of an Openstack service
CLI Examples:
.. code-block:: bash
salt '*' keystone.endpoint_delete nova
'''
kstone = auth(profile, **connection_args)
endpoint = endpoint_get(service, profile, **connection_args)
if not endpoint or 'Error' in endpoint:
return {'Error': 'Could not find any endpoints for the service'}
kstone.endpoints.delete(endpoint['id'])
endpoint = endpoint_get(service, profile, **connection_args)
if not endpoint or 'Error' in endpoint:
return True
def role_create(name, profile=None, **connection_args):
'''
Create named role
.. code-block:: bash
salt '*' keystone.role_create admin
'''
kstone = auth(profile, **connection_args)
if 'Error' not in role_get(name=name, profile=profile, **connection_args):
return {'Error': 'Role "{0}" already exists'.format(name)}
role = kstone.roles.create(name)
return role_get(name=name, profile=profile, **connection_args)
def role_delete(role_id=None, name=None, profile=None,
**connection_args):
'''
Delete a role (keystone role-delete)
CLI Examples:
.. code-block:: bash
salt '*' keystone.role_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.role_delete role_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.role_delete name=admin
'''
kstone = auth(profile, **connection_args)
if name:
for role in kstone.roles.list():
if role.name == name:
role_id = role.id
break
if not role_id:
return {'Error': 'Unable to resolve role id'}
role = role_get(role_id, profile=profile, **connection_args)
kstone.roles.delete(role)
ret = 'Role ID {0} deleted'.format(role_id)
if name:
ret += ' ({0})'.format(name)
return ret
def role_get(role_id=None, name=None, profile=None, **connection_args):
'''
Return a specific roles (keystone role-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.role_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.role_get role_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.role_get name=nova
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for role in kstone.roles.list():
if role.name == name:
role_id = role.id
break
if not role_id:
return {'Error': 'Unable to resolve role id'}
role = kstone.roles.get(role_id)
ret[role.name] = {'id': role.id,
'name': role.name}
return ret
def role_list(profile=None, **connection_args):
'''
Return a list of available roles (keystone role-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.role_list
'''
kstone = auth(profile, **connection_args)
ret = {}
for role in kstone.roles.list():
ret[role.name] = {'id': role.id,
'name': role.name}
return ret
def service_create(name, service_type, description=None, profile=None,
**connection_args):
'''
Add service to Keystone service catalog
CLI Examples:
.. code-block:: bash
salt '*' keystone.service_create nova compute \
'OpenStack Compute Service'
'''
kstone = auth(profile, **connection_args)
service = kstone.services.create(name, service_type, description)
return service_get(service.id, profile=profile, **connection_args)
def service_delete(service_id=None, name=None, profile=None, **connection_args):
'''
Delete a service from Keystone service catalog
CLI Examples:
.. code-block:: bash
salt '*' keystone.service_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.service_delete name=nova
'''
kstone = auth(profile, **connection_args)
if name:
service_id = service_get(name=name, profile=profile,
**connection_args)[name]['id']
service = kstone.services.delete(service_id)
return 'Keystone service ID "{0}" deleted'.format(service_id)
def service_get(service_id=None, name=None, profile=None, **connection_args):
'''
Return a specific services (keystone service-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.service_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.service_get service_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.service_get name=nova
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for service in kstone.services.list():
if service.name == name:
service_id = service.id
break
if not service_id:
return {'Error': 'Unable to resolve service id'}
service = kstone.services.get(service_id)
ret[service.name] = {'id': service.id,
'name': service.name,
'type': service.type,
'description': service.description}
return ret
def service_list(profile=None, **connection_args):
'''
Return a list of available services (keystone services-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.service_list
'''
kstone = auth(profile, **connection_args)
ret = {}
for service in kstone.services.list():
ret[service.name] = {'id': service.id,
'name': service.name,
'description': service.description,
'type': service.type}
return ret
def tenant_create(name, description=None, enabled=True, profile=None,
**connection_args):
'''
Create a keystone tenant
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_create nova description='nova tenant'
salt '*' keystone.tenant_create test enabled=False
'''
kstone = auth(profile, **connection_args)
new = kstone.tenants.create(name, description, enabled)
return tenant_get(new.id, profile=profile, **connection_args)
def tenant_delete(tenant_id=None, name=None, profile=None, **connection_args):
'''
Delete a tenant (keystone tenant-delete)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_delete tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_delete name=demo
'''
kstone = auth(profile, **connection_args)
if name:
for tenant in kstone.tenants.list():
if tenant.name == name:
tenant_id = tenant.id
break
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
kstone.tenants.delete(tenant_id)
ret = 'Tenant ID {0} deleted'.format(tenant_id)
if name:
ret += ' ({0})'.format(name)
return ret
def tenant_get(tenant_id=None, name=None, profile=None,
**connection_args):
'''
Return a specific tenants (keystone tenant-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get name=nova
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for tenant in kstone.tenants.list():
if tenant.name == name:
tenant_id = tenant.id
break
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
tenant = kstone.tenants.get(tenant_id)
ret[tenant.name] = {'id': tenant.id,
'name': tenant.name,
'description': tenant.description,
'enabled': tenant.enabled}
return ret
def tenant_list(profile=None, **connection_args):
'''
Return a list of available tenants (keystone tenants-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.tenant_list
'''
kstone = auth(profile, **connection_args)
ret = {}
for tenant in kstone.tenants.list():
ret[tenant.name] = {'id': tenant.id,
'name': tenant.name,
'description': tenant.description,
'enabled': tenant.enabled}
return ret
def tenant_update(tenant_id=None, name=None, description=None,
enabled=None, profile=None, **connection_args):
'''
Update a tenant's information (keystone tenant-update)
The following fields may be updated: name, email, enabled.
Can only update name if targeting by ID
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_update name=admin enabled=True
salt '*' keystone.tenant_update c965f79c4f864eaaa9c3b41904e67082 name=admin [email protected]
'''
kstone = auth(profile, **connection_args)
if not tenant_id:
for tenant in kstone.tenants.list():
if tenant.name == name:
tenant_id = tenant.id
break
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
tenant = kstone.tenants.get(tenant_id)
if not name:
name = tenant.name
if not description:
description = tenant.description
if enabled is None:
enabled = tenant.enabled
kstone.tenants.update(tenant_id, name, description, enabled)
def token_get(profile=None, **connection_args):
'''
Return the configured tokens (keystone token-get)
CLI Example:
.. code-block:: bash
salt '*' keystone.token_get c965f79c4f864eaaa9c3b41904e67082
'''
kstone = auth(profile, **connection_args)
token = kstone.service_catalog.get_token()
return {'id': token['id'],
'expires': token['expires'],
'user_id': token['user_id'],
'tenant_id': token['tenant_id']}
def user_list(profile=None, **connection_args):
'''
Return a list of available users (keystone user-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.user_list
'''
kstone = auth(profile, **connection_args)
ret = {}
for user in kstone.users.list():
ret[user.name] = {'id': user.id,
'name': user.name,
'email': user.email,
'enabled': user.enabled}
tenant_id = getattr(user, 'tenantId', None)
if tenant_id:
ret[user.name]['tenant_id'] = tenant_id
return ret
def user_get(user_id=None, name=None, profile=None, **connection_args):
'''
Return a specific users (keystone user-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.user_get user_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.user_get name=nova
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
user = kstone.users.get(user_id)
ret[user.name] = {'id': user.id,
'name': user.name,
'email': user.email,
'enabled': user.enabled}
tenant_id = getattr(user, 'tenantId', None)
if tenant_id:
ret[user.name]['tenant_id'] = tenant_id
return ret
def user_create(name, password, email, tenant_id=None,
enabled=True, profile=None, **connection_args):
'''
Create a user (keystone user-create)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_create name=jack password=zero [email protected] tenant_id=a28a7b5a999a455f84b1f5210264375e enabled=True
'''
kstone = auth(profile, **connection_args)
item = kstone.users.create(name=name,
password=password,
email=email,
tenant_id=tenant_id,
enabled=enabled)
return user_get(item.id, profile=profile, **connection_args)
def user_delete(user_id=None, name=None, profile=None, **connection_args):
'''
Delete a user (keystone user-delete)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.user_delete user_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.user_delete name=nova
'''
kstone = auth(profile, **connection_args)
if name:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
kstone.users.delete(user_id)
ret = 'User ID {0} deleted'.format(user_id)
if name:
ret += ' ({0})'.format(name)
return ret
def user_update(user_id=None, name=None, email=None, enabled=None,
tenant=None, profile=None, **connection_args):
'''
Update a user's information (keystone user-update)
The following fields may be updated: name, email, enabled, tenant.
Because the name is one of the fields, a valid user id is required.
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_update user_id=c965f79c4f864eaaa9c3b41904e67082 name=newname
salt '*' keystone.user_update c965f79c4f864eaaa9c3b41904e67082 name=newname [email protected]
'''
kstone = auth(profile, **connection_args)
if not user_id:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
user = kstone.users.get(user_id)
# Keep previous settings if not updating them
if not name:
name = user.name
if not email:
email = user.email
if enabled is None:
enabled = user.enabled
kstone.users.update(user=user_id, name=name, email=email, enabled=enabled)
if tenant:
for t in kstone.tenants.list():
if t.name == tenant:
tenant_id = t.id
break
kstone.users.update_tenant(user_id, tenant_id)
ret = 'Info updated for user ID {0}'.format(user_id)
return ret
def user_verify_password(user_id=None, name=None, password=None,
profile=None, **connection_args):
'''
Verify a user's password
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_verify_password name=test password=foobar
salt '*' keystone.user_verify_password user_id=c965f79c4f864eaaa9c3b41904e67082 password=foobar
'''
kstone = auth(profile, **connection_args)
if 'connection_endpoint' in connection_args:
auth_url = connection_args.get('connection_endpoint')
else:
auth_url = __salt__['config.option']('keystone.endpoint',
'http://127.0.0.1:35357/v2.0')
if user_id:
for user in kstone.users.list():
if user.id == user_id:
name = user.name
break
if not name:
return {'Error': 'Unable to resolve user name'}
kwargs = {'username': name,
'password': password,
'auth_url': auth_url}
try:
userauth = client.Client(**kwargs)
except keystoneclient.exceptions.Unauthorized:
return False
return True
def user_password_update(user_id=None, name=None, password=None,
profile=None, **connection_args):
'''
Update a user's password (keystone user-password-update)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_password_update c965f79c4f864eaaa9c3b41904e67082 password=12345
salt '*' keystone.user_password_update user_id=c965f79c4f864eaaa9c3b41904e67082 password=12345
salt '*' keystone.user_password_update name=nova password=12345
'''
kstone = auth(profile, **connection_args)
if name:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
kstone.users.update_password(user=user_id, password=password)
ret = 'Password updated for user ID {0}'.format(user_id)
if name:
ret += ' ({0})'.format(name)
return ret
def user_role_add(user_id=None, user=None, tenant_id=None,
tenant=None, role_id=None, role=None, profile=None,
**connection_args):
'''
Add role for user in tenant (keystone user-role-add)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_role_add \
user_id=298ce377245c4ec9b70e1c639c89e654 \
tenant_id=7167a092ece84bae8cead4bf9d15bb3b \
role_id=ce377245c4ec9b70e1c639c89e8cead4
salt '*' keystone.user_role_add user=admin tenant=admin role=admin
'''
kstone = auth(profile, **connection_args)
if user:
user_id = user_get(name=user, profile=profile,
**connection_args)[user]['id']
else:
        user = next(iter(user_get(user_id, profile=profile,
                                  **connection_args)))
if not user_id:
return {'Error': 'Unable to resolve user id'}
if tenant:
tenant_id = tenant_get(name=tenant, profile=profile,
**connection_args)[tenant]['id']
else:
        tenant = next(iter(tenant_get(tenant_id, profile=profile,
                                      **connection_args)))
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
if role:
role_id = role_get(name=role, profile=profile,
**connection_args)[role]['id']
else:
        role = next(iter(role_get(role_id, profile=profile,
                                  **connection_args)))
if not role_id:
return {'Error': 'Unable to resolve role id'}
kstone.roles.add_user_role(user_id, role_id, tenant_id)
ret_msg = '"{0}" role added for user "{1}" for "{2}" tenant'
return ret_msg.format(role, user, tenant)
def user_role_remove(user_id=None, user=None, tenant_id=None,
tenant=None, role_id=None, role=None,
profile=None, **connection_args):
'''
Remove role for user in tenant (keystone user-role-remove)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_role_remove \
user_id=298ce377245c4ec9b70e1c639c89e654 \
tenant_id=7167a092ece84bae8cead4bf9d15bb3b \
role_id=ce377245c4ec9b70e1c639c89e8cead4
salt '*' keystone.user_role_remove user=admin tenant=admin role=admin
'''
kstone = auth(profile, **connection_args)
if user:
user_id = user_get(name=user, profile=profile,
**connection_args)[user]['id']
else:
        user = next(iter(user_get(user_id, profile=profile,
                                  **connection_args)))
if not user_id:
return {'Error': 'Unable to resolve user id'}
if tenant:
tenant_id = tenant_get(name=tenant, profile=profile,
**connection_args)[tenant]['id']
else:
        tenant = next(iter(tenant_get(tenant_id, profile=profile,
                                      **connection_args)))
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
if role:
role_id = role_get(name=role, profile=profile,
**connection_args)[role]['id']
else:
        role = next(iter(role_get(role_id, profile=profile,
                                  **connection_args)))
if not role_id:
return {'Error': 'Unable to resolve role id'}
kstone.roles.remove_user_role(user_id, role_id, tenant_id)
ret_msg = '"{0}" role removed for user "{1}" under "{2}" tenant'
return ret_msg.format(role, user, tenant)
def user_role_list(user_id=None, tenant_id=None, user_name=None,
tenant_name=None, profile=None, **connection_args):
'''
Return a list of available user_roles (keystone user-roles-list)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_role_list \
user_id=298ce377245c4ec9b70e1c639c89e654 \
tenant_id=7167a092ece84bae8cead4bf9d15bb3b
salt '*' keystone.user_role_list user_name=admin tenant_name=admin
'''
kstone = auth(profile, **connection_args)
ret = {}
if user_name:
for user in kstone.users.list():
if user.name == user_name:
user_id = user.id
break
if tenant_name:
for tenant in kstone.tenants.list():
if tenant.name == tenant_name:
tenant_id = tenant.id
break
if not user_id or not tenant_id:
return {'Error': 'Unable to resolve user or tenant id'}
for role in kstone.roles.roles_for_user(user=user_id, tenant=tenant_id):
ret[role.name] = {'id': role.id,
'name': role.name,
'user_id': user_id,
'tenant_id': tenant_id}
return ret
def _item_list(profile=None, **connection_args):
'''
Template for writing list functions
Return a list of available items (keystone items-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.item_list
'''
kstone = auth(profile, **connection_args)
ret = []
for item in kstone.items.list():
ret.append(item.__dict__)
#ret[item.name] = {
# 'id': item.id,
# 'name': item.name,
# }
return ret
#The following is a list of functions that need to be incorporated in the
#keystone module. This list should be updated as functions are added.
#
#endpoint-create Create a new endpoint associated with a service
#endpoint-delete Delete a service endpoint
#discover Discover Keystone servers and show authentication
# protocols and
#bootstrap Grants a new role to a new user on a new tenant, after
# creating each.
|
the-stack_0_20918 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for adding a backend to a backend service."""
from apitools.base.py import encoding
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.backend_services import backend_flags
from googlecloudsdk.command_lib.compute.backend_services import backend_services_utils
from googlecloudsdk.command_lib.compute.backend_services import flags
@base.ReleaseTracks(base.ReleaseTrack.GA)
class AddBackend(base.UpdateCommand):
"""Add a backend to a backend service.
*{command}* is used to add a backend to a backend service. A
backend is a group of tasks that can handle requests sent to a
backend service. Currently, the group of tasks can be one or
more Google Compute Engine virtual machine instances grouped
together using an instance group.
Traffic is first spread evenly across all virtual machines in
the group. When the group is full, traffic is sent to the next
nearest group(s) that still have remaining capacity.
To modify the parameters of a backend after it has been added
to the backend service, use
`gcloud compute backend-services update-backend` or
`gcloud compute backend-services edit`.
"""
@staticmethod
def Args(parser):
flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
backend_flags.AddDescription(parser)
flags.MULTISCOPE_INSTANCE_GROUP_ARG.AddArgument(
parser, operation_type='add to the backend service')
backend_flags.AddBalancingMode(parser)
backend_flags.AddCapacityLimits(parser)
backend_flags.AddCapacityScalar(parser)
def _GetGetRequest(self, client, backend_service_ref):
if backend_service_ref.Collection() == 'compute.regionBackendServices':
return (client.apitools_client.regionBackendServices,
'Get',
client.messages.ComputeRegionBackendServicesGetRequest(
backendService=backend_service_ref.Name(),
region=backend_service_ref.region,
project=backend_service_ref.project))
return (client.apitools_client.backendServices,
'Get',
client.messages.ComputeBackendServicesGetRequest(
backendService=backend_service_ref.Name(),
project=backend_service_ref.project))
def _GetSetRequest(self, client, backend_service_ref, replacement):
if backend_service_ref.Collection() == 'compute.regionBackendServices':
return (client.apitools_client.regionBackendServices,
'Update',
client.messages.ComputeRegionBackendServicesUpdateRequest(
backendService=backend_service_ref.Name(),
backendServiceResource=replacement,
region=backend_service_ref.region,
project=backend_service_ref.project))
return (client.apitools_client.backendServices,
'Update',
client.messages.ComputeBackendServicesUpdateRequest(
backendService=backend_service_ref.Name(),
backendServiceResource=replacement,
project=backend_service_ref.project))
def _CreateBackendMessage(self, messages, group_uri, balancing_mode, args):
"""Create a backend message.
Args:
      messages: The available API proto messages.
group_uri: String. The backend instance group uri.
balancing_mode: Backend.BalancingModeValueValuesEnum. The backend load
balancing mode.
args: argparse Namespace. The arguments given to the add-backend command.
Returns:
A new Backend message with its fields set according to the given
arguments.
"""
backend_services_utils.ValidateBalancingModeArgs(messages, args)
return messages.Backend(
balancingMode=balancing_mode,
capacityScaler=args.capacity_scaler,
description=args.description,
group=group_uri,
maxRate=args.max_rate,
maxRatePerInstance=args.max_rate_per_instance,
maxUtilization=args.max_utilization,
maxConnections=args.max_connections,
maxConnectionsPerInstance=args.max_connections_per_instance)
def _Modify(self, client, resources, backend_service_ref, args, existing):
replacement = encoding.CopyProtoMessage(existing)
group_ref = flags.MULTISCOPE_INSTANCE_GROUP_ARG.ResolveAsResource(
args,
resources,
scope_lister=compute_flags.GetDefaultScopeLister(client))
group_uri = group_ref.SelfLink()
for backend in existing.backends:
if group_uri == backend.group:
if group_ref.Collection() == 'compute.instanceGroups':
scope = 'zone'
elif group_ref.Collection() == 'compute.regionInstanceGroups':
scope = 'region'
raise exceptions.ToolException(
'Backend [{}] in {} [{}] already exists in backend service '
'[{}].'.format(group_ref.Name(),
scope,
getattr(group_ref, scope),
backend_service_ref.Name()))
if args.balancing_mode:
balancing_mode = client.messages.Backend.BalancingModeValueValuesEnum(
args.balancing_mode)
else:
balancing_mode = None
backend = self._CreateBackendMessage(client.messages, group_uri,
balancing_mode, args)
replacement.backends.append(backend)
return replacement
def Run(self, args):
"""Issues requests necessary to add backend to the Backend Service."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
backend_service_ref = (
flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.ResolveAsResource(
args,
holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(client)))
get_request = self._GetGetRequest(client, backend_service_ref)
objects = client.MakeRequests([get_request])
new_object = self._Modify(client, holder.resources, backend_service_ref,
args, objects[0])
return client.MakeRequests(
[self._GetSetRequest(client, backend_service_ref, new_object)])
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class AddBackendBeta(AddBackend):
"""Add a backend to a backend service.
*{command}* is used to add a backend to a backend service. A
backend is a group of tasks that can handle requests sent to a
backend service. Currently, the group of tasks can be one or
more Google Compute Engine virtual machine instances grouped
together using an instance group.
Traffic is first spread evenly across all virtual machines in
the group. When the group is full, traffic is sent to the next
nearest group(s) that still have remaining capacity.
To modify the parameters of a backend after it has been added
to the backend service, use
`gcloud compute backend-services update-backend` or
`gcloud compute backend-services edit`.
"""
@staticmethod
def Args(parser):
flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
backend_flags.AddDescription(parser)
flags.MULTISCOPE_INSTANCE_GROUP_ARG.AddArgument(
parser, operation_type='add to the backend service')
backend_flags.AddBalancingMode(parser)
backend_flags.AddCapacityLimits(parser)
backend_flags.AddCapacityScalar(parser)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AddBackendAlpha(AddBackendBeta):
"""Add a backend to a backend service.
*{command}* is used to add a backend to a backend service. A
backend is a group of tasks that can handle requests sent to a
backend service. Currently, the group of tasks can be one or
more Google Compute Engine virtual machine instances grouped
together using an instance group.
Traffic is first spread evenly across all virtual machines in
the group. When the group is full, traffic is sent to the next
nearest group(s) that still have remaining capacity.
To modify the parameters of a backend after it has been added
to the backend service, use
`gcloud compute backend-services update-backend` or
`gcloud compute backend-services edit`.
"""
@staticmethod
def Args(parser):
flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
backend_flags.AddDescription(parser)
flags.MULTISCOPE_INSTANCE_GROUP_ARG.AddArgument(
parser, operation_type='add to the backend service')
backend_flags.AddBalancingMode(parser)
backend_flags.AddCapacityLimits(parser)
backend_flags.AddCapacityScalar(parser)
backend_flags.AddFailover(parser, default=None)
def _CreateBackendMessage(self, messages, group_uri, balancing_mode, args):
"""Overrides."""
backend_services_utils.ValidateBalancingModeArgs(messages, args)
return messages.Backend(
balancingMode=balancing_mode,
capacityScaler=args.capacity_scaler,
description=args.description,
group=group_uri,
maxRate=args.max_rate,
maxRatePerInstance=args.max_rate_per_instance,
maxUtilization=args.max_utilization,
maxConnections=args.max_connections,
maxConnectionsPerInstance=args.max_connections_per_instance,
failover=args.failover)
|
the-stack_0_20919 | #!/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'rublog'
import requests
import re
import time
import random
import os
import shutil
from tqdm import tqdm
import config
import json
import getpass
def clear():
os.system('cls || clear')
class Renren(object):
def __init__(self):
self.is_login = 0
self.params = {'origURL': 'http://www.renren.com',
'email': config.EMAIL, 'password': config.PASSWORD}
self.get_symbol_code = re.compile('&failCode=(\d+)')
self.user_id = 0
self.login_url = config.LOGINURL
self.localtime = time.localtime()
self.bak_time = time.strftime('%Y-%m-%d %H:%M:%S', self.localtime)
self.this_year = self.localtime[0]
self.cookies = {}
self.s = requests.Session()
self.icode_url = config.ICODEURL
self.rtk = '1000'
self.requestToken = '1000'
self.user_name = 'No name!'
self.tiny_photo_url = 'http://www.renren.com'
self.dir_name = 'temp_name'
    # Login helper: POST the account name and password to the login URL
def post_data(self, login_url, params):
header = {
'Accept': 'text/html, application/xhtml+xml, */*',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'no-cache'
}
        o = self.s.post(login_url, headers=header, params=params)
return o
    # Login flow: keep trying until logged in, handling a captcha when one is required
def login(self):
while not self.is_login:
try_login = self.post_data(self.login_url, self.params)
is_success = re.search('ren\.com/\d+', try_login.url)
if not is_success:
symbol_code = self.get_symbol_code.search(try_login.url)
symbol_code_num = symbol_code.group(1)
if symbol_code_num == '512':
self.i_get_img()
img_content = input('打开文件夹查看并输入验证码:')
self.params['icode'] = img_content
continue
clear()
print(config.FAILCODE[symbol_code_num])
else:
user_page = try_login.url
# print(user_page)
self.is_login = 1
user_id = self.get_user_id(user_page)
# print(user_id)
self.user_id = user_id
user_page = 'http://www.renren.com/' + user_id + '/profile'
# print(user_page)
index = self.open_url(user_page)
# print(index.url)
# print(index.request.headers)
# print(index.headers)
# print(index.content.decode())
self.rtk = self.get_rtk(index.content.decode())
# print(self.rtk)
self.requestToken = self.get_requestToken(
index.content.decode())
self.get_user_tiny_photo_url(index.content.decode())
self.get_user_name(index.content.decode())
# print(self.requestToken)
# return user_id
# else:
# symbol_code = self.get_symbol_code.search(try_login.url)
# symbol_code_num = symbol_code.group(1)
# print(config.FAILCODE[symbol_code_num])
# if symbol_code_num == '512':
# else:
return 0
    # Download the login captcha image
def i_get_img(self):
pic_data = self.open_url(self.icode_url)
# print(pic_data.headers)
# print(pic_data.request.headers)
# print(type(pic_data.content))
with open('icode.jpg', 'wb') as f:
kk = f.read
f.write(pic_data.content)
return 0
    # Open a plain URL, or a URL combined with query parameters
    def open_url(self, url, params=0):
        # Open the URL and return the response object (callers decode the page source)
header = {
'Accept': 'text/html, application/xhtml+xml, */*',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'no-cache'
}
if not params:
o = self.s.get(url, headers=header)
# print('直接打开')
else:
o = self.s.get(url, params=params, headers=header)
# print('组合网址了')
# o_content_decode = o.content.decode()
# print(o.text)
# print(o.url)
# print(o.headers)
# print(o.content)
# print(o)
return o # .text
    # Get the user's profile page URL from the front page
def get_user_page(self, shouye):
user_page = re.findall('http://www.renren.com/\d*/profile', shouye)
return user_page
def get_user_tiny_photo_url(self, shouye):
tiny_photo_url = re.findall('src="(http:[\w/\.\-_/]{10,200}\.[gbjp][gifbmpnje]{1,2}[fgp])"\s+id="userpic',
shouye)
# print(tiny_photo_url)
if tiny_photo_url:
self.tiny_photo_url = tiny_photo_url[0]
return tiny_photo_url[0]
return 1000
def get_user_name(self, shouye):
user_name = re.findall(
'avatar_title\sno_auth">([\s\S]*?)<span>', shouye)
if user_name:
self.user_name = user_name[0].strip()
return user_name[0].strip()
return 1000
    # Extract the user's Renren id from the profile URL
def get_user_id(self, user_page):
user_id = re.findall('\d+', user_page)
return user_id[0]
def get_requestToken(self, shouye):
find_requestToken = re.findall("requestToken\s:\s'(-*\d+)'", shouye)
if find_requestToken:
requestToken = find_requestToken[0]
return requestToken
else:
return 1000
def get_rtk(self, shouye):
find_rtk = re.findall("_rtk\s:\s'(\w+)'", shouye)
if find_rtk:
rtk = find_rtk[0]
return rtk
else:
return 1000
    # Cut the HTML snippet of each status post out of a monthly page
def get_detailpage_in_monthly_page(self, monthly_page):
detailpage_in_monthly_page = re.findall(
'<section id="newsfeed[\s|\S]+?</section>', monthly_page)
return detailpage_in_monthly_page
    # Save each successfully parsed status post into the HTML file
def save_every_weibo(self, detailpage_list, file_name):
for detailpage in detailpage_list:
weibo_our_keep = self.join_weibo(detailpage)
if weibo_our_keep == 0:
continue
self.save_html(weibo_our_keep, file_name)
# print('ever been here!')
return 0
    # Get the publish time of a status post; Renren only exposes the date, not the exact time
def get_weibo_time(self, detail_page):
pre_first_time = re.findall(
'<input\stype="hidden"\svalue="\d{4}-\d{2}-\d{2}', detail_page)
first_time = re.findall('\d{4}-\d{2}-\d{2}', pre_first_time[0])
# print(pre_first_time)
# print(first_time)
return first_time[0]
    # Get the text content of a status post
def get_weibo_content(self, detail_page):
pre_content = re.findall(
'<div\sclass="content-main">[\S\s]+?</article>', detail_page)
if len(pre_content) == 0:
return []
content = re.sub('</h4>', 'brbrbr', pre_content[0])
content = re.sub('</span>', 'brbrbr', content)
content = re.sub('<[\s\S]+?>', '', content)
content = re.sub('[\s]{3,}', '', content)
content = re.sub('brbrbr', '<br>', content)
# print(content)
# print(pre_content)
return content
    # Strip the given characters from both ends of a string
def get_rid(self, all_string, wipe_string):
all_string = all_string.lstrip(wipe_string)
all_string = all_string.rstrip(wipe_string)
return all_string
    # Get every reply to a post
def get_replys(self, detail_page):
pre_reply = re.findall(
'class="reply-content">[\s|\S]*?</a>: [\s|\S]*?</p>\s*<div\sclass="bottom-bar">\s*<span\sclass="time">\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}</span>',
detail_page)
# print(pre_reply)
replys = []
for reply in pre_reply:
pre_man = re.findall('_blank">([\s|\S]*?)</a', reply)
man = pre_man[0]
# print(man)
pre_reply_content = re.findall('nbsp;([\s|\S]*?)</', reply)
reply_content = pre_reply_content[0]
reply_content = re.sub('<a[\S\s]+?>', '', reply_content)
reply_time = re.findall('\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}', reply)
full_reply = ' ' + man + ': ' + \
reply_content + ' @' + reply_time[0]
replys.append(full_reply)
# print(replys)
return replys
    # Assemble a status post into the HTML fragment used by the backup
def join_weibo(self, detail_page):
time1 = self.get_weibo_time(detail_page)
content1 = self.get_weibo_content(detail_page)
if len(content1) == 0:
return 0
reply1 = self.get_replys(detail_page)
text1 = """<div class="content">
<div class="time">
<span>"""
text2 = """</span>
</div>
<div class="weibo">
<p class="weibo-text">"""
text3 = """ </p>
</div>\n"""
text4 = """ <div class="reply">
<p class="reply-text">"""
text5 = """\n </p>
</div>\n"""
text6 = """</div>"""
weibo_one_html = text1 + time1 + text2 + content1 + text3
for reply in reply1:
weibo_one_html = weibo_one_html + text4 + reply + text5
weibo = weibo_one_html + text6 + '\n'
weibo = self.repl_img_url(weibo)
return weibo
    # Append the formatted status post to the output file
def save_html(self, weibo_our_keep, file_name):
with open(file_name, 'a+', encoding="utf-8") as f:
f.write(weibo_our_keep)
return 0
    # Create the basic HTML header of a top-level page
def create_basic_html(self, file_name):
header = """<html>
<head>
<meta name="Description" content="人人网 校内------曾经的一个朋友">
<meta name="Keywords" content="Xiaonei,Renren,校内,大学,同学,人人,人人网">
<title>人人网/校内网---曾经的一个朋友</title><meta charset="utf-8">
</head>
<body>
<div class="body-head">
<li class="daohang-item">
<h2>人人网/校内网---曾经的一个朋友</h2>
</li>
<li class="daohang-item">
<a href="./index.html">首页</a> |
<a href="./shuo.html" target="_blank">微博</a> |
<a href="./blog.html" target="_blank">博文</a> |
<a href="./album.html" target="_blank">相册</a>
</li>
</div>
<div class="body-main">
<br>
"""
with open(file_name, 'w+', encoding="utf-8") as f:
f.write(header)
f.close
return 0
    # Hyperlinks differ slightly between pages in the top-level backup folder and pages in a sub-folder (e.g. /blog), hence this separate method
def create_sub_folder_basic_html(self, file_name):
header = """<html>
<head>
<meta name="Description" content="人人网 校内------曾经的一个朋友">
<meta name="Keywords" content="Xiaonei,Renren,校内,大学,同学,人人,人人网">
<title>人人网/校内网---曾经的一个朋友</title><meta charset="utf-8">
</head>
<body>
<div class="body-head">
<li class="daohang-item">
<h2>人人网/校内网---曾经的一个朋友</h2>
</li>
<li class="daohang-item">
<a href="../index.html">首页</a> |
<a href="../shuo.html" target="_blank">微博</a> |
<a href="../blog.html" target="_blank">博文</a> |
<a href="../album.html" target="_blank">相册</a>
</li>
</div>
<div class="body-main">
<br>
"""
with open(file_name, 'w+', encoding="utf-8") as f:
f.write(header)
f.close
return 0
def create_weibo_page_head(self):
blog_list_title = """ <h3>说说列表</h3>"""
with open('shuo.html', 'a+', encoding='utf-8') as f:
f.write(blog_list_title)
return 0
    # Loop over every year/month of Renren statuses, downloading and saving them
def all_year_and_month(self):
print('正在保存人人网状态/说说/微博')
self.create_basic_html('shuo.html')
self.create_weibo_page_head()
weibo_urls = 'http://www.renren.com/timelinefeedretrieve.do'
param = {"ownerid": " ", "render": "0", "begin": "0", "limit": "100", "year": "2012", "month": "1",
"isAdmin": "false"}
# print(self.cookies)
param['ownerid'] = self.user_id
for year in range(2016, 2007, -1):
for month in tqdm(range(12, 0, -1)):
param['year'] = year
param['month'] = month
detailpage_in_monthly_page = self.open_url(weibo_urls, param)
# print(detailpage_in_monthly_page.content.url)
a_month_weibo_list = self.get_detailpage_in_monthly_page(
detailpage_in_monthly_page.content.decode())
# print(a_month_weibo_list)
self.save_every_weibo(a_month_weibo_list, 'shuo.html')
time.sleep(random.randint(1, 4))
self.full_fill_html('shuo.html')
return 0
    # Rewrite image links in the backup to local paths and download the images
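    # Example of the rewrite this method performs (the URL is hypothetical):
    #   src="http://fmn.rrimg.com/fmn066/20161001/abc.jpg"
    #   becomes src="./pic/fmn066/20161001/abc.jpg" (or ../pic/... inside blog/album pages)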
def repl_img_url(self, strings, blog=0):
# print(strings)
strings = re.sub('\?ver=\d', '', strings)
img_url = re.findall(
'src=["\'](http:[\w/\.\-/]{9,200}\.[gbjp][gifbmpnje]{1,2}[fgp])', strings)
# print(img_url)
# print(len(img_url))
for url in img_url:
try:
self.download_img(url)
except Exception as e:
print(e)
# print(url)
if blog:
strings = re.sub('src="http://[\s\S]+?/', 'src="../pic/', strings)
strings = re.sub(
'src=\'http://[\s\S]+?/', 'src=\'../pic/', strings)
else:
strings = re.sub('src="http://[\s\S]+?/', 'src="./pic/', strings)
strings = re.sub('src=\'http://[\s\S]+?/', 'src=\'./pic/', strings)
strings = re.sub('thumbnail=["\'][\s\S]+?["\']', '', strings)
return strings
    # Download the image at the given URL and save it into the matching local folder
def download_img(self, img_url):
if img_url.count('/') < 4:
return 0
k = img_url.split('/')
i = 3
img_name = './pic'
for x in range(3, len(k) - 1):
img_name = img_name + '/' + k[x]
# print(img_name)
if not os.path.exists(img_name):
os.makedirs(img_name)
# print(img_name)
img_name = img_name + '/' + k[-1]
# print(img_name)
if os.path.isfile(img_name):
return 0
pic_data = self.open_url(img_url)
with open(img_name, 'wb') as f:
kk = f.read
f.write(pic_data.content)
return 0
    # Format a single comment as an HTML fragment
def join_comment(self, comment):
comment_template = '<div class="comment" id={0}><span uid="{1}">{2}<span>:{3}@{4}</div>'
comment_in_htmls = comment_template.format(
comment['commentId'], comment['authorId'], comment[
'authorName'], comment['content'], comment['time']
)
comment_in_html = self.repl_img_url(comment_in_htmls, 1)
return comment_in_html
    # Fetch the comments for a given blog or photo id; for blogs leave blog=0 (the default), for photos pass a truthy value (normally 1)
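    # Illustrative calls (the ids are hypothetical):
    #   self.get_comment('123456789')     # comments on a blog entry
    #   self.get_comment('987654321', 1)  # comments on a photo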
def get_comment(self, page_id, blog=0):
param = {"limit": "20", "desc": "true", "offset": "0", "replaceUBBLarge": "true", "type": "blog",
"entryId": page_id, "entryOwnerId": self.user_id, "requestToken": self.requestToken, "_rtk": self.rtk}
comment_url = 'http://comment.renren.com/comment/xoa2'
if blog:
param["type"] = 'photo'
comments_page = self.open_url(comment_url, param)
comments_page = comments_page.content.decode()
comments_list = json.loads(comments_page)
# print(comments_list)
comments = ''
if comments_list and comments_list.get('commentCount'):
for comment in comments_list.get('comments'):
comments = comments + self.join_comment(comment)
return comments
    # Fetch blog content by id; ids taken from JSON are compressed (trailing zeros dropped), so ids below 99999999 are padded back with zeros
def get_blog_content(self, blog_id):
while int(blog_id) < 99999999:
blog_id = int(blog_id) * 10
blog_id = str(blog_id)
blog_url = 'http://blog.renren.com/blog/' + self.user_id + '/' + blog_id
blog_page = self.open_url(blog_url)
blog_page = blog_page.content.decode()
# print(blog_page)
patten = '<div\sid="blogContent"\sclass="blogDetail-content"[\s\S]+?>([\s\S]+?)</div>'
blog_content = re.findall(patten, blog_page)
# print(blog_content)
if len(blog_content) == 0:
return 0
else:
blog_content = self.repl_img_url(blog_content[0], 1)
return blog_content
    # Build a standalone blog page from the blog id and its summary, formatted as HTML
def save_single_blog_page(self, blog_id, summary):
if not os.path.exists('./blog'):
os.makedirs('./blog')
file_name = './blog/%i.html' % (blog_id)
self.create_sub_folder_basic_html(file_name)
blog_content = self.get_blog_content(blog_id)
comments = self.get_comment(blog_id)
blog_template = '{0}<div class="blog_content">{1}<br><h4>评论、留言</h4>{2}</div></body></html>'
blog = blog_template.format(summary, blog_content, comments)
# text1 = '"""<'div class="blog_discribe">
# <h3><a href="./blog/""" + blog_id + """.html">""" + blog_tuple[5] + """</a></h3>
# </div>
# <div class="blog_discribe">
# 发布:20""" + str(blog_tuple[1]) + """ 阅读:""" + str(blog_tuple[2]) + """ 回复:""" + str(blog_tuple[3]) + """
# </div>
# <div class="blog_content">""" + str(blog_content) + """<br>
# <h4>评论、留言</h4>
# """ + str(comments) + """
# </div>
# </body>
# </html>"""
self.save_html(blog, file_name)
return 0
    # Save a single blog post from its URL. Because blog ids are a bit special, one or two posts
    # may fail in the batch run, so this extra single-post method exists; it grabs the blog content
    # while building the tuple. Known bug: the blog page source carries no comment count, so
    # fetching that would need another method.
def save_a_single_blog(self, a_blog_url):
if not os.path.exists('./blog'):
os.makedirs('./blog')
blog_id = re.findall('blog\.renren\.com/blog/\d+/(\d+)', a_blog_url)
if blog_id:
blog_id = blog_id[0]
else:
print('请输入正确的博客网址!')
return 0
file_name = './blog/' + blog_id + '.html'
self.create_sub_folder_basic_html(file_name)
comments = self.get_comment(blog_id)
blog_tuple = self.get_blog_tuple(blog_id)
text1 = """ <div class="blog_discribe">
<h3><a href="./blog/""" + blog_id + """.html">""" + blog_tuple[5] + """</a></h3>
</div>
<div class="blog_discribe">
发布:""" + str(blog_tuple[1]) + """ 阅读:""" + str(blog_tuple[2]) + """ 回复:""" + str(blog_tuple[3]) + """
</div>
<div class="blog_content">""" + str(blog_tuple[0]) + """<br>
<h4>评论、留言</h4>
""" + str(comments) + """
</div>
</body>
</html>"""
self.save_html(text1, file_name)
clear()
print('*************************************************************')
print(' ****已保存人人网这篇博文,请继续保存其他或输入5退出**** ')
print('*************************************************************')
print(' ')
print(' ')
return 0
    # Fetch a blog's title, publish time, content and read count by blog id
def get_blog_tuple(self, blog_id):
while int(blog_id) < 99999999:
blog_id = int(blog_id) * 10
blog_id = str(blog_id)
blog_url = 'http://blog.renren.com/blog/' + self.user_id + '/' + blog_id
blog_page = self.open_url(blog_url)
blog_page = blog_page.content.decode()
# print(blog_page)
blog_title = re.findall(
'class="blogDetail-title">([\s\S]+?)<', blog_page)
blog_time = re.findall('createTime":"(\d+)"', blog_page)
blog_read_num = re.findall(
'blogDetail-readerNum-num">(\d+)<', blog_page)
patten = '<div\sid="blogContent"\sclass="blogDetail-content"[\s\S]+?>([\s\S]+?)</div>'
blog_content = re.findall(patten, blog_page)
# print(blog_content)
blog_tuple = ['none content', '2038-12-31 23:59:59',
'0', '0', blog_id, 'fail']
if len(blog_content) == 0:
blog_tuple[0] = blog_url + '博文内容未正确获取'
else:
blog_tuple[0] = self.repl_img_url(blog_content[0], 1)
if len(blog_title) == 0:
fail = blog_url + '未正确获取'
blog_tuple[5] = fail
else:
blog_tuple[5] = blog_title[0]
if len(blog_time):
blog_tuple[1] = time.strftime(
'%Y-%m-%d %H:%M:%S', time.localtime(int(blog_time[0]) / 1000))
if len(blog_title):
blog_tuple[2] = blog_read_num[0]
return blog_tuple
    # Format a blog summary entry as an HTML fragment
def join_blog_list(self, blog):
if blog:
summary_template = '''
            <div class="blog_discribe" data-id="{0}"><a href="./blog/{0}.html">{1}</a></div>
<div class="blog_discribe"> 发布:20 {2} 阅读:{3} 回复:{4} 赞:{5}</div>
'''
blog_id = int(blog['id'])
summary = summary_template.format(blog_id, blog['title'],
blog['createTime'], blog['readCount'], blog['commentCount'], blog['likeCount'])
self.save_single_blog_page(blog_id, summary)
summary += '<div class="blog_summary">%s</div><br><hr>' % (blog[
'summary'])
return summary
return None
    # Derive the number of blog-list pages from the total number of blog posts
def get_blog_list_page_num(self):
blog_start_url = 'http://blog.renren.com/blog/' + \
str(self.user_id) + '/myBlogs'
blog_start_page = self.open_url(blog_start_url)
# print(blog_start_page.content.decode())
blog_start_page_content_decode = blog_start_page.content.decode()
all_blog_num = re.findall(
'itemCount="(\d+)"', blog_start_page_content_decode)
# print(all_blog_num)
if all_blog_num:
all_blog_page_num = int(int(all_blog_num[0]) / 10)
# print(all_blog_page_num)
return all_blog_page_num
return 0
    # Create the h3 heading for the blog list page
def create_blog_list_page_head(self):
self.create_basic_html('blog.html')
blog_list_title = """ <h3>博客文章列表</h3>
<br>
"""
with open('blog.html', 'a+', encoding='utf-8') as f:
f.write(blog_list_title)
return 0
    # Fetch the blog entries on a summary page and return them as a list
def get_blog_content_list(self, blog_list_url, blog_param):
blog_list = self.open_url(blog_list_url, blog_param)
blog_list_content = blog_list.content.decode()
blog_content_list = json.loads(blog_list_content)
return blog_content_list.get('data')
    # Format the blog summaries on a summary page as HTML and save them
def save_blog_in_a_page(self, blog_content_list):
blog_in_a_page = ""
for blog in blog_content_list:
summary = self.join_blog_list(blog)
blog_in_a_page = blog_in_a_page + summary
# print(blog_in_a_page)
with open('blog.html', 'a+', encoding='utf-8') as f:
f.write(blog_in_a_page)
return 0
    # Append the closing tags to complete an HTML page
def full_fill_html(self, file_name):
blog_list_end_html = """ </body>
</html>"""
with open(file_name, 'a+', encoding='utf-8') as f:
f.write(blog_list_end_html)
return 0
    # Save all blog posts, throttling slightly (a random pause of a few seconds between pages), because requesting the same user too fast gets the client blocked
def all_blogs(self):
print('正在保存人人网文章/日志/博客')
self.create_blog_list_page_head()
all_blog_page_num = self.get_blog_list_page_num()
# print(all_blog_page_num)
blog_list_url = 'http://blog.renren.com/blog/' + \
str(self.user_id) + '/blogs'
# print(blog_list_url)
blog_param = {"categoryId": " ", "curpage": "0",
"requestToken": self.requestToken, "_rtk": self.rtk}
# print(blog_param)
        for page_num in tqdm(range(0, all_blog_page_num + 1)):
blog_param["curpage"] = page_num
blog_content_list = self.get_blog_content_list(
blog_list_url, blog_param)
self.save_blog_in_a_page(blog_content_list)
time.sleep(random.randint(3, 5))
self.full_fill_html('blog.html')
return 0
    # Add the h3 heading for the album list page
def create_album_list_page_head(self):
self.create_basic_html('album.html')
album_list_title = """ <h3>相册列表</h3>"""
with open('album.html', 'a+', encoding='utf-8') as f:
f.write(album_list_title)
return 0
    # Create the HTML header for an album page and add its title
def create_album_page_head(self, album_id, album_name):
if not os.path.exists('./album'):
os.makedirs('./album')
self.create_sub_folder_basic_html(
'./album/album-' + album_id + '.html')
album_list_title = ' <h3>' + album_name + '</h3>'
with open('./album/album-' + album_id + '.html', 'a+', encoding='utf-8') as f:
f.write(album_list_title)
return 0
    # Rewrite photo links to local paths, download them, and append the HTML to the album file
def save_photo_in_html(self, album_id, photo_in_html):
photo_in_html = self.repl_img_url(photo_in_html, 1)
with open('./album/album-' + album_id + '.html', 'a+', encoding='utf-8') as f:
f.write(photo_in_html)
return 0
    # Fetch the album list and its metadata (album name, album id, photo count)
def get_album_content_list(self, album_list_url):
album_content_list_sourse = self.open_url(album_list_url)
album_content_list_decode = album_content_list_sourse.content.decode()
album_content_list = re.findall('albumName":"([\s\S]+?)","albumId":"(\d+)"[\s\S]+?photoCount":(\d+),',
album_content_list_decode)
# print(album_content_list)
return album_content_list
    # Fetch the caption of a photo
    # Known bug: captions starting with digits, letters or special symbols can come out garbled; the backup then shows raw Unicode escapes
def get_photo_discribe(self, photo_id):
# photo_url = 'http://photo.renren.com/photo/' + self.user_id + '/photo-' + photo_id + '/v7'
photo_url = 'http://photo.renren.com/photo/' + \
self.user_id + '/photo-' + photo_id + '/layer'
photo_sourse = self.open_url(photo_url)
photo_sourse_decode = photo_sourse.content.decode()
# print(photo_sourse_decode)
photo_info = json.loads(photo_sourse_decode)
currentPhoto = photo_info.get('currentPhoto')
if currentPhoto is not None:
return currentPhoto.get('originTitle') or '本图没有标题'
else:
return '本图没有标题'
    # Save one album, formatting each photo as HTML
def save_album(self, album_id, album_name):
album_url = 'http://photo.renren.com/photo/' + \
self.user_id + '/album-' + album_id + '/v7'
# print(album_url)
album_sourse = self.open_url(album_url)
album_sourse_decode = album_sourse.content.decode()
# print(album_sourse_decode)
photo_in_a_album = re.findall(
'photoId":"(\d+)"[\s\S]+?createTime\\\\":\\\\"(\d+)\\\\"[\s\S]+?url":"(http:[\\\\\\\\\w/\.-_]{10,200}\.[gbjp][gifbmpnje]{1,2}[fgp])"',
album_sourse_decode)
# print(photo_in_a_album)
for photo in photo_in_a_album:
photo_id = photo[0]
photo_discribe = self.get_photo_discribe(photo_id)
photo_comments = self.get_comment(photo_id, 1)
create_time = photo[1]
l = int(create_time) / 1000
k = time.localtime(l)
create_time = time.strftime('%Y-%m-%d %H:%M:%S', k)
photo_url = photo[2]
# print(photo_url)
# print(type(photo_url))
photo_url = photo_url.replace('\\', '')
photo_in_html = """ <div class="photo">
<p><img """ + 'src="' + photo_url + '" alt="' + photo_discribe + '" /><br><a>' + photo_discribe + """</a>
</p>
</div>
<br>""" + photo_comments + """
<br>
<br>"""
self.save_photo_in_html(album_id, photo_in_html)
self.full_fill_html('./album/album-' + album_id + '.html')
time.sleep(random.randint(0, 3))
return 0
    # Save the album list page
def save_album_list(self, album_content_list):
album_list_in_html = ""
if album_content_list:
for album_name in tqdm(album_content_list):
album_name = list(album_name)
if album_name[0].startswith('\\u'):
album_name[0] = album_name[0].encode(
'latin-1').decode('unicode_escape')
self.create_album_page_head(album_name[1], album_name[0])
self.save_album(album_name[1], album_name[0])
album_list_in_html = album_list_in_html + """ <div class="album_name">
<p><a href="./album/album-""" + album_name[1] + """.html">""" + album_name[0] + '</a> 共' + album_name[
2] + '张 </p>' + """
</div>
"""
with open('album.html', 'a+', encoding='utf-8') as f:
f.write(album_list_in_html)
return 0
    # Save all albums of the user
def all_album(self):
print('正在保存人人网相册')
self.create_album_list_page_head()
album_list_url = 'http://photo.renren.com/photo/' + \
str(self.user_id) + '/albumlist/v7'
# print(album_list_url)
album_content_list = self.get_album_content_list(album_list_url)
# print(blog_content_list)
self.save_album_list(album_content_list)
self.full_fill_html('album.html')
return 0
    # Prompt the user for the account name and password
def get_user_account_and_pw(self):
print("人人网、校内备份脚本write by rublog")
account_tips = "请输入人人网账号并按回车:"
pw_tips = "请输入人人网密码并回车:"
account = input(account_tips)
pw = getpass.getpass(pw_tips)
self.params['email'] = account
self.params['password'] = pw
return 0
    # Create the user's index page; it is simple, just the profile photo, name and backup time
def make_index(self):
self.create_basic_html('index.html')
index_content = """
<h3>""" + self.user_name + '的人人网备份' + """</h3>
<div class="index_content">
<div class="bak_time">
<span>""" + self.user_name + '备份于' + self.bak_time + """</span>
</div>
<div class="tiny_photo">
<p class="tiny_photo">
<img src=\"""" + self.tiny_photo_url + """\" alt=\"""" + self.user_name + """\" /> </p>
</div>
</div>
<div class="index_content">
<li class="daohang-item">
<a href="./index.html">首页</a> |
<a href="./shuo.html" target="_blank">微博</a> |
<a href="./blog.html" target="_blank">博文</a> |
<a href="./album.html" target="_blank">相册</a>
</li>
</div>
</body>
</html>"""
index_content = self.repl_img_url(index_content)
with open('index.html', 'a+', encoding='utf-8') as f:
f.write(index_content)
self.dir_name = "人人网" + self.user_name + '资料备份' + \
time.strftime('%Y-%m-%d', self.localtime)
if not os.path.exists(self.dir_name):
os.makedirs(self.dir_name)
return 0
def replace_guest_info(self, user_id):
user_page = 'http://www.renren.com/' + user_id + '/profile'
# print(user_page)
index = self.open_url(user_page)
# print(index.url)
# print(index.request.headers)
# print(index.headers)
self.get_user_tiny_photo_url(index.content.decode())
self.get_user_name(index.content.decode())
return 0
def pack_up(self):
file_list = ['index.html', 'album.html',
'blog.html', 'shuo.html', 'album', 'blog', 'pic']
for file in file_list:
try:
shutil.move(file, self.dir_name + '/' + file)
except:
continue
finally:
pass
return 0
def main():
tips = """人人网、校内备份脚本 write by rublog
因人人网改版及个人精力原因无法保证本脚本一直能
正常使用,本人亦未收取您的任何费用,恕无力保持
脚本能正常使用。
本人保证原版脚本不存在任何上传个人账号行为,请在
本人网站或者本人github上下载使用,其他不做任何
保证。"""
print(tips)
ren = Renren()
u_id = ren.login()
# ren.save_album('1134407597', '用了太多的js')
# print(ren.tiny_photo_url)
# print(ren.user_name)
# ren.make_index()
# lol = input('stop here!')
    # If not logged in, keep asking for the account and password until login succeeds
while not ren.is_login:
ren.get_user_account_and_pw()
u_id = ren.login()
clear()
choice_tips = """人人网、校内备份脚本 write by rublog
本脚本部分控制网页请求速度但未使用多线程,
(0)、备份人人网说说、博客、相册
(1)、备份人人网说说
(2)、备份人人网博客
(3)、备份人人网相册
(4)、备份人人网单篇博客
(5)、退出
(6)、以我之权限保存某朋友在人人网说说、博客和相册
请输入选项数字1~6并按回车:"""
    # a_month_page = ren.all_year_and_month()
    # The line above downloads all Renren statuses
    # ren.all_blogs()
    # The line above downloads all Renren blog posts
    # co = ren.all_album()
    # print(co)
    # Menu / feature selection loop
end = 1
while end:
kk = input(choice_tips)
print(kk)
try:
kk = int(kk)
except:
kk = 10
if kk == 0:
clear()
print('*************************************************************')
print(' *****正在保存人人网说说、博客和相册,请稍候.......***** ')
print('*************************************************************')
print(' ')
print(' ')
ren.make_index()
a_month_page = ren.all_year_and_month()
hh = ren.all_blogs()
co = ren.all_album()
clear()
print('*************************************************************')
print(' *******已保存人人网说说、博客和相册,请输入5退出******* ')
print('*************************************************************')
print(' ')
print(' ')
elif kk == 1:
clear()
print('***********************************************************')
print(' **********正在保存人人网说说,请稍候.......********** ')
print('***********************************************************')
print(' ')
print(' ')
ren.make_index()
a_month_page = ren.all_year_and_month()
clear()
print('*************************************************************')
print(' ******已保存人人网说说,请继续保存其他或输入5退出****** ')
print('*************************************************************')
print(' ')
print(' ')
elif kk == 2:
clear()
print('***********************************************************')
print(' **********正在保存人人网博客,请稍候.......********** ')
print('***********************************************************')
print(' ')
print(' ')
ren.make_index()
hh = ren.all_blogs()
clear()
print('*************************************************************')
print(' ******已保存人人网博客,请继续保存其他或输入5退出****** ')
print('*************************************************************')
print(' ')
print(' ')
elif kk == 3:
clear()
print('***********************************************************')
print(' **********正在保存人人网相册,请稍候.......********** ')
print('***********************************************************')
print(' ')
print(' ')
ren.make_index()
co = ren.all_album()
clear()
print('*************************************************************')
print(' ******已保存人人网相册,请继续保存其他或输入5退出****** ')
print('*************************************************************')
print(' ')
print(' ')
elif kk == 4:
clear()
print('*************************************************************')
print(' ******************请输入博客网址.......****************** ')
print('*************************************************************')
print(' ')
print(' ')
a_blog_url = input('请输入博客网址:')
ren.make_index()
a_month_page = ren.save_a_single_blog(a_blog_url)
elif kk == 5:
print('正在打包备份的文件,请稍候......')
ren.pack_up()
print('正在退出,请稍候.......')
time.sleep(2)
end = 0
elif kk == 6:
clear()
print('*************************************************************')
print('*************以我之权限保存某朋友在人人网说说、博客和相册**********')
print('*******************请输入Ta的user_id**************************')
print('********http://www.renren.com/653334272784/profile***********')
print('*********上面的653334272784就是一个user_id示例*****************')
print('*******************请输入Ta的user_id**************************')
print('*************************************************************')
print(' ')
print(' ')
self_user_id_bak = ren.user_id
self_user_name_bak = ren.user_name
self_tiny_photo_url_bak = ren.tiny_photo_url
ren.user_id = input('请输入Ta的user_id:')
ren.replace_guest_info(ren.user_id)
ren.make_index()
a_month_page = ren.all_year_and_month()
hh = ren.all_blogs()
co = ren.all_album()
ren.pack_up()
clear()
print('************************************************************')
print(' **********************已保存*************************** ')
print(' ********您能看到的Ta在人人网的说说、博客和相册************* ')
print(' *********************请输入5退出************************ ')
print('*************************************************************')
print(' ')
print(' ')
ren.user_id = self_user_id_bak
ren.user_name = self_user_name_bak
ren.tiny_photo_url = self_tiny_photo_url_bak
else:
clear()
print('************************************************')
print(' **********输入有误,请重新输入!********** ')
print('************************************************')
print(' ')
print(' ')
continue
# main entry point
if __name__ == '__main__':
main()
|
the-stack_0_20920 | from __future__ import print_function
import os
import sys
IS_PYTHON3 = sys.version_info[0] >= 3
if IS_PYTHON3:
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer, ThreadingMixIn
else:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer, ThreadingMixIn
from os import chdir, path
import threading
import time
HTTP_PORT = 14563
class CustomHandler(SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
# These methods get rid of errors messages caused by javaws closing the socket immediately
def handle_one_request(self):
try:
SimpleHTTPRequestHandler.handle_one_request(self)
except:
pass
def finish(self):
try:
SimpleHTTPRequestHandler.finish(self)
except:
pass
# Added this method from Jython's SimpleHTTPRequestHandler for compatibility with Jython on Windows OS
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
try:
fs = os.fstat(f.fileno())
except (OSError, AttributeError):
# Jython on Windows lands here when f.fileno() is invalid
fs = os.stat(path)
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
class FileServer(ThreadingMixIn, TCPServer):
allow_reuse_address = True
def __init__(self):
pass
def start(self):
TCPServer.__init__(self, ('localhost', int(HTTP_PORT)), CustomHandler)
self.RESOURCE_LOCATION = path.abspath(path.dirname(__file__))
print("Server serving from DocumentRoot:" + self.RESOURCE_LOCATION)
chdir(self.RESOURCE_LOCATION)
server_thread = threading.Thread(name='test_file_server', target=self.serve_forever)
server_thread.daemon = True
server_thread.start()
def stop(self):
if hasattr(self, 'shutdown'):
self.shutdown()
else:
self.server_close()
print("Server stopped")
if __name__ == '__main__':
fs = FileServer()
fs.start()
while True:
time.sleep(10)
# fs.stop()
|
the-stack_0_20921 | """
Handling test data.
"""
import os
import hashlib
def data_path(data_name: str) -> str:
"""
Load test data.
"""
data_path = os.path.join(os.path.split(__file__)[0], "data", data_name)
assert os.path.exists(data_path)
return data_path
def get_sha256(path: str) -> str:
"""Calculates a sha256 hash of the file."""
sha256 = hashlib.sha256()
with open(path, "rb") as f:
while True:
data = f.read(65536)
if not data:
break
sha256.update(data)
return sha256.hexdigest()
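
# Illustrative usage (a sketch; "example.txt" stands for any file that exists
# under the test data directory):
#   path = data_path("example.txt")
#   digest = get_sha256(path)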
|
the-stack_0_20923 | # -*- coding: utf-8 -*-
"""The fake file-like object implementation."""
import os
from dfvfs.file_io import file_io
from dfvfs.lib import errors
class FakeFile(file_io.FileIO):
"""Class that implements a fake file-like object."""
def __init__(self, resolver_context, file_data):
"""Initializes the file-like object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
file_data: the fake file data.
"""
super(FakeFile, self).__init__(resolver_context)
self._current_offset = 0
self._file_data = file_data
self._size = 0
def _Close(self):
"""Closes the file-like object.
Raises:
IOError: if the close failed.
"""
return
def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec: optional path specification (instance of path.PathSpec).
The default is None.
mode: optional file access mode. The default is 'rb' read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec:
      raise ValueError(u'Missing path specification.')
if path_spec.HasParent():
raise errors.PathSpecError(u'Unsupported path specification with parent.')
location = getattr(path_spec, u'location', None)
if location is None:
raise errors.PathSpecError(u'Path specification missing location.')
self._current_offset = 0
self._size = len(self._file_data)
# Note: that the following functions do not follow the style guide
# because they are part of the file-like object interface.
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size: Optional integer value containing the number of bytes to read.
Default is all remaining data (None).
Returns:
A byte string containing the data read.
Raises:
IOError: if the read failed.
"""
if not self._is_open:
raise IOError(u'Not opened.')
if self._current_offset < 0:
raise IOError(
u'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._file_data is None or self._current_offset >= self._size:
return b''
if size is None:
size = self._size
if self._current_offset + size > self._size:
size = self._size - self._current_offset
start_offset = self._current_offset
self._current_offset += size
return self._file_data[start_offset:self._current_offset]
def seek(self, offset, whence=os.SEEK_SET):
"""Seeks an offset within the file-like object.
Args:
offset: The offset to seek.
whence: Optional value that indicates whether offset is an absolute
or relative position within the file. Default is SEEK_SET.
Raises:
IOError: if the seek failed.
"""
if not self._is_open:
raise IOError(u'Not opened.')
if whence == os.SEEK_CUR:
offset += self._current_offset
elif whence == os.SEEK_END:
offset += self._size
elif whence != os.SEEK_SET:
raise IOError(u'Unsupported whence.')
if offset < 0:
raise IOError(u'Invalid offset value less than zero.')
self._current_offset = offset
def get_offset(self):
"""Returns the current offset into the file-like object.
Raises:
IOError: if the file-like object has not been opened.
"""
if not self._is_open:
raise IOError(u'Not opened.')
return self._current_offset
def get_size(self):
"""Returns the size of the file-like object.
Raises:
IOError: if the file-like object has not been opened.
"""
if not self._is_open:
raise IOError(u'Not opened.')
return self._size
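
# Illustrative usage sketch (not part of this module; the path-specification
# and resolver-context types are assumptions about the wider dfvfs code base):
#   resolver_context = context.Context()
#   file_object = FakeFile(resolver_context, b'fake file data')
#   file_object.open(path_spec=fake_path_spec.FakePathSpec(location=u'/fake'))
#   data = file_object.read()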
|
the-stack_0_20925 | """Given a file of names and a file of salutations to give each
name, make an output file with greetings for each name in the input file.
This script demonstrates the use of single-valued keyword-style parameters
to Kive pipelines.
"""
import argparse
import csv
import typing as ty
PARSER = argparse.ArgumentParser()
PARSER.add_argument("--names", type=argparse.FileType())
PARSER.add_argument("--salutations", type=argparse.FileType())
PARSER.add_argument("outputfile", type=argparse.FileType("w"))
def greet(name: str, salutation: str = "Hello") -> str:
return f"{salutation} {name}!"
def get_salutations(inputfile: ty.Optional[ty.TextIO]) -> ty.Dict[str, str]:
if inputfile is not None:
rdr = csv.DictReader(inputfile)
return {row["name"]: row["salutation"] for row in rdr}
else:
return {
"Grace Hopper": "Oh my goodness, it's Admiral",
"Radia Perlman": "Introducing the inventor of the spanning-tree protocol,",
}
def get_names(inputfile: ty.Optional[ty.TextIO]) -> ty.Iterable[str]:
if inputfile is not None:
rdr = csv.DictReader(inputfile)
yield from (r["name"] for r in rdr)
else:
yield from iter(["Abraham", "Bud", "Charlize", "Radia Perlman"])
def main() -> None:
args = PARSER.parse_args()
names = get_names(args.names)
salutations = get_salutations(args.salutations)
output_writer = csv.DictWriter(args.outputfile, fieldnames=["greeting"])
output_writer.writeheader()
for name in names:
output_writer.writerow(
{"greeting": greet(name, salutations.get(name, "Hello"))}
)
if __name__ == "__main__":
main()
|
the-stack_0_20928 | import collections
from typing import Dict, List, Optional, Tuple
import numpy as np
import logging
from ray.tune import trial_runner
from ray.tune.result import DEFAULT_METRIC
from ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler
from ray.tune.trial import Trial
from ray.tune.error import TuneError
logger = logging.getLogger(__name__)
# Implementation notes:
# This implementation contains 3 logical levels.
# Each HyperBand iteration is a "band". There can be multiple
# bands running at once, and there can be 1 band that is incomplete.
#
# In each band, there are at most `s` + 1 brackets.
# `s` is a value determined by given parameters, and assigned on
# a cyclic basis.
#
# In each bracket, there are at most `n(s)` trials, indicating that
# `n` is a function of `s`. These trials go through a series of
# halving procedures, dropping lowest performers. Multiple
# brackets are running at once.
#
# Trials added will be inserted into the most recent bracket
# and band and will spill over to new brackets/bands accordingly.
#
# This maintains the bracket size and max trial count per band
# to 5 and 117 respectively, which correspond to that of
# `max_attr=81, eta=3` from the blog post. Trials will fill up
# from smallest bracket to largest, with largest
# having the most rounds of successive halving.
class HyperBandScheduler(FIFOScheduler):
"""Implements the HyperBand early stopping algorithm.
HyperBandScheduler early stops trials using the HyperBand optimization
algorithm. It divides trials into brackets of varying sizes, and
periodically early stops low-performing trials within each bracket.
To use this implementation of HyperBand with Tune, all you need
to do is specify the max length of time a trial can run `max_t`, the time
units `time_attr`, the name of the reported objective value `metric`,
and if `metric` is to be maximized or minimized (`mode`).
We automatically determine reasonable values for the other
HyperBand parameters based on the given values.
For example, to limit trials to 10 minutes and early stop based on the
    `episode_reward_mean` attr, construct:
``HyperBand('time_total_s', 'episode_reward_mean', max_t=600)``
Note that Tune's stopping criteria will be applied in conjunction with
HyperBand's early stopping mechanisms.
See also: https://people.eecs.berkeley.edu/~kjamieson/hyperband.html
Args:
time_attr (str): The training result attr to use for comparing time.
Note that you can pass in something non-temporal such as
`training_iteration` as a measure of progress, the only requirement
is that the attribute should increase monotonically.
metric (str): The training result objective value attribute. Stopping
procedures will use this attribute. If None but a mode was passed,
the `ray.tune.result.DEFAULT_METRIC` will be used per default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
max_t (int): max time units per trial. Trials will be stopped after
max_t time units (determined by time_attr) have passed.
The scheduler will terminate trials after this time has passed.
Note that this is different from the semantics of `max_t` as
mentioned in the original HyperBand paper.
reduction_factor (float): Same as `eta`. Determines how sharp
the difference is between bracket space-time allocation ratios.
stop_last_trials (bool): Whether to terminate the trials after
reaching max_t. Defaults to True.
"""
_supports_buffered_results = False
def __init__(self,
time_attr: str = "training_iteration",
metric: Optional[str] = None,
mode: Optional[str] = None,
max_t: int = 81,
reduction_factor: float = 3,
stop_last_trials: bool = True):
assert max_t > 0, "Max (time_attr) not valid!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
FIFOScheduler.__init__(self)
self._eta = reduction_factor
self._s_max_1 = int(
np.round(np.log(max_t) / np.log(reduction_factor))) + 1
self._max_t_attr = max_t
# bracket max trials
self._get_n0 = lambda s: int(
np.ceil(self._s_max_1 / (s + 1) * self._eta**s))
# bracket initial iterations
self._get_r0 = lambda s: int((max_t * self._eta**(-s)))
self._hyperbands = [[]] # list of hyperband iterations
self._trial_info = {} # Stores Trial -> Bracket, Band Iteration
# Tracks state for new trial add
self._state = {"bracket": None, "band_idx": 0}
self._num_stopped = 0
self._metric = metric
self._mode = mode
self._metric_op = None
if self._mode == "max":
self._metric_op = 1.
elif self._mode == "min":
self._metric_op = -1.
self._time_attr = time_attr
self._stop_last_trials = stop_last_trials
def set_search_properties(self, metric: Optional[str],
mode: Optional[str]) -> bool:
if self._metric and metric:
return False
if self._mode and mode:
return False
if metric:
self._metric = metric
if mode:
self._mode = mode
if self._mode == "max":
self._metric_op = 1.
elif self._mode == "min":
self._metric_op = -1.
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
return True
def on_trial_add(self, trial_runner: "trial_runner.TrialRunner",
trial: Trial):
"""Adds new trial.
        On a new trial add, if the current bracket is not filled, add the
        trial to the current bracket. Else, if the current band is not filled,
        create a new bracket in the band and add the trial to it. Else, create
        a new band with a new bracket and add the trial to that."""
if not self._metric or not self._metric_op:
raise ValueError(
"{} has been instantiated without a valid `metric` ({}) or "
"`mode` ({}) parameter. Either pass these parameters when "
"instantiating the scheduler, or pass them as parameters "
"to `tune.run()`".format(self.__class__.__name__, self._metric,
self._mode))
cur_bracket = self._state["bracket"]
cur_band = self._hyperbands[self._state["band_idx"]]
if cur_bracket is None or cur_bracket.filled():
retry = True
while retry:
# if current iteration is filled, create new iteration
if self._cur_band_filled():
cur_band = []
self._hyperbands.append(cur_band)
self._state["band_idx"] += 1
# cur_band will always be less than s_max_1 or else filled
s = len(cur_band)
assert s < self._s_max_1, "Current band is filled!"
if self._get_r0(s) == 0:
logger.info("Bracket too small - Retrying...")
cur_bracket = None
else:
retry = False
cur_bracket = self._create_bracket(s)
cur_band.append(cur_bracket)
self._state["bracket"] = cur_bracket
self._state["bracket"].add_trial(trial)
self._trial_info[trial] = cur_bracket, self._state["band_idx"]
def _create_bracket(self, s):
return Bracket(
time_attr=self._time_attr,
max_trials=self._get_n0(s),
init_t_attr=self._get_r0(s),
max_t_attr=self._max_t_attr,
eta=self._eta,
s=s,
stop_last_trials=self._stop_last_trials)
def _cur_band_filled(self) -> bool:
"""Checks if the current band is filled.
The size of the current band should be equal to s_max_1"""
cur_band = self._hyperbands[self._state["band_idx"]]
return len(cur_band) == self._s_max_1
def on_trial_result(self, trial_runner: "trial_runner.TrialRunner",
trial: Trial, result: Dict):
"""If bracket is finished, all trials will be stopped.
If a given trial finishes and bracket iteration is not done,
the trial will be paused and resources will be given up.
This scheduler will not start trials but will stop trials.
The current running trial will not be handled,
as the trialrunner will be given control to handle it."""
bracket, _ = self._trial_info[trial]
bracket.update_trial_stats(trial, result)
if bracket.continue_trial(trial):
return TrialScheduler.CONTINUE
action = self._process_bracket(trial_runner, bracket)
logger.debug(f"{action} for {trial} on "
f"{self._time_attr}={result.get(self._time_attr)}")
return action
def _process_bracket(self, trial_runner: "trial_runner.TrialRunner",
bracket: "Bracket") -> str:
"""This is called whenever a trial makes progress.
When all live trials in the bracket have no more iterations left,
Trials will be successively halved. If bracket is done, all
non-running trials will be stopped and cleaned up,
and during each halving phase, bad trials will be stopped while good
trials will return to "PENDING"."""
action = TrialScheduler.PAUSE
if bracket.cur_iter_done():
if bracket.finished():
bracket.cleanup_full(trial_runner)
return TrialScheduler.STOP
good, bad = bracket.successive_halving(self._metric,
self._metric_op)
# kill bad trials
self._num_stopped += len(bad)
for t in bad:
if t.status == Trial.PAUSED:
trial_runner.stop_trial(t)
elif t.status == Trial.RUNNING:
bracket.cleanup_trial(t)
action = TrialScheduler.STOP
else:
raise TuneError(f"Trial with unexpected bad status "
f"encountered: {t.status}")
# ready the good trials - if trial is too far ahead, don't continue
for t in good:
if t.status not in [Trial.PAUSED, Trial.RUNNING]:
raise TuneError(f"Trial with unexpected good status "
f"encountered: {t.status}")
if bracket.continue_trial(t):
if t.status == Trial.PAUSED:
self._unpause_trial(trial_runner, t)
elif t.status == Trial.RUNNING:
action = TrialScheduler.CONTINUE
return action
def on_trial_remove(self, trial_runner: "trial_runner.TrialRunner",
trial: Trial):
"""Notification when trial terminates.
Trial info is removed from bracket. Triggers halving if bracket is
not finished."""
bracket, _ = self._trial_info[trial]
bracket.cleanup_trial(trial)
if not bracket.finished():
self._process_bracket(trial_runner, bracket)
def on_trial_complete(self, trial_runner: "trial_runner.TrialRunner",
trial: Trial, result: Dict):
"""Cleans up trial info from bracket if trial completed early."""
self.on_trial_remove(trial_runner, trial)
def on_trial_error(self, trial_runner: "trial_runner.TrialRunner",
trial: Trial):
"""Cleans up trial info from bracket if trial errored early."""
self.on_trial_remove(trial_runner, trial)
def choose_trial_to_run(
self, trial_runner: "trial_runner.TrialRunner") -> Optional[Trial]:
"""Fair scheduling within iteration by completion percentage.
List of trials not used since all trials are tracked as state
of scheduler. If iteration is occupied (ie, no trials to run),
then look into next iteration.
"""
for hyperband in self._hyperbands:
# band will have None entries if no resources
# are to be allocated to that bracket.
scrubbed = [b for b in hyperband if b is not None]
for bracket in sorted(
scrubbed, key=lambda b: b.completion_percentage()):
for trial in bracket.current_trials():
if (trial.status == Trial.PENDING
and trial_runner.has_resources_for_trial(trial)):
return trial
return None
def debug_string(self) -> str:
"""This provides a progress notification for the algorithm.
For each bracket, the algorithm will output a string as follows:
Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%):
{PENDING: 2, RUNNING: 3, TERMINATED: 2}
"Max Size" indicates the max number of pending/running experiments
set according to the Hyperband algorithm.
"Milestone" indicates the iterations a trial will run for before
the next halving will occur.
"Completed" indicates an approximate progress metric. Some brackets,
like ones that are unfilled, will not reach 100%.
"""
out = "Using HyperBand: "
out += "num_stopped={} total_brackets={}".format(
self._num_stopped, sum(len(band) for band in self._hyperbands))
for i, band in enumerate(self._hyperbands):
out += "\nRound #{}:".format(i)
for bracket in band:
if bracket:
out += "\n {}".format(bracket)
return out
def state(self) -> Dict[str, int]:
return {
"num_brackets": sum(len(band) for band in self._hyperbands),
"num_stopped": self._num_stopped
}
def _unpause_trial(self, trial_runner: "trial_runner.TrialRunner",
trial: Trial):
trial_runner.trial_executor.unpause_trial(trial)
class Bracket:
"""Logical object for tracking Hyperband bracket progress. Keeps track
of proper parameters as designated by HyperBand.
Also keeps track of progress to ensure good scheduling.
"""
def __init__(self,
time_attr: str,
max_trials: int,
init_t_attr: int,
max_t_attr: int,
eta: float,
s: int,
stop_last_trials: bool = True):
self._live_trials = {} # maps trial -> current result
self._all_trials = []
        self._time_attr = time_attr  # attribute used to measure time/progress
self._n = self._n0 = max_trials
self._r = self._r0 = init_t_attr
self._max_t_attr = max_t_attr
self._cumul_r = self._r0
self._eta = eta
self._halves = s
self._total_work = self._calculate_total_work(self._n0, self._r0, s)
self._completed_progress = 0
self.stop_last_trials = stop_last_trials
def add_trial(self, trial: Trial):
"""Add trial to bracket assuming bracket is not filled.
At a later iteration, a newly added trial will be given equal
opportunity to catch up."""
assert not self.filled(), "Cannot add trial to filled bracket!"
self._live_trials[trial] = None
self._all_trials.append(trial)
def cur_iter_done(self) -> bool:
"""Checks if all iterations have completed.
TODO(rliaw): also check that `t.iterations == self._r`"""
return all(
self._get_result_time(result) >= self._cumul_r
for result in self._live_trials.values())
def finished(self) -> bool:
if not self.stop_last_trials:
return False
return self._halves == 0 and self.cur_iter_done()
def current_trials(self) -> List[Trial]:
return list(self._live_trials)
def continue_trial(self, trial: Trial) -> bool:
result = self._live_trials[trial]
if not self.stop_last_trials and self._halves == 0:
return True
elif self._get_result_time(result) < self._cumul_r:
return True
return False
def filled(self) -> bool:
"""Checks if bracket is filled.
Only let new trials be added at current level minimizing the need
to backtrack and bookkeep previous medians."""
return len(self._live_trials) == self._n
def successive_halving(self, metric: str, metric_op: float
) -> Tuple[List[Trial], List[Trial]]:
if self._halves == 0 and not self.stop_last_trials:
return self._live_trials, []
assert self._halves > 0
self._halves -= 1
self._n /= self._eta
self._n = int(np.ceil(self._n))
self._r *= self._eta
self._r = int(min(self._r, self._max_t_attr - self._cumul_r))
self._cumul_r = self._r
sorted_trials = sorted(
self._live_trials,
key=lambda t: metric_op * self._live_trials[t][metric])
good, bad = sorted_trials[-self._n:], sorted_trials[:-self._n]
return good, bad
def update_trial_stats(self, trial: Trial, result: Dict):
"""Update result for trial. Called after trial has finished
an iteration - will decrement iteration count.
TODO(rliaw): The other alternative is to keep the trials
in and make sure they're not set as pending later."""
assert trial in self._live_trials
assert self._get_result_time(result) >= 0
observed_time = self._get_result_time(result)
last_observed = self._get_result_time(self._live_trials[trial])
delta = observed_time - last_observed
if delta <= 0:
logger.info("Restoring from a previous point in time. "
"Previous={}; Now={}".format(last_observed,
observed_time))
self._completed_progress += delta
self._live_trials[trial] = result
def cleanup_trial(self, trial: Trial):
"""Clean up statistics tracking for terminated trials (either by force
or otherwise).
This may cause bad trials to continue for a long time, in the case
where all the good trials finish early and there are only bad trials
left in a bracket with a large max-iteration."""
assert trial in self._live_trials
del self._live_trials[trial]
def cleanup_full(self, trial_runner: "trial_runner.TrialRunner"):
"""Cleans up bracket after bracket is completely finished.
Lets the last trial continue to run until termination condition
kicks in."""
for trial in self.current_trials():
if (trial.status == Trial.PAUSED):
trial_runner.stop_trial(trial)
def completion_percentage(self) -> float:
"""Returns a progress metric.
        This will not always reach 100% since dead trials
        are dropped."""
if self.finished():
return 1.0
return min(self._completed_progress / self._total_work, 1.0)
def _get_result_time(self, result: Dict) -> float:
if result is None:
return 0
return result[self._time_attr]
def _calculate_total_work(self, n: int, r: float, s: int):
work = 0
cumulative_r = r
for _ in range(s + 1):
work += int(n) * int(r)
n /= self._eta
n = int(np.ceil(n))
r *= self._eta
r = int(min(r, self._max_t_attr - cumulative_r))
return work
def __repr__(self) -> str:
status = ", ".join([
"Max Size (n)={}".format(self._n),
"Milestone (r)={}".format(self._cumul_r),
"completed={:.1%}".format(self.completion_percentage())
])
counts = collections.Counter([t.status for t in self._all_trials])
trial_statuses = ", ".join(
sorted("{}: {}".format(k, v) for k, v in counts.items()))
return "Bracket({}): {{{}}} ".format(status, trial_statuses)
|
the-stack_0_20929 | from fosscord.ext import commands
import fosscord
class Bot(commands.Bot):
def __init__(self):
super().__init__(command_prefix=commands.when_mentioned_or("$"))
async def on_ready(self):
print(f"Logged in as {self.user} (ID: {self.user.id})")
print("------")
# Define a simple View that gives us a confirmation menu
class Confirm(fosscord.ui.View):
def __init__(self):
super().__init__()
self.value = None
# When the confirm button is pressed, set the inner value to `True` and
# stop the View from listening to more input.
# We also send the user an ephemeral message that we're confirming their choice.
@fosscord.ui.button(label="Confirm", style=fosscord.ButtonStyle.green)
async def confirm(
self, button: fosscord.ui.Button, interaction: fosscord.Interaction
):
await interaction.response.send_message("Confirming", ephemeral=True)
self.value = True
self.stop()
# This one is similar to the confirmation button except sets the inner value to `False`
@fosscord.ui.button(label="Cancel", style=fosscord.ButtonStyle.grey)
async def cancel(self, button: fosscord.ui.Button, interaction: fosscord.Interaction):
await interaction.response.send_message("Cancelling", ephemeral=True)
self.value = False
self.stop()
bot = Bot()
@bot.command()
async def ask(ctx: commands.Context):
"""Asks the user a question to confirm something."""
# We create the view and assign it to a variable so we can wait for it later.
view = Confirm()
await ctx.send("Do you want to continue?", view=view)
# Wait for the View to stop listening for input...
await view.wait()
if view.value is None:
print("Timed out...")
elif view.value:
print("Confirmed...")
else:
print("Cancelled...")
bot.run("token")
|
the-stack_0_20930 | """Support for Ubiquiti mFi switches."""
import logging
from mficlient.client import FailedToLogin, MFiClient
import requests
import voluptuous as vol
from openpeerpower.components.switch import PLATFORM_SCHEMA, SwitchEntity
from openpeerpower.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
import openpeerpower.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_SSL = True
DEFAULT_VERIFY_SSL = True
SWITCH_MODELS = ["Outlet", "Output 5v", "Output 12v", "Output 24v", "Dimmer Switch"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
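# Example configuration.yaml entry (illustrative; host and credentials are
# placeholders, key names follow PLATFORM_SCHEMA above):
#
# switch:
#   - platform: mfi
#     host: 192.168.1.20
#     username: admin
#     password: REPLACE_ME
#     port: 6443
#     ssl: true
#     verify_ssl: false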
def setup_platform(opp, config, add_entities, discovery_info=None):
"""Set up mFi sensors."""
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
use_tls = config[CONF_SSL]
verify_tls = config.get(CONF_VERIFY_SSL)
default_port = 6443 if use_tls else 6080
port = int(config.get(CONF_PORT, default_port))
try:
client = MFiClient(
host, username, password, port=port, use_tls=use_tls, verify=verify_tls
)
except (FailedToLogin, requests.exceptions.ConnectionError) as ex:
_LOGGER.error("Unable to connect to mFi: %s", str(ex))
return False
add_entities(
MfiSwitch(port)
for device in client.get_devices()
for port in device.ports.values()
if port.model in SWITCH_MODELS
)
class MfiSwitch(SwitchEntity):
"""Representation of an mFi switch-able device."""
def __init__(self, port):
"""Initialize the mFi device."""
self._port = port
self._target_state = None
@property
def unique_id(self):
"""Return the unique ID of the device."""
return self._port.ident
@property
def name(self):
"""Return the name of the device."""
return self._port.label
@property
def is_on(self):
"""Return true if the device is on."""
return self._port.output
def update(self):
"""Get the latest state and update the state."""
self._port.refresh()
if self._target_state is not None:
self._port.data["output"] = float(self._target_state)
self._target_state = None
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._port.control(True)
self._target_state = True
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._port.control(False)
self._target_state = False
@property
def current_power_w(self):
"""Return the current power usage in W."""
return int(self._port.data.get("active_pwr", 0))
@property
def extra_state_attributes(self):
"""Return the state attributes for the device."""
return {
"volts": round(self._port.data.get("v_rms", 0), 1),
"amps": round(self._port.data.get("i_rms", 0), 1),
}
|
the-stack_0_20931 | """
Copyright (c) 2021 The Orbit Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
"""
from absl import app
from datetime import date, timedelta
from core.orbit_e2e import E2ETestSuite
from test_cases.capture_window import Capture, CheckTimers, CheckThreadStates, FilterTracks, \
ToggleCollapsedStateOfAllTracks, VerifyTracksExist
from test_cases.connection_window import ConnectToStadiaInstance, FilterAndSelectFirstProcess, LoadCapture, \
LoadLatestCapture
from test_cases.live_tab import AddIterator, VerifyFunctionCallCount
from test_cases.main_window import EndSession
"""Verify loading a capture in Orbit using pywinauto.
Before this script is run there needs to be a gamelet reserved and
"hello_ggp_standalone" has to be started. Further, Orbit needs to be started.
Also, the captures directory should be cleared.
The script requires absl and pywinauto. Since pywinauto requires the bitness of
the python installation to match the bitness of the program under test it needs
to be run from 64 bit python.
This automation script covers a basic workflow:
- load an old, unsupported capture and verify this fails with a message
- load a supported capture
- verify that the scheduler track is present and contains timers
- verify that the frame track is present and contains timers
- verify that the tracks from manual instrumentation are present
- verify that the memory tracks are present
- verify that an iterator can be added to "TestFunc2"
- verify that "TestFunc2" was called exactly 1257 times
- take a capture and verify there is a corresponding capture in the latest captures list which contains the tracks
"""
def main(argv):
# During the tests, we want to verify that captures get automatically saved. We will do so by filtering the recent
# captures list with the current date (in addition to also deleting old captures before this script runs). However,
# if it is around midnight when this code gets executed and we store the date string, it can be that the capture
# actually gets taken on the next day. Therefore, we will also check for the next day.
today = date.today()
tomorrow = today + timedelta(days=1)
today_string = today.strftime("%Y_%m_%d")
tomorrow_string = tomorrow.strftime("%Y_%m_%d")
test_cases = [
LoadCapture(capture_file_path="test_data\\OrbitTest_1-64.orbit", expect_fail=True),
LoadCapture(capture_file_path="test_data\\OrbitTest_1-72.orbit"),
FilterTracks(filter_string="Scheduler", expected_track_count=1),
CheckTimers(track_name_filter='Scheduler*'),
FilterTracks(filter_string="Frame", expected_track_count=1),
CheckTimers(track_name_filter='Frame track*'), # Verify the frame track has timers
FilterTracks(filter_string="DynamicName_", expected_track_count=5),
FilterTracks(filter_string="_var", expected_track_count=6),
FilterTracks(filter_string="OrbitThread_", expected_track_count=1),
ToggleCollapsedStateOfAllTracks(),
CheckTimers(track_name_filter="OrbitThread_*"),
CheckThreadStates(track_name_filter='OrbitThread_*'),
FilterTracks(filter_string="ORBIT_ASYNC_TASKS", expected_track_count=1),
CheckTimers(track_name_filter="ORBIT_ASYNC_TASKS"),
FilterTracks(filter_string="ORBIT_START_ASYNC_TEST", expected_track_count=1),
CheckTimers(track_name_filter="ORBIT_START_ASYNC_TEST"),
FilterTracks(filter_string=""),
VerifyTracksExist(track_names=["Page*", "*System*", "*CGroup*"], allow_duplicates=True),
AddIterator(function_name="TestFunc2"),
VerifyFunctionCallCount(function_name="TestFunc2", min_calls=1257, max_calls=1257),
# Let's take a capture with the current version and verify this can be loaded
EndSession(),
ConnectToStadiaInstance(),
FilterAndSelectFirstProcess(process_filter="hello_ggp"),
Capture(),
VerifyTracksExist(track_names="hello_ggp_stand*", allow_duplicates=True),
EndSession(),
# If we took the capture around midnight, we need to ensure to also look for the next day. Remember, the strings
# get created before the tests run. Thus the `today_string` might be actually from the day before the capture
# gets auto-saved.
LoadLatestCapture(filter_strings=[f"hello_ggp_stand_{today_string}", f"hello_ggp_stand_{tomorrow_string}"]),
VerifyTracksExist(track_names="hello_ggp_stand*", allow_duplicates=True)
]
suite = E2ETestSuite(test_name="Capture Loading", test_cases=test_cases)
suite.execute()
if __name__ == '__main__':
app.run(main)
|
the-stack_0_20933 | import base64
import os
import pytest
import rancher
import time
from .common import create_config_file
from .common import create_user
from .common import random_test_name
from .common import readDataFile
from .common import run_command_with_stderr
from .common import set_url_password_token
from lib.aws import AmazonWebServices
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
RANCHER_CHART_VERSION = os.environ.get("RANCHER_CHART_VERSION")
test_run_id = random_test_name("auto")
# if hostname is not provided, generate one ( to support onTag )
RANCHER_HOSTNAME_PREFIX = os.environ.get("RANCHER_HOSTNAME_PREFIX", test_run_id)
resource_suffix = test_run_id + "-" + RANCHER_HOSTNAME_PREFIX
RANCHER_HA_HOSTNAME = os.environ.get("RANCHER_HA_HOSTNAME", RANCHER_HOSTNAME_PREFIX + ".qa.rancher.space")
RANCHER_IMAGE_TAG = os.environ.get("RANCHER_IMAGE_TAG")
RANCHER_SERVER_URL = "https://" + RANCHER_HA_HOSTNAME
RANCHER_HELM_REPO = os.environ.get("RANCHER_HELM_REPO", "latest")
RANCHER_LETSENCRYPT_EMAIL = os.environ.get("RANCHER_LETSENCRYPT_EMAIL")
# cert type for HA install [rancher-self-signed, byo-valid, byo-self-signed, letsencrypt]
RANCHER_HA_CERT_OPTION = os.environ.get("RANCHER_HA_CERT_OPTION", "rancher-self-signed")
RANCHER_VALID_TLS_CERT = os.environ.get("RANCHER_VALID_TLS_CERT")
RANCHER_VALID_TLS_KEY = os.environ.get("RANCHER_VALID_TLS_KEY")
RANCHER_BYO_TLS_CERT = os.environ.get("RANCHER_BYO_TLS_CERT")
RANCHER_BYO_TLS_KEY = os.environ.get("RANCHER_BYO_TLS_KEY")
RANCHER_PRIVATE_CA_CERT = os.environ.get("RANCHER_PRIVATE_CA_CERT")
RANCHER_HA_KUBECONFIG = os.environ.get("RANCHER_HA_KUBECONFIG")
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
kubeconfig_path = DATA_SUBDIR + "/kube_config_cluster-ha-filled.yml"
export_cmd = "export KUBECONFIG=" + kubeconfig_path
def test_create_ha(precheck_certificate_options):
cm_install = True
if "byo-" in RANCHER_HA_CERT_OPTION:
cm_install = False
ha_setup(install_cm=cm_install)
install_rancher()
ha_finalize()
def test_upgrade_ha(precheck_upgrade_options):
write_kubeconfig()
add_repo_create_namespace()
install_rancher(upgrade=True)
def ha_setup(install_cm=True):
print(RANCHER_HA_HOSTNAME)
nodes = create_resources()
rke_config = create_rke_cluster_config(nodes)
create_rke_cluster(rke_config)
if install_cm == True:
install_cert_manager()
add_repo_create_namespace()
def ha_finalize():
set_url_and_password()
print_kubeconfig()
def create_resources():
# Create nlb and grab ARN & dns name
lb = AmazonWebServices().create_network_lb(name="nlb-" + resource_suffix)
lbArn = lb["LoadBalancers"][0]["LoadBalancerArn"]
lbDns = lb["LoadBalancers"][0]["DNSName"]
# Upsert the route53 record -- if it exists, update, if not, insert
AmazonWebServices().upsert_route_53_record_cname(RANCHER_HA_HOSTNAME,
lbDns)
# Create the target groups
targetGroup80 = AmazonWebServices(). \
create_ha_target_group(80, "tg-80-" + resource_suffix)
targetGroup443 = AmazonWebServices(). \
create_ha_target_group(443, "tg-443-" + resource_suffix)
targetGroup80Arn = targetGroup80["TargetGroups"][0]["TargetGroupArn"]
targetGroup443Arn = targetGroup443["TargetGroups"][0]["TargetGroupArn"]
# Create listeners for the load balancer, to forward to the target groups
AmazonWebServices().create_ha_nlb_listener(loadBalancerARN=lbArn,
port=80,
targetGroupARN=targetGroup80Arn)
AmazonWebServices().create_ha_nlb_listener(loadBalancerARN=lbArn,
port=443,
targetGroupARN=targetGroup443Arn)
targets = []
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
3, resource_suffix, wait_for_ready=True)
assert len(aws_nodes) == 3
for aws_node in aws_nodes:
print(aws_node.public_ip_address)
targets.append(aws_node.provider_node_id)
# Register the nodes to the target groups
targets_list = [dict(Id=target_id, Port=80) for target_id in targets]
AmazonWebServices().register_targets(targets_list,
targetGroup80Arn)
targets_list = [dict(Id=target_id, Port=443) for target_id in targets]
AmazonWebServices().register_targets(targets_list,
targetGroup443Arn)
return aws_nodes
def install_cert_manager():
helm_certmanager_cmd = \
export_cmd + " && " + \
"kubectl apply -f " + \
"https://raw.githubusercontent.com/jetstack/cert-manager/" + \
"release-0.12/deploy/manifests/00-crds.yaml && " + \
"kubectl create namespace cert-manager && " + \
"helm_v3 repo add jetstack https://charts.jetstack.io && " + \
"helm_v3 repo update && " + \
"helm_v3 install cert-manager jetstack/cert-manager " + \
"--namespace cert-manager --version v0.12.0"
run_command_with_stderr(helm_certmanager_cmd)
time.sleep(120)
def add_repo_create_namespace(repo=RANCHER_HELM_REPO):
helm_repo_cmd = \
export_cmd + " && helm_v3 repo add rancher-" + repo + \
" https://releases.rancher.com/server-charts/" + repo + " && " + \
"helm_v3 repo update"
run_command_with_stderr(helm_repo_cmd)
helm_init_cmd = \
export_cmd + \
" && kubectl create namespace cattle-system"
run_command_with_stderr(helm_init_cmd)
def install_rancher(type=RANCHER_HA_CERT_OPTION, repo=RANCHER_HELM_REPO, upgrade=False):
operation = "install"
if upgrade == True:
operation = "upgrade"
helm_rancher_cmd = \
export_cmd + " && helm_v3 " + operation + " rancher " + \
"rancher-" + repo + "/rancher " + \
"--version " + RANCHER_CHART_VERSION + " " \
"--namespace cattle-system " + \
"--set hostname=" + RANCHER_HA_HOSTNAME
if type == 'letsencrypt':
helm_rancher_cmd = helm_rancher_cmd + \
" --set ingress.tls.source=letsEncrypt " + \
"--set letsEncrypt.email=" + RANCHER_LETSENCRYPT_EMAIL
elif type == 'byo-self-signed':
helm_rancher_cmd = helm_rancher_cmd + \
" --set ingress.tls.source=secret " + \
"--set privateCA=true"
elif type == 'byo-valid':
helm_rancher_cmd = helm_rancher_cmd + \
" --set ingress.tls.source=secret"
if RANCHER_IMAGE_TAG != "" and RANCHER_IMAGE_TAG is not None:
helm_rancher_cmd = helm_rancher_cmd + \
" --set rancherImageTag=" + RANCHER_IMAGE_TAG
if operation == "install":
if type == "byo-self-signed":
create_tls_secrets(valid_cert=False)
elif type == "byo-valid":
create_tls_secrets(valid_cert=True)
run_command_with_stderr(helm_rancher_cmd)
time.sleep(120)
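# For reference, with type='letsencrypt' and repo='latest' the command assembled
# by install_rancher() above expands to roughly the following (chart version,
# hostname and email are placeholders):
#   export KUBECONFIG=.../kube_config_cluster-ha-filled.yml && \
#   helm_v3 install rancher rancher-latest/rancher --version 2.5.8 \
#     --namespace cattle-system --set hostname=myhost.qa.rancher.space \
#     --set ingress.tls.source=letsEncrypt --set letsEncrypt.email=me@example.com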
def create_tls_secrets(valid_cert):
cert_path = DATA_SUBDIR + "/tls.crt"
key_path = DATA_SUBDIR + "/tls.key"
ca_path = DATA_SUBDIR + "/cacerts.pem"
if valid_cert == True:
# write files from env var
write_encoded_certs(cert_path, RANCHER_VALID_TLS_CERT)
write_encoded_certs(key_path, RANCHER_VALID_TLS_KEY)
else:
write_encoded_certs(cert_path, RANCHER_BYO_TLS_CERT)
write_encoded_certs(key_path, RANCHER_BYO_TLS_KEY)
write_encoded_certs(ca_path, RANCHER_PRIVATE_CA_CERT)
tls_command = export_cmd + " && kubectl -n cattle-system create secret tls tls-rancher-ingress --cert=" + cert_path + " --key=" + key_path
ca_command = export_cmd + " && kubectl -n cattle-system create secret generic tls-ca --from-file=" + ca_path
run_command_with_stderr(tls_command)
if valid_cert == False:
run_command_with_stderr(ca_command)
def write_encoded_certs(path, contents):
file = open(path, "w")
file.write(base64.b64decode(contents).decode("utf-8"))
file.close()
def write_kubeconfig():
file = open(kubeconfig_path, "w")
file.write(base64.b64decode(RANCHER_HA_KUBECONFIG).decode("utf-8"))
file.close()
def set_url_and_password():
admin_token = set_url_password_token(RANCHER_SERVER_URL)
admin_client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
token=admin_token, verify=False)
AUTH_URL = RANCHER_SERVER_URL + \
"/v3-public/localproviders/local?action=login"
user, user_token = create_user(admin_client, AUTH_URL)
env_details = "env.CATTLE_TEST_URL='" + RANCHER_SERVER_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + admin_token + "'\n"
env_details += "env.USER_TOKEN='" + user_token + "'\n"
create_config_file(env_details)
def create_rke_cluster(config_path):
rke_cmd = "rke --version && rke up --config " + config_path
run_command_with_stderr(rke_cmd)
def print_kubeconfig():
kubeconfig_file = open(kubeconfig_path, "r")
kubeconfig_contents = kubeconfig_file.read()
kubeconfig_file.close()
kubeconfig_contents_encoded = base64.b64encode(kubeconfig_contents.encode("utf-8")).decode("utf-8")
print("\n\n" + kubeconfig_contents + "\n\n")
print("\nBase64 encoded: \n\n" + kubeconfig_contents_encoded + "\n\n")
def create_rke_cluster_config(aws_nodes):
configfile = "cluster-ha.yml"
rkeconfig = readDataFile(DATA_SUBDIR, configfile)
rkeconfig = rkeconfig.replace("$ip1", aws_nodes[0].public_ip_address)
rkeconfig = rkeconfig.replace("$ip2", aws_nodes[1].public_ip_address)
rkeconfig = rkeconfig.replace("$ip3", aws_nodes[2].public_ip_address)
rkeconfig = rkeconfig.replace("$internalIp1",
aws_nodes[0].private_ip_address)
rkeconfig = rkeconfig.replace("$internalIp2",
aws_nodes[1].private_ip_address)
rkeconfig = rkeconfig.replace("$internalIp3",
aws_nodes[2].private_ip_address)
rkeconfig = rkeconfig.replace("$AWS_SSH_KEY_NAME", AWS_SSH_KEY_NAME)
clusterfilepath = DATA_SUBDIR + "/" + "cluster-ha-filled.yml"
f = open(clusterfilepath, "w")
f.write(rkeconfig)
f.close()
return clusterfilepath
@pytest.fixture(scope='module')
def precheck_certificate_options():
if RANCHER_HA_CERT_OPTION == 'byo-valid':
if RANCHER_VALID_TLS_CERT == '' or RANCHER_VALID_TLS_KEY == '' or \
RANCHER_VALID_TLS_CERT is None or RANCHER_VALID_TLS_KEY is None:
raise pytest.skip('Valid certificates not found in environment variables')
elif RANCHER_HA_CERT_OPTION == 'byo-self-signed':
if RANCHER_BYO_TLS_CERT == '' or RANCHER_BYO_TLS_KEY == '' or RANCHER_PRIVATE_CA_CERT == '' or \
RANCHER_BYO_TLS_CERT is None or RANCHER_BYO_TLS_KEY is None or RANCHER_PRIVATE_CA_CERT is None:
raise pytest.skip('Self signed certificates not found in environment variables')
elif RANCHER_HA_CERT_OPTION == 'letsencrypt':
if RANCHER_LETSENCRYPT_EMAIL == '' or RANCHER_LETSENCRYPT_EMAIL is None:
raise pytest.skip('LetsEncrypt email is not found in environment variables')
@pytest.fixture(scope='module')
def precheck_upgrade_options():
if RANCHER_HA_KUBECONFIG == '' or RANCHER_HA_KUBECONFIG is None:
raise pytest.skip('Kubeconfig is not found for upgrade!')
if RANCHER_HA_HOSTNAME == '' or RANCHER_HA_HOSTNAME is None:
raise pytest.skip('Hostname is not found for upgrade!') |
the-stack_0_20937 | from torch.utils.data import Dataset
import wget, os
import pandas as pd
import torch, numpy as np
class HeartDataset(Dataset):
def __init__(self, root):
fname = os.path.join(root, 'HeartDisease.csv')
if not os.path.exists(fname):
            raise FileNotFoundError('Please download the data from https://www.kaggle.com/sonumj/heart-disease-dataset-from-uci !')
self.df = pd.read_csv(fname, header=0)
train_cols = self.df.columns[0:-2]
self.labels = self.df.columns[-2]
self.labels = self.df[self.labels]
self.df = self.df[train_cols]
for col_name in self.df.columns:
self.df[col_name].fillna(self.df[col_name].mode()[0], inplace=True)
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
# Convert idx from tensor to list due to pandas bug (that arises when using pytorch's random_split)
if isinstance(idx, torch.Tensor):
idx = idx.tolist()
return torch.FloatTensor(self.df.iloc[idx].values.astype(float)), self.labels[idx].astype(np.long)
class CreditCardFraudDataset(Dataset):
def __init__(self, root):
fname = os.path.join(root, 'creditcard.csv')
if not os.path.exists(fname):
            raise FileNotFoundError('Please download the data from https://www.kaggle.com/mlg-ulb/creditcardfraud !')
self.df = pd.read_csv(fname, header=0)
self.df = self.df.dropna()
self.labels = self.df[self.df.columns[-1]]
self.df = self.df[self.df.columns[0:-1]]
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
# Convert idx from tensor to list due to pandas bug (that arises when using pytorch's random_split)
if isinstance(idx, torch.Tensor):
idx = idx.tolist()
return torch.FloatTensor(self.df.iloc[idx].values.astype(float)), self.labels[idx].astype(np.long)
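# Illustrative usage sketch: wiring one of the datasets above into a DataLoader.
# The root path, split ratio and batch size are placeholders; the CSV file must
# already be present, as required by the dataset's __init__.
def _example_dataloader_usage(root='data/'):
    from torch.utils.data import DataLoader, random_split
    dataset = HeartDataset(root)
    train_len = int(0.8 * len(dataset))
    train_set, val_set = random_split(dataset, [train_len, len(dataset) - train_len])
    loader = DataLoader(train_set, batch_size=32, shuffle=True)
    features, labels = next(iter(loader))
    return features.shape, labels.shape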
|
the-stack_0_20940 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import default_main_program
from ... import default_startup_program
from ... import layers
from ... import unique_name
from . import fp16_utils
from .fp16_utils import update_loss_scaling, rewrite_program
from .fp16_utils import update_role_var_grad
from .fp16_lists import AutoMixedPrecisionLists
__all__ = ["decorate"]
class OptimizerWithMixedPrecision(object):
"""
Optimizer with mixed-precision (MP) training. This is a wrapper of a common
optimizer, plus the support of mixed-precision pretraining. The object
of this class almost has the same behavior as the common optimizer, with the
methods `minimize()`, `backward()`, `apply_gradients()` implemented.
Additionally, it enables the MP training automatically, i.e, the creation
and maintenance of master parameters, scaling of loss, etc.
Args:
optimizer (Optimizer): A common Optimizer object.
amp_lists (AutoMixedPrecisionLists): An AutoMixedPrecisionLists object.
init_loss_scaling (float): The initial loss scaling factor.
use_dynamic_loss_scaling (bool): Whether to use dynamic loss scaling.
incr_every_n_steps(int): Increases loss scaling every n consecutive
steps with finite gradients.
decr_every_n_nan_or_inf(int): Decreases loss scaling every n
accumulated steps with nan or
inf gradients.
incr_ratio(float): The multiplier to use when increasing the loss
scaling.
decr_ratio(float): The less-than-one-multiplier to use when decreasing
the loss scaling.
"""
def __init__(self, optimizer, amp_lists, init_loss_scaling,
use_dynamic_loss_scaling, incr_every_n_steps,
decr_every_n_nan_or_inf, incr_ratio, decr_ratio):
self._optimizer = optimizer
self._amp_lists = amp_lists
self._param_grads = None
self._train_program = default_main_program()
self._startup_prog = default_startup_program()
self._scaled_loss = None
self._loss_scaling = layers.create_global_var(
name=unique_name.generate("loss_scaling"),
shape=[1],
value=init_loss_scaling,
dtype='float32',
persistable=True)
self._use_dynamic_loss_scaling = use_dynamic_loss_scaling
if self._use_dynamic_loss_scaling:
self._incr_every_n_steps = layers.fill_constant(
shape=[1], dtype='int32', value=incr_every_n_steps)
self._decr_every_n_nan_or_inf = layers.fill_constant(
shape=[1], dtype='int32', value=decr_every_n_nan_or_inf)
self._incr_ratio = incr_ratio
self._decr_ratio = decr_ratio
self._num_good_steps = layers.create_global_var(
name=unique_name.generate("num_good_steps"),
shape=[1],
value=0,
dtype='int32',
persistable=True)
self._num_bad_steps = layers.create_global_var(
name=unique_name.generate("num_bad_steps"),
shape=[1],
value=0,
dtype='int32',
persistable=True)
# Ensure the data type of learning rate vars is float32 (same as the
# master parameter dtype)
if isinstance(optimizer._learning_rate, float):
optimizer._learning_rate_map[default_main_program()] = \
layers.create_global_var(
name=unique_name.generate("learning_rate"),
shape=[1],
value=float(optimizer._learning_rate),
dtype='float32',
persistable=True)
def get_loss_scaling(self):
"""Return the real-time loss scaling factor.
"""
return self._loss_scaling
def get_scaled_loss(self):
"""Return the scaled loss.
It's useful when you feed customed loss into executor.
"""
return self._scaled_loss
def backward(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None,
callbacks=None):
"""
        Backward propagation or auto differentiation for gradients' computation.
Args:
loss (Variable): The loss Variable to minimize.
startup_program (Program|None): The startup Program for initializing
parameters in `parameter_list`.
parameter_list (list|None): A list of Variables to update.
no_grad_set (set|None): A set of Variables should be ignored.
callbacks (list|None): A list of callables to run when appending
backward operator for one parameter.
Returns:
A list of (param, grad), which is a tuple of a parameter and its
gradient respectively, and the scaled loss.
"""
rewrite_program(self._train_program, self._amp_lists)
self._scaled_loss = loss * self._loss_scaling
self._params_grads = self._optimizer.backward(
self._scaled_loss, startup_program, parameter_list, no_grad_set,
callbacks)
update_role_var_grad(self._train_program, self._params_grads)
scaled_params_grads = []
for p, g in self._params_grads:
with self._train_program._optimized_guard([p, g]):
scaled_g = g / self._loss_scaling
scaled_params_grads.append([p, scaled_g])
return scaled_params_grads
def apply_gradients(self, scaled_params_grads):
"""
Check scaled gradients to determine whether to update loss scaling and update
        parameters by their scaled gradients.
Args:
scaled_params_grads (list): A list of params and scaled grads.
Returns:
A list of optimize operators.
"""
if self._use_dynamic_loss_scaling:
grads = [layers.reduce_sum(g) for [_, g] in scaled_params_grads]
all_grads = layers.concat(grads)
all_grads_sum = layers.reduce_sum(all_grads)
is_overall_finite = layers.isfinite(all_grads_sum)
update_loss_scaling(is_overall_finite, self._loss_scaling,
self._num_good_steps, self._num_bad_steps,
self._incr_every_n_steps,
self._decr_every_n_nan_or_inf, self._incr_ratio,
self._decr_ratio)
# apply_gradient append all ops in global block, thus we shouldn't
# apply gradient in the switch branch.
with layers.Switch() as switch:
with switch.case(is_overall_finite):
pass
with switch.default():
for _, g in scaled_params_grads:
layers.assign(layers.zeros_like(g), g)
optimize_ops = self._optimizer.apply_gradients(scaled_params_grads)
return optimize_ops
def minimize(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
"""
Perform optimization by minimizing the given loss.
Args:
loss (Variable): The loss Variable.
startup_program (Program): startup_program for initializing parameters
in `parameter_list`.
parameter_list (list): list of Variables to update.
no_grad_set (set|None): set of Variables should be ignored.
Returns:
The scaled loss by scaling factor, the list of optimize ops, and a
list of scaled parameters and gradients.
"""
scaled_params_grads = self.backward(
loss,
startup_program=startup_program,
parameter_list=parameter_list,
no_grad_set=no_grad_set)
optimize_ops = self.apply_gradients(scaled_params_grads)
return optimize_ops, scaled_params_grads
def decorate(optimizer,
amp_lists=None,
init_loss_scaling=1.0,
incr_every_n_steps=1000,
decr_every_n_nan_or_inf=2,
incr_ratio=2.0,
decr_ratio=0.8,
use_dynamic_loss_scaling=True):
"""
Decorate the given optimizer to adapt to the mixed-precision training.
Args:
optimizer(Optimizer): A common Optimizer.
amp_lists (AutoMixedPrecisionLists): An AutoMixedPrecisionLists object.
init_loss_scaling(float): The initial loss scaling factor.
incr_every_n_steps(int): Increases loss scaling every n consecutive
steps with finite gradients.
decr_every_n_nan_or_inf(int): Decreases loss scaling every n
accumulated steps with nan or
inf gradients.
incr_ratio(float): The multiplier to use when increasing the loss
scaling.
decr_ratio(float): The less-than-one-multiplier to use when decreasing
the loss scaling.
use_dynamic_loss_scaling(bool): Whether to use dynamic loss scaling.
Returns:
An optimizer acting like a normal one but with mixed-precision training
enabled.
Examples:
.. code-block:: python
loss = network()
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
mp_optimizer = fluid.contrib.mixed_precision.decorate(
optimizer=optimizer, init_loss_scaling=8.0)
ops, param_grads = mp_optimizer.minimize(loss)
scaled_loss = mp_optimizer.get_scaled_loss()
"""
if amp_lists is None:
amp_lists = AutoMixedPrecisionLists()
    mp_optimizer = OptimizerWithMixedPrecision(
optimizer, amp_lists, init_loss_scaling, use_dynamic_loss_scaling,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio)
return mp_optimizer
|
the-stack_0_20941 | import re
from concurrent.futures import as_completed
from os import name as os_name
from pathlib import Path
from requests_futures.sessions import FuturesSession
from pathvalidate import sanitize_filepath
from pytr.utils import preview, Timeline, get_logger
class DL:
def __init__(self, tr, output_path, filename_fmt, since_timestamp=0):
'''
tr: api object
output_path: name of the directory where the downloaded files are saved
filename_fmt: format string to customize the file names
since_timestamp: downloaded files since this date (unix timestamp)
'''
self.tr = tr
self.output_path = Path(output_path)
self.filename_fmt = filename_fmt
self.since_timestamp = since_timestamp
self.session = FuturesSession()
self.futures = []
self.docs_request = 0
self.done = 0
self.filepaths = []
self.doc_urls = []
self.tl = Timeline(self.tr)
self.log = get_logger(__name__)
async def dl_loop(self):
await self.tl.get_next_timeline(max_age_timestamp=self.since_timestamp)
while True:
_subscription_id, subscription, response = await self.tr.recv()
# try:
# _subscription_id, subscription, response = await self.tr.recv()
# except TradeRepublicError as e:
# self.log.error(str(e))
if subscription['type'] == 'timeline':
await self.tl.get_next_timeline(response, max_age_timestamp=self.since_timestamp)
elif subscription['type'] == 'timelineDetail':
await self.tl.timelineDetail(response, self, max_age_timestamp=self.since_timestamp)
else:
self.log.warning(f"unmatched subscription of type '{subscription['type']}':\n{preview(response)}")
def dl_doc(self, doc, titleText, subtitleText, subfolder=None):
'''
send asynchronous request, append future with filepath to self.futures
'''
doc_url = doc['action']['payload']
date = doc['detail']
iso_date = '-'.join(date.split('.')[::-1])
# extract time from subtitleText
time = re.findall('um (\\d+:\\d+) Uhr', subtitleText)
if time == []:
time = ''
else:
time = f' {time[0]}'
if subfolder is not None:
directory = self.output_path / subfolder
else:
directory = self.output_path
# If doc_type is something like 'Kosteninformation 2', then strip the 2 and save it in doc_type_num
doc_type = doc['title'].rsplit(' ')
if doc_type[-1].isnumeric() is True:
doc_type_num = f' {doc_type.pop()}'
else:
doc_type_num = ''
doc_type = ' '.join(doc_type)
titleText = titleText.replace('\n', '').replace('/', '-')
subtitleText = subtitleText.replace('\n', '').replace('/', '-')
filename = self.filename_fmt.format(
iso_date=iso_date, time=time, title=titleText, subtitle=subtitleText, doc_num=doc_type_num
)
if os_name == 'nt':
badChars = ['/', '\n', ':', '@', '.']
for badChar in badChars:
filename = filename.replace(badChar, '')
if doc_type in ['Kontoauszug', 'Depotauszug']:
filepath = directory / 'Abschlüsse' / f'{filename}' / f'{doc_type}.pdf'
else:
filepath = directory / doc_type / f'{filename}.pdf'
filepath = sanitize_filepath(filepath, '_', os_name)
if filepath in self.filepaths:
self.log.debug(f'File {filepath} already in queue. Skipping...')
return
else:
self.filepaths.append(filepath)
if filepath.is_file() is False:
doc_url_base = doc_url.split('?')[0]
if doc_url_base in self.doc_urls:
self.log.debug(f'URL {doc_url_base} already in queue. Skipping...')
return
else:
self.doc_urls.append(doc_url_base)
future = self.session.get(doc_url)
future.filepath = filepath
self.futures.append(future)
else:
self.log.debug(f'file {filepath} already exists. Skipping...')
def work_responses(self):
'''
process responses of async requests
'''
if len(self.doc_urls) == 0:
self.log.info('Nothing to download')
exit(0)
self.log.info('Waiting for downloads to complete..')
for future in as_completed(self.futures):
if future.filepath.is_file() is True:
self.log.debug(f'file {future.filepath} was already downloaded.')
r = future.result()
future.filepath.parent.mkdir(parents=True, exist_ok=True)
with open(future.filepath, 'wb') as f:
f.write(r.content)
self.done += 1
self.log.debug(f'{self.done:>3}/{len(self.doc_urls)} {future.filepath.name}')
if self.done == len(self.doc_urls):
self.log.info('Done.')
exit(0)
def dl_all(output_path):
'''
TODO
'''
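# Illustrative usage sketch: constructing the downloader and running its event
# loop. `tr` must be a logged-in pytr API object; the output path and filename
# format are placeholders whose fields match those substituted in dl_doc() above.
def _example_dl_usage(tr):
    import asyncio
    dl = DL(
        tr=tr,
        output_path='Downloads/TradeRepublic',
        filename_fmt='{iso_date}{time} {title}--{subtitle}{doc_num}',
        since_timestamp=0,
    )
    asyncio.get_event_loop().run_until_complete(dl.dl_loop())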
|
the-stack_0_20942 | import logging
import os
import psycopg2
from psycopg2 import extensions
from typing import Optional
import doesntCare
db: psycopg2.extensions.connection
def connect() -> bool:
global db
try:
db = psycopg2.connect(os.environ.get('DATABASE_URL'))
db.autocommit = True
logging.info('Connected to database')
return True
except psycopg2.Error:
logging.exception('Error while connecting to database')
return False
def create_tables() -> bool:
global db
try:
with db.cursor() as cur:
cur.execute(
'CREATE TABLE IF NOT EXISTS \"DC_List\"'
'('
'id SERIAL,'
'chat_id BIGINT NOT NULL,'
'not_important_id TEXT NOT NULL,'
'doesnt_care_id BIGINT NOT NULL,'
'response_mode SMALLINT NOT NULL,'
'response_mode_option REAL NOT NULL,'
'last_response_dt TIMESTAMP NOT NULL,'
'response_counter INTEGER NOT NULL,'
'PRIMARY KEY (id)'
');'
)
logging.info('Tables checked successfully')
cur.execute(
'CREATE UNIQUE INDEX IF NOT EXISTS \"DC_Index\"'
'ON \"DC_List\"('
'chat_id,'
'not_important_id,'
'doesnt_care_id'
')'
)
logging.info('Indexes checked successfully')
return True
except psycopg2.Error:
logging.exception('Error while creating tables')
return False
def insert(dc: doesntCare.DoesntCare) -> bool:
global db
try:
with db.cursor() as cur:
cur.execute(
'INSERT INTO \"DC_List\"(chat_id, not_important_id, doesnt_care_id, response_mode, '
'response_mode_option, last_response_dt, response_counter) '
'VALUES(%s, %s, %s, %s, %s, %s, %s)',
(dc.chat_id, dc.not_important_id, dc.doesnt_care_id, dc.response_mode, dc.response_mode_option,
dc.last_response_dt, dc.response_counter)
)
return True
except psycopg2.Error:
logging.exception('Error while adding new entry to database')
return False
def update(dc: doesntCare.DoesntCare) -> bool:
global db
try:
with db.cursor() as cur:
cur.execute(
'UPDATE \"DC_List\" SET '
'last_response_dt = %s,'
'response_counter = %s'
'WHERE chat_id = %s AND '
'not_important_id = %s AND '
'doesnt_care_id = %s',
(dc.last_response_dt, dc.response_counter, dc.chat_id, dc.not_important_id, dc.doesnt_care_id)
)
return True
except psycopg2.Error:
logging.exception('Error while adding new entry to database')
return False
def remove(dc: doesntCare.DoesntCare) -> bool:
global db
try:
with db.cursor() as cur:
cur.execute(
'DELETE FROM \"DC_List\" WHERE '
'chat_id = %s AND '
'not_important_id = %s AND '
'doesnt_care_id = %s',
(dc.chat_id, dc.not_important_id, dc.doesnt_care_id)
)
return True
except psycopg2.Error:
logging.exception('Error while removing entry from database')
return False
def remove_all_dci(doesnt_care_id: int, chat_id: int) -> bool:
global db
try:
with db.cursor() as cur:
cur.execute(
'DELETE FROM \"DC_List\" WHERE '
'doesnt_care_id = %s AND '
'chat_id = %s',
(doesnt_care_id, chat_id)
)
return True
except psycopg2.Error:
logging.exception('Error while removing all for doesnt_care_id')
return False
def find(chat_id: int, not_important_id: str, doesnt_care_id: int) -> Optional[doesntCare.DoesntCare]:
global db
try:
with db.cursor() as cur:
cur.execute(
'SELECT response_mode, response_mode_option, last_response_dt, response_counter '
'FROM \"DC_List\" WHERE '
'chat_id = %s AND '
'not_important_id = %s AND '
'doesnt_care_id = %s',
(chat_id, not_important_id, doesnt_care_id)
)
row = cur.fetchone()
if row is None:
return None # Record not found
return doesntCare.DoesntCare(
chat_id=chat_id,
not_important_id=not_important_id,
doesnt_care_id=doesnt_care_id,
response_mode=row[0],
response_mode_option=row[1],
last_response_dt=row[2],
response_counter=row[3]
)
except psycopg2.Error:
logging.exception('Error while querying data')
raise
def find_by_nii_ci(not_important_id: str, chat_id: int) -> Optional[list]:
global db
dc_list = []
try:
with db.cursor() as cur:
cur.execute(
'SELECT doesnt_care_id, response_mode, response_mode_option, last_response_dt, response_counter '
'FROM \"DC_List\" WHERE '
'not_important_id = %s AND '
'chat_id = %s',
(not_important_id, chat_id)
)
res = cur.fetchall()
for row in res:
dc_list.append(doesntCare.DoesntCare(
chat_id=chat_id,
not_important_id=not_important_id,
doesnt_care_id=row[0],
response_mode=row[1],
response_mode_option=row[2],
last_response_dt=row[3],
response_counter=row[4]
))
return dc_list
except psycopg2.Error:
logging.exception('Error while querying data')
return None
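# Illustrative usage sketch: DATABASE_URL must point at a reachable Postgres
# instance; the field values below are placeholders and the DoesntCare keyword
# arguments mirror those used in find() above.
def _example_usage():
    import datetime
    if not connect() or not create_tables():
        return None
    dc = doesntCare.DoesntCare(
        chat_id=-1001234567890,
        not_important_id='user:42',
        doesnt_care_id=7,
        response_mode=0,
        response_mode_option=1.0,
        last_response_dt=datetime.datetime.utcnow(),
        response_counter=0,
    )
    insert(dc)
    return find(dc.chat_id, dc.not_important_id, dc.doesnt_care_id)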
|
the-stack_0_20943 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import os
from operator import itemgetter
import click
from ....subprocess import run_command
from ...utils import get_valid_checks, get_version_file
from ..console import CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success
@click.command(
context_settings=CONTEXT_SETTINGS, short_help="Verify if a custom check or integration can run on python 3"
)
@click.argument('check')
def py3(check):
"""Verify if a custom check or integration can run on python 3. CHECK
can be an integration name or a valid path to a Python module or package folder.
"""
if check in get_valid_checks():
path_to_module = os.path.dirname(get_version_file(check))
else:
path_to_module = check
if not os.path.exists(path_to_module):
abort(u"{} does not exist.".format(path_to_module))
echo_info(u"Validating python3 compatibility of {}...".format(check))
cmd = ["pylint", "-f", "json", "--py3k", "-d", "W1618", "--persistent", "no", "--exit-zero", path_to_module]
results = json.loads(run_command(cmd, capture='stdout').stdout)
if results:
echo_failure(u"Incompatibilities were found for {}:".format(check))
current_path = None
for problem in sorted(results, key=itemgetter("path")):
# pylint returns an array a dicts like
# {
# "line": 23,
# "column": 8,
# "message": "Calling a dict.iter*() method",
# "file": "/path/to/file.py",
# }
path = problem["path"]
if current_path is None or path != current_path:
echo_info(u"File {}:".format(path))
echo_failure(" Line {}, column {}: {}".format(problem['line'], problem['column'], problem["message"]))
current_path = path
abort()
else:
echo_success(u"{} is compatible with python3".format(check))
|
the-stack_0_20944 | # -*- coding: utf-8 -*-
from .datatables import Datatable, ValuesDatatable, LegacyDatatable
from .columns import (Column, TextColumn, DateColumn, DateTimeColumn, BooleanColumn, IntegerColumn,
FloatColumn, DisplayColumn, CompoundColumn)
from .exceptions import SkipRecord
__name__ = 'datatableview'
__author__ = 'Autumn Valenta'
__version_info__ = (0, 9, 0)
__version__ = '.'.join(map(str, __version_info__))
__date__ = '2013/11/14 2:00:00 PM'
__credits__ = ['Autumn Valenta', 'Steven Klass']
__license__ = 'See the file LICENSE.txt for licensing information.'
|
the-stack_0_20945 | import tensorflow as tf
LAYER = 5
if LAYER == 7:
from bvlc_alexnet_fc7 import AlexNet
elif LAYER == 5:
from bvlc_alexnet_cn5 import AlexNet
import fine_tune_nt
import numpy as np
import os
import time
import cv2
import image_io
# the dimension of the final layer = feature dim
NN_DIM = 100
LABEL_DIM = 10
TRAIN_TXT = 'file_list_fine_tune_train.txt'
# TRAIN_TXT = 'file_list_fine_tune_test_nba_dunk.txt'
TRAIN = True
SHUFFLE_DATA = False
BATCH_SIZE = 1
FEATURE_ROW = 227
FEATURE_COL = 227
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_log_dir','fine_tune_nn_logs',
'''directory wherer to write event logs''')
tf.app.flags.DEFINE_integer('max_training_iter', 10000,
'''the max number of training iteration''')
tf.app.flags.DEFINE_float('init_learning_rate',0.001,
'''initial learning rate''')
tf.app.flags.DEFINE_string('model_dir', 'fine_tune_nn_model_logs','''directory where to save the model''')
# tf.app.flags.DEFINE_string('feature_dir', 'f7_dir/','''saved feature dir''')
def define_graph_config():
config_proto = tf.ConfigProto()
config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
return config_proto
def calculate_iter():
with open(TRAIN_TXT, 'r') as f:
s = f.read()
s_l = s.split('\n')
total_num = len(s_l)
FLAGS.max_training_iter = int(total_num / BATCH_SIZE) + 1
print(FLAGS.max_training_iter)
def write_feature(file_name, feature):
assert(len(file_name) == len(feature))
for i in range(len(file_name)):
if LAYER == 7:
f_name = file_name[i].replace(".jpg",".fc7")
elif LAYER == 5:
f_name = file_name[i].replace(".jpg",".cn5")
        feature[i].tofile(f_name)
def filequeue_to_batch_data(filename_queue, line_reader, batch_size = BATCH_SIZE):
key, next_line = line_reader.read(filename_queue)
query_image_name, label = tf.decode_csv(
next_line, [tf.constant([], dtype=tf.string),
tf.constant([], dtype = tf.int32)], field_delim=" ")
reverse_channel = True # for pre-trained purpose
query_tensor = image_io.read_image(query_image_name, reverse_channel,
FEATURE_ROW, FEATURE_COL)
batch_query_image, batch_label, batch_image_name = tf.train.batch(
[query_tensor, label, query_image_name], batch_size=batch_size)
return batch_query_image, batch_label, batch_image_name
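# The TRAIN_TXT list consumed by filequeue_to_batch_data() above is expected to
# hold one space-separated "<image path> <integer label>" record per line,
# matching the decode_csv record defaults, e.g. (illustrative):
#   /data/frames/clip_0001.jpg 3
#   /data/frames/clip_0002.jpg 7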
def train():
calculate_iter()
train_filenamequeue=tf.train.string_input_producer([TRAIN_TXT], shuffle=SHUFFLE_DATA)
line_reader = tf.TextLineReader()
train_batch_image, train_batch_label, batch_image_name = filequeue_to_batch_data(train_filenamequeue, line_reader)
global_step = tf.Variable(0, name = 'global_step', trainable = False)
image_data_ph = tf.placeholder(tf.float32, shape = (BATCH_SIZE, FEATURE_ROW, FEATURE_COL, 3))
label_ph = tf.placeholder(tf.float32, shape = (BATCH_SIZE, LABEL_DIM))
net = AlexNet({'data':image_data_ph})
infer = net.get_output()
new_dim = 1
for d in infer.get_shape().as_list()[1:]:
new_dim *= d
infer_reshape = tf.reshape(infer, [-1,new_dim])
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
merged_sum = tf.merge_all_summaries()
config_proto = define_graph_config()
sess = tf.Session(config = config_proto)
if TRAIN:
writer_sum = tf.train.SummaryWriter(FLAGS.train_log_dir,graph_def = sess.graph_def)
init_op = tf.initialize_all_variables()
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord = coord, sess = sess)
if TRAIN:
for i in xrange(FLAGS.max_training_iter):
batch_image_v, batch_image_name_v = sess.run([train_batch_image, batch_image_name])
feed_data = {image_data_ph: batch_image_v}
infer_v = sess.run(infer_reshape, feed_dict = feed_data)
write_feature(batch_image_name_v, infer_v)
def main(argv = None):
# if not os.path.exists(FLAGS.feature_dir):
# os.makedirs(FLAGS.feature_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
the-stack_0_20946 | from typing import List, Tuple, Union, Dict, Any, Optional
import click
import wandb
import pandas as pd
import argparse
import pathlib
import sys
from .wandb_utils import (
wandb_utils,
pass_api_wrapper,
pass_api_and_info,
config_file_decorator,
)
from .common import processor
from wandb_utils.file_filter import FileFilter, GlobBasedFileFilter
import logging
import tqdm
import pandas as pd
logger = logging.getLogger(__name__)
def download_run_from_wandb(
api: wandb.PublicApi,
entity: Optional[str],
project: Optional[str],
sweep: Optional[str],
run: Optional[str],
output_dir: pathlib.Path,
include_filter: Optional[List[str]] = None,
exclude_filter: Optional[List[str]] = None,
overwrite: bool = False,
move: bool = False,
) -> None:
run_ = api.run(f"{entity}/{project}/{run}")
# pbar = tqdm.tqdm(run_.files(), desc="Downloading files")
output_dir.mkdir(parents=True, exist_ok=overwrite)
ff = GlobBasedFileFilter(
include_filter=include_filter, exclude_filter=exclude_filter
)
for file_ in run_.files():
if ff(file_):
# pbar.set_description(f"Downloading: {file_.name}")
logger.debug(f"Downloading: {file_.name}")
file_.download(output_dir, replace=overwrite)
def download_runs_from_wandb(
df: pd.DataFrame,
api: wandb.PublicApi,
entity: Optional[str],
project: Optional[str],
sweep: Optional[str],
output_dir_field: str,
include_filter: Optional[List[str]] = None,
exclude_filter: Optional[List[str]] = None,
) -> pd.DataFrame:
    for _, row in tqdm.tqdm(df.iterrows(), desc="Downloading runs' files"):
download_run_from_wandb(
api,
entity,
project,
sweep,
row["run"],
pathlib.Path(row[output_dir_field]),
include_filter,
exclude_filter,
        )
    return df
@click.command(name="download-run-from-wandb")
@click.argument("run", type=str)
@click.option(
"-o",
"--output_dir",
required=True,
type=click.Path(path_type=pathlib.Path), # type: ignore
help="Directory in which to save the run data. It will be saved in output_dir/runid",
)
@click.option(
"--include_filter",
multiple=True,
type=str,
help="Glob string for files to include (can pass multiple). See `glob_filter.py` for details.",
)
@click.option(
"--exclude_filter",
multiple=True,
type=str,
help="Glob string for Files to exclude (can pass multiple). See `glob_filter.py` for details.",
)
@click.option("--overwrite", is_flag=True)
@pass_api_and_info
@processor
@config_file_decorator()
def download_run_from_wandb_command(
df: pd.DataFrame,
api: wandb.PublicApi,
entity: Optional[str],
project: Optional[str],
sweep: Optional[str],
run: str,
output_dir: pathlib.Path,
include_filter: Optional[List[str]] = None,
exclude_filter: Optional[List[str]] = None,
overwrite: bool = False,
) -> pd.DataFrame:
"""
Download single run from wandb server.
RUN is the unique run id.
"""
return download_run_from_wandb(
api,
entity,
project,
sweep,
run,
output_dir,
include_filter,
exclude_filter,
overwrite,
)
|
the-stack_0_20947 | import os
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
from transformer_disentangle.encoders import StyleEncoder, PoseEncoder
from transformer_disentangle.decoder import Decoder
from transformer_disentangle.discriminator import Discriminator
def kl_divergence(mu, logvar):
return - 0.5 * (1 + logvar - mu.pow(2) - logvar.exp())
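# Elementwise closed-form KL divergence between the approximate posterior
# N(mu, exp(logvar)) and a standard normal prior N(0, I); callers reduce it
# with .mean() over batch and latent dimensions (summing over the latent
# dimensions per sample is another common convention).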
def mse_loss(input, target):
return (input - target).pow(2).mean()
class VAE_Model(nn.Module):
def __init__(self, args):
super(VAE_Model, self).__init__()
style_enc = StyleEncoder(
feature_size=args.dim_per_limb,
latent_size=args.latent_dim,
ninp=args.attention_embedding_size,
nhead=args.attention_heads,
nhid=args.attention_hidden_size,
nlayers=args.attention_layers,
max_num_limbs=args.max_num_limbs,
)
pose_enc = PoseEncoder(
root_size=args.root_size,
feature_size=args.dim_per_limb,
latent_size=args.latent_dim,
batch_size=args.batch_size,
ninp=args.attention_embedding_size,
nhead=args.attention_heads,
nhid=args.attention_hidden_size,
nlayers=args.attention_layers,
max_num_limbs=args.max_num_limbs,
)
decoder = Decoder(
root_size=args.root_size,
feature_size=args.dim_per_limb,
latent_size=args.latent_dim,
batch_size=args.batch_size,
ninp=args.attention_embedding_size,
nhead=args.attention_heads,
nhid=args.attention_hidden_size,
nlayers=args.attention_layers,
max_num_limbs=args.max_num_limbs,
)
discriminator = Discriminator(
root_size=args.root_size,
feature_size=args.dim_per_limb,
max_num_limbs=args.max_num_limbs
)
self.add_module("style_enc", style_enc)
self.add_module("pose_enc", pose_enc)
self.add_module("decoder", decoder)
self.add_module("discriminator", discriminator)
self.batch_size = args.batch_size
self.latent_dim = args.latent_dim
encoder_parameters = list(self.style_enc.parameters()) + list(self.pose_enc.parameters())
self.auto_encoder_optimizer = optim.Adam(
encoder_parameters + list(self.decoder.parameters()),
lr=args.lr,
)
self.discriminator_optimizer = optim.Adam(
list(self.discriminator.parameters()),
lr=args.lr,
)
self.generator_optimizer = optim.Adam(
encoder_parameters + list(self.decoder.parameters()),
lr=args.lr,
)
self.beta = args.beta
self.device = torch.device("cuda" if args.cuda else "cpu")
self.root_size = args.root_size
def train_recon(self, x1, x2, structure):
self.auto_encoder_optimizer.zero_grad()
zs = self.style_enc(structure)
zp_1, mean, logvar = self.pose_enc(x1)
zp_2, mean, logvar = self.pose_enc(x2)
x1_r = self.decoder(zp_1, zs)
x2_r = self.decoder(zp_2, zs)
kl_loss = kl_divergence(mean, logvar).mean()
        reconstruction_loss = mse_loss(x1_r, x1) + mse_loss(x2_r, x2)  # compare each reconstruction with its own input
loss = reconstruction_loss + self.beta * kl_loss
loss.backward()
self.auto_encoder_optimizer.step()
return reconstruction_loss, kl_loss
def train_generator(self, x1, x3, structure3):
self.generator_optimizer.zero_grad()
zp_1, mean, logvar = self.pose_enc(x1)
zs_3 = self.style_enc(structure3)
xr_13 = self.decoder(zp_1, zs_3)
kl_loss = kl_divergence(mean, logvar).mean()
# True labels
true_labels = torch.ones(self.batch_size, 1)
true_labels = true_labels.to(self.device)
d1 = self.discriminator(x3, xr_13)
gen_loss_1 = F.binary_cross_entropy(d1, true_labels)
z_random = torch.normal(0, 1, size=(self.batch_size, self.latent_dim))
z_random = z_random.to(self.device)
xr_r3 = self.decoder(z_random, zs_3)
d2 = self.discriminator(x3, xr_r3)
gen_loss_2 = F.binary_cross_entropy(d2, true_labels)
generator_loss = gen_loss_1 + gen_loss_2 + self.beta * kl_loss
generator_loss.backward()
self.generator_optimizer.step()
return gen_loss_1, gen_loss_2, kl_loss
def train_discriminator(self, x1, x2, x3, structure3):
self.discriminator_optimizer.zero_grad()
true_labels = torch.ones(self.batch_size, 1)
true_labels = true_labels.to(self.device)
d_real = self.discriminator(x2, x3)
disc_loss_real = F.binary_cross_entropy(d_real, true_labels)
fake_labels = torch.zeros(self.batch_size, 1)
fake_labels = fake_labels.to(self.device)
zp_1, mean, logvar = self.pose_enc(x1)
zs_3 = self.style_enc(structure3)
xr_13 = self.decoder(zp_1, zs_3)
d_fake = self.discriminator(x3, xr_13)
disc_loss_fake = F.binary_cross_entropy(d_fake, fake_labels)
discriminator_loss = disc_loss_real + disc_loss_fake
discriminator_loss.backward()
self.discriminator_optimizer.step()
return discriminator_loss
    def save_model(self, path):
        model_path = os.path.join(path, 'vae_model')
        # Save all components in one checkpoint dict so the earlier
        # writes are not overwritten by the later ones.
        torch.save({
            "pose_encoder": self.pose_enc.state_dict(),
            "style_encoder": self.style_enc.state_dict(),
            "decoder": self.decoder.state_dict(),
            "discriminator": self.discriminator.state_dict(),
        }, model_path)
    def load_model(self, path):
        model_path = os.path.join(path, 'vae_model')
        data = torch.load(model_path)
        self.pose_enc.load_state_dict(data['pose_encoder'])
        self.style_enc.load_state_dict(data['style_encoder'])
        self.decoder.load_state_dict(data['decoder'])
        self.discriminator.load_state_dict(data['discriminator']) |
the-stack_0_20949 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Problem 021
Divisible Sum Pairs
Source : https://www.hackerrank.com/challenges/divisible-sum-pairs/problem
"""
_, d = map(int, input().split())
numbers = list(map(int, input().split()))
nb = len(numbers)
count = 0
for i in range(nb-1):
for j in range(i+1, nb):
count += (numbers[i] + numbers[j]) % d == 0
print(count)
|
the-stack_0_20951 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import io
import msgpack
import struct
GCM_SIZE_TAG = 16
GCM_SIZE_IV = 12
LEDGER_TRANSACTION_SIZE = 4
LEDGER_DOMAIN_SIZE = 8
def to_uint_32(buffer):
return struct.unpack("@I", buffer)[0]
def to_uint_64(buffer):
return struct.unpack("@Q", buffer)[0]
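# "@I" and "@Q" are struct format strings for native-endian unsigned 32-bit
# and 64-bit integers; they decode the fixed-size framing fields
# (LEDGER_TRANSACTION_SIZE and LEDGER_DOMAIN_SIZE bytes) read in _read_header.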
class GcmHeader:
_gcm_tag = ["\0"] * GCM_SIZE_TAG
_gcm_iv = ["\0"] * GCM_SIZE_IV
def __init__(self, buffer):
if len(buffer) < GcmHeader.size():
raise ValueError("Corrupt GCM header")
self._gcm_tag = struct.unpack(f"@{GCM_SIZE_TAG}B", buffer[:GCM_SIZE_TAG])
self._gcm_iv = struct.unpack(f"@{GCM_SIZE_IV}B", buffer[GCM_SIZE_TAG:])
def size():
return GCM_SIZE_TAG + GCM_SIZE_IV
class LedgerDomain:
_buffer = None
_unpacker = None
_buffer_size = 0
_version = 0
_read_version = 0
_tables = {}
def __init__(self, buffer):
self._buffer = buffer
self._buffer_size = buffer.getbuffer().nbytes
self._unpacker = msgpack.Unpacker(self._buffer, raw=True, strict_map_key=False)
self._version = self._read_next()
self._read()
def _read_next(self):
return self._unpacker.unpack()
def _read_next_string(self):
return self._unpacker.unpack().decode()
def _read(self):
while self._buffer_size > self._unpacker.tell():
map_start_indicator = self._read_next()
map_name = self._read_next_string()
records = {}
self._tables[map_name] = records
read_version = self._read_next()
read_count = self._read_next()
write_count = self._read_next()
if write_count:
for i in range(write_count):
k = self._read_next()
val = self._read_next()
records[k] = val
remove_count = self._read_next()
if remove_count:
for i in range(remove_count):
k = self._read_next()
records[k] = None
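    # Each serialized table parsed above is laid out as: map_start_indicator,
    # map_name, read_version, read_count, then write_count (key, value) pairs,
    # then remove_count keys (recorded here with a value of None).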
def get_tables(self):
return self._tables
def _byte_read_safe(file, num_of_bytes):
ret = file.read(num_of_bytes)
if len(ret) != num_of_bytes:
raise ValueError("Failed to read precise number of bytes: %u" % num_of_bytes)
return ret
class Transaction:
_file = None
_total_size = 0
_public_domain_size = 0
_next_offset = 0
_public_domain = None
_file_size = 0
gcm_header = None
def __init__(self, filename):
self._file = open(filename, mode="rb")
self._file.seek(0, 2)
self._file_size = self._file.tell()
self._file.seek(0, 0)
def __del__(self):
self._file.close()
def _read_header(self):
# read the size of the transaction
buffer = _byte_read_safe(self._file, LEDGER_TRANSACTION_SIZE)
self._total_size = to_uint_32(buffer)
self._next_offset += self._total_size
self._next_offset += LEDGER_TRANSACTION_SIZE
# read the AES GCM header
buffer = _byte_read_safe(self._file, GcmHeader.size())
self.gcm_header = GcmHeader(buffer)
# read the size of the public domain
buffer = _byte_read_safe(self._file, LEDGER_DOMAIN_SIZE)
self._public_domain_size = to_uint_64(buffer)
def get_public_domain(self):
if self._public_domain == None:
buffer = io.BytesIO(_byte_read_safe(self._file, self._public_domain_size))
self._public_domain = LedgerDomain(buffer)
return self._public_domain
def _complete_read(self):
self._file.seek(self._next_offset, 0)
self._public_domain = None
def __iter__(self):
return self
def __next__(self):
if self._next_offset == self._file_size:
raise StopIteration()
try:
self._complete_read()
self._read_header()
return self
except:
raise StopIteration()
class Ledger:
_filename = None
def __init__(self, filename):
self._filename = filename
def __iter__(self):
return Transaction(self._filename)
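# Minimal usage sketch (the ledger file name below is illustrative only,
# not something this module provides):
#
#   ledger = Ledger("0.ledger")
#   for tx in ledger:
#       tables = tx.get_public_domain().get_tables()
#       for name, records in tables.items():
#           print(name, len(records))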
|
the-stack_0_20954 | '''
Unit tests for the gbdxtools.Vectors class
See tests/readme.md for more about tests
'''
from gbdxtools import Interface
from gbdxtools.vectors import Vectors, AggregationDef, GeohashAggDef, \
DateHistogramAggDef, TermsAggDef, AvgAggDef, \
SumAggDef, AvgGeoLatAggDef, AvgGeoLonAggDef, CardinalityAggDef
from helpers import mockable_interface, gbdx_vcr
import unittest
import types
import json
class TestVectors(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.gbdx = mockable_interface()
def test_init(self):
c = Vectors()
self.assertIsInstance(c, Vectors)
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_search_1010.yaml')
def test_vectors_search_paging(self):
aoi = "POLYGON ((180 -90, 180 90, -180 90, -180 -90, 180 -90))"
results = self.gbdx.vectors.query(aoi, query="item_type:WV03_VNIR", count=1010)
self.assertEqual(len(results), 1010)
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_search_55.yaml')
def test_vectors_search_count_small(self):
aoi = "POLYGON((17 25, 18 25, 18 24, 17 24, 17 25))"
results = self.gbdx.vectors.query(aoi, query='item_type:WV02', count=55)
self.assertEqual(len(results), 55)
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_search_1.yaml')
def test_vectors_search_count_single(self):
aoi = "POLYGON((17 25, 18 25, 18 24, 17 24, 17 25))"
results = self.gbdx.vectors.query(aoi, query="item_type:WV02", count=1)
self.assertEqual(len(results), 1)
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_search_index.yaml')
def test_vectors_search_index(self):
aoi = 'POLYGON ((-117.1 37.9, -117.1 38.1, -117.3 38.1, -117.3 37.9, -117.1 37.9))'
results = self.gbdx.vectors.query(aoi, query="item_type:tweet", index="vector-sma-twitter*", count=100)
self.assertEqual(len(results), 17)
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_search_iterate.yaml')
def test_vectors_search_iteratively(self):
'''Run the same query directly and through paging'''
aoi = "POLYGON((17 25, 18 25, 18 24, 17 24, 17 25))"
query = "item_type:WV02"
count = 150
generator = self.gbdx.vectors.query_iteratively(aoi, query=query, count=count)
results = self.gbdx.vectors.query(aoi, query=query, count=count)
self.assertIsInstance(generator, types.GeneratorType)
self.assertEqual(len(results), count)
self.assertEqual(len(list(generator)), len(results))
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_create_single.yaml')
def test_vectors_create_single(self):
results = self.gbdx.vectors.create({
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [1.0,1.0]
},
"properties": {
"text" : "item text",
"name" : "item name",
"item_type" : "type",
"ingest_source" : "source",
"attributes" : {
"latitude" : 1,
"institute_founded" : "2015-07-17",
"mascot" : "moth"
}
}
})
for result in results['successfulItemIds']:
self.assertEqual(result, '/api/vector/vector-user-provided/e0f25c1c-9078-476b-ac5d-4c7fb08bb79a')
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_create_multiple.yaml')
def test_vectors_create_multiple(self):
results = self.gbdx.vectors.create([{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [1.0,1.0]
},
"properties": {
"text" : "item text",
"name" : "item name",
"item_type" : "type",
"ingest_source" : "source",
"attributes" : {
"latitude" : 1,
"institute_founded" : "2015-07-17",
"mascot" : "moth"
}
}
},
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [1.0,1.0]
},
"properties": {
"text" : "item text",
"name" : "item name",
"item_type" : "type",
"ingest_source" : "source",
"attributes" : {
"latitude" : 1,
"institute_founded" : "2015-07-17",
"mascot" : "asdfadsfadf"
}
}
}])
self.assertEqual(len(results), 2)
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_create_from_wkt.yaml')
def test_vectors_create_from_wkt(self):
aoi = "POLYGON((0 3,3 3,3 0,0 0,0 3))"
result = self.gbdx.vectors.create_from_wkt(
aoi,
item_type='test_type_123',
ingest_source='api',
attribute1='nothing',
attribute2='something',
number=6,
date='2015-06-06'
)
self.assertEqual(result, '488f1b61-a539-447e-bced-e66563040a89')
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_aggregate_query_with_default_index.yaml')
def test_vectors_aggregate_query_with_default_index(self):
wkt = 'POLYGON((-76.65 40.10, -76.65 40.14, -76.55 40.14, -76.55 40.10, -76.65 40.10))'
aggs = 'terms:item_type'
result = self.gbdx.vectors.aggregate_query(wkt, aggs)
self.assertEqual(len(result), 1)
self.assertIn('name', result[0])
self.assertEqual(result[0]['name'],'terms:item_type')
self.assertIn('terms', result[0])
self.assertEqual(len(result[0]['terms']), 10)
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_aggregate_query_with_defined_index.yaml')
def test_vectors_aggregate_query_with_defined_index(self):
wkt = 'POLYGON((-76.65 40.10, -76.65 40.14, -76.55 40.14, -76.55 40.10, -76.65 40.10))'
aggs = 'terms:item_type'
result = self.gbdx.vectors.aggregate_query(wkt, aggs, index='read-vector-osm-*')
self.assertEqual(len(result), 1)
self.assertIn('name', result[0])
self.assertEqual(result[0]['name'],'terms:item_type')
self.assertIn('terms', result[0])
self.assertEqual(len(result[0]['terms']), 10)
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_aggregate_query_simple.yaml')
def test_vectors_aggregate_query_agg_string(self):
wkt = 'POLYGON((-76.65 40.10, -76.65 40.14, -76.55 40.14, -76.55 40.10, -76.65 40.10))'
aggs = 'terms:ingest_source'
result = self.gbdx.vectors.aggregate_query(wkt, aggs)
self.assertEqual(len(result), 1)
self.assertIn('name', result[0])
self.assertEqual(result[0]['name'],'terms:ingest_source')
self.assertIn('terms', result[0])
self.assertEqual(len(result[0]['terms']), 1)
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_aggregate_query_simple.yaml')
def test_vectors_aggregate_query_agg_def(self):
wkt = 'POLYGON((-76.65 40.10, -76.65 40.14, -76.55 40.14, -76.55 40.10, -76.65 40.10))'
aggs = AggregationDef(agg_type='terms', value='ingest_source')
result = self.gbdx.vectors.aggregate_query(wkt, aggs)
self.assertEqual(len(result), 1)
self.assertIn('name', result[0])
self.assertEqual(result[0]['name'],'terms:ingest_source')
self.assertIn('terms', result[0])
self.assertEqual(len(result[0]['terms']), 1)
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_vectors_aggregate_query_complex.yaml')
def test_vectors_aggregate_query_complex(self):
wkt = 'POLYGON((-76.65 40.10, -76.65 40.14, -76.55 40.14, -76.55 40.10, -76.65 40.10))'
child_agg = AggregationDef(agg_type='date_hist', value='month')
aggs = AggregationDef(agg_type='geohash', value='4', children=child_agg)
query = 'item_type:tweet'
start_date = 'now-12M'
end_date = 'now'
result = self.gbdx.vectors.aggregate_query(wkt, aggs, index='vector-sma-twitter*', query=query, start_date=start_date, end_date=end_date)
self.assertEqual(len(result), 1)
self.assertIn('name', result[0])
self.assertEqual(result[0]['name'],'geohash:4')
self.assertIn('terms', result[0])
terms = result[0]['terms']
self.assertEqual(len(terms), 1)
self.assertEqual(terms[0]['term'], 'dr1s')
self.assertEqual(len(terms[0]['aggregations']), 1)
self.assertEqual(len(terms[0]['aggregations'][0]['terms']), 2)
def test_agg_def_repr_no_children(self):
agg_def = AggregationDef(agg_type='terms', value='ingest_source')
self.assertEqual(agg_def.__repr__(), 'terms:ingest_source')
def test_agg_def_repr_with_children(self):
grandkids = [
AggregationDef(agg_type='cardinality', value='ingest_source'),
AggregationDef(agg_type='terms', value='ingest_source')
]
kid = AggregationDef(agg_type='date_hist', value='d', children=grandkids)
agg_def = AggregationDef(agg_type='geohash', value='4', children=kid)
self.assertEqual(agg_def.__repr__(), 'geohash:4;date_hist:d;(cardinality:ingest_source,terms:ingest_source)')
def test_geohash_agg_def_constructor(self):
agg_def = GeohashAggDef()
self.assertEqual(agg_def.agg_type, 'geohash')
self.assertEqual(agg_def.value, '3')
agg_def = GeohashAggDef('6')
self.assertEqual(agg_def.agg_type, 'geohash')
self.assertEqual(agg_def.value, '6')
child_def = TermsAggDef('item_type')
agg_def = GeohashAggDef('2', children=child_def)
self.assertEqual(agg_def.agg_type, 'geohash')
self.assertEqual(agg_def.value, '2')
self.assertEqual(agg_def.children, child_def)
def test_date_hist_agg_def_constructor(self):
agg_def = DateHistogramAggDef()
self.assertEqual(agg_def.agg_type, 'date_hist')
self.assertEqual(agg_def.value, 'M')
agg_def = DateHistogramAggDef('w')
self.assertEqual(agg_def.agg_type, 'date_hist')
self.assertEqual(agg_def.value, 'w')
child_def = TermsAggDef('item_type')
agg_def = DateHistogramAggDef('d', children=child_def)
self.assertEqual(agg_def.agg_type, 'date_hist')
self.assertEqual(agg_def.value, 'd')
self.assertEqual(agg_def.children, child_def)
def test_terms_agg_def_constructor(self):
agg_def = TermsAggDef('foo')
self.assertEqual(agg_def.agg_type, 'terms')
self.assertEqual(agg_def.value, 'foo')
child_def = DateHistogramAggDef('d')
agg_def = TermsAggDef('bar', children=child_def)
self.assertEqual(agg_def.agg_type, 'terms')
self.assertEqual(agg_def.value, 'bar')
self.assertEqual(agg_def.children, child_def)
with self.assertRaises(Exception) as context:
agg_def = TermsAggDef()
self.assertTrue('The "field" property cannot be empty.' in str(context.exception))
def test_cardinality_agg_def_constructor(self):
agg_def = CardinalityAggDef('foo')
self.assertEqual(agg_def.agg_type, 'cardinality')
self.assertEqual(agg_def.value, 'foo')
with self.assertRaises(Exception) as context:
agg_def = CardinalityAggDef()
self.assertTrue('The "field" property cannot be empty.' in str(context.exception))
def test_avg_agg_def_constructor(self):
agg_def = AvgAggDef('foo')
self.assertEqual(agg_def.agg_type, 'avg')
self.assertEqual(agg_def.value, 'foo')
with self.assertRaises(Exception) as context:
agg_def = AvgAggDef()
self.assertTrue('The "field" property cannot be empty.' in str(context.exception))
def test_sum_agg_def_constructor(self):
agg_def = SumAggDef('foo')
self.assertEqual(agg_def.agg_type, 'sum')
self.assertEqual(agg_def.value, 'foo')
with self.assertRaises(Exception) as context:
agg_def = SumAggDef()
self.assertTrue('The "field" property cannot be empty.' in str(context.exception))
def test_avg_geo_lat_agg_def_constructor(self):
agg_def = AvgGeoLatAggDef()
self.assertEqual(agg_def.agg_type, 'avg_geo_lat')
self.assertFalse(agg_def.value)
self.assertEqual(str(agg_def), 'avg_geo_lat')
def test_avg_geo_lon_agg_def_constructor(self):
agg_def = AvgGeoLonAggDef()
self.assertEqual(agg_def.agg_type, 'avg_geo_lon')
self.assertFalse(agg_def.value)
self.assertEqual(str(agg_def), 'avg_geo_lon') |
the-stack_0_20955 |
import os, sys, subprocess, multiprocessing, re, string, json, shutil, logging, traceback
import shared
from js_optimizer import *
DUPLICATE_FUNCTION_ELIMINATOR = path_from_root('tools', 'eliminate-duplicate-functions.js')
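# process_shell runs the eliminator over just the asm.js shell (the code
# outside the EMSCRIPTEN_START_FUNCS/EMSCRIPTEN_END_FUNCS markers); the
# previously computed equivalent-function hash info is appended to the temp
# file so references in the shell are rewritten consistently with the
# per-chunk function passes.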
def process_shell(js, js_engine, shell, equivalentfn_hash_info=None):
suffix = '.eliminatedupes'
temp_file = temp_files.get(suffix + '.js').name
f = open(temp_file, 'w')
f.write(shell)
f.write('\n')
f.write(equivalentfn_hash_info)
f.close()
(output,error) = subprocess.Popen(js_engine +
[DUPLICATE_FUNCTION_ELIMINATOR, temp_file, '--use-hash-info', '--no-minimize-whitespace'],
stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate()
assert len(output) > 0
assert len(error) == 0
return output
def run_on_chunk(command):
try:
file_suffix = '.js'
index = command.index(DUPLICATE_FUNCTION_ELIMINATOR)
filename = command[index + 1]
if '--gen-hash-info' in command:
file_suffix = '.json'
if os.environ.get('EMCC_SAVE_OPT_TEMP') and os.environ.get('EMCC_SAVE_OPT_TEMP') != '0':
saved = 'save_' + os.path.basename(filename)
while os.path.exists(saved): saved = 'input' + str(int(saved.replace('input', '').replace('.txt', ''))+1) + '.txt'
print >> sys.stderr, 'running DFE command', ' '.join(map(lambda c: c if c != filename else saved, command))
shutil.copyfile(filename, os.path.join(shared.get_emscripten_temp_dir(), saved))
if shared.EM_BUILD_VERBOSE_LEVEL >= 3: print >> sys.stderr, 'run_on_chunk: ' + str(command)
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
output = proc.communicate()[0]
assert proc.returncode == 0, 'Error in optimizer (return code ' + str(proc.returncode) + '): ' + output
assert len(output) > 0 and not output.startswith('Assertion failed'), 'Error in optimizer: ' + output
filename = temp_files.get(os.path.basename(filename) + '.jo' + file_suffix).name
# Important to write out in binary mode, because the data we are writing contains Windows line endings '\r\n' because it was PIPED from console.
# Otherwise writing \r\n to ascii mode file will result in Windows amplifying \n to \r\n, generating bad \r\r\n line endings.
f = open(filename, 'wb')
f.write(output)
f.close()
if DEBUG and not shared.WINDOWS: print >> sys.stderr, '.' # Skip debug progress indicator on Windows, since it doesn't buffer well with multiple threads printing to console.
return filename
except KeyboardInterrupt:
# avoid throwing keyboard interrupts from a child process
raise Exception()
except (TypeError, ValueError) as e:
formatted_lines = traceback.format_exc().splitlines()
print >> sys.stderr, ">>>>>>>>>>>>>>>>>"
for formatted_line in formatted_lines:
print >> sys.stderr, formatted_line
print >> sys.stderr, "<<<<<<<<<<<<<<<<<"
raise
def dump_equivalent_functions(passed_in_filename, global_data):
# Represents the sets of equivalent functions for the passed in filename
equivalent_fn_info = {}
equivalent_fn_json_file = passed_in_filename + ".equivalent_functions.json"
# If we are running more than one pass, then we want to merge
# all the hash infos into one
if os.path.isfile(equivalent_fn_json_file):
print >> sys.stderr, "Merging data from current pass for {} into {}".format(passed_in_filename, equivalent_fn_json_file)
with open(equivalent_fn_json_file) as data_file:
equivalent_fn_info = json.load(data_file)
else:
print >> sys.stderr, "Writing equivalent functions for {} to {}".format(passed_in_filename, equivalent_fn_json_file)
# Merge the global data's fn_hash_to_fn_name structure into
# the equivalent function info hash.
for fn_hash, fn_names in global_data['fn_hash_to_fn_name'].iteritems():
if fn_hash not in equivalent_fn_info:
# Exclude single item arrays as they are of no use to us.
if len(fn_names) > 1:
equivalent_fn_info[fn_hash] = fn_names[:]
else:
for fn_name in fn_names:
if fn_name not in equivalent_fn_info[fn_hash]:
equivalent_fn_info[fn_hash].append(fn_name)
with open(equivalent_fn_json_file, 'w') as fout:
fout.write(json.dumps(equivalent_fn_info))
def write_equivalent_fn_hash_to_file(f, json_files, passed_in_filename):
# Represents the aggregated info for all the json files passed in
# Each json file contains info for one of the processed chunks
global_data = {}
global_data['fn_hash_to_fn_name'] = {}
global_data['fn_hash_to_fn_body'] = {}
global_data['variable_names'] = {}
for json_file in json_files:
with open(json_file) as data_file:
data = json.load(data_file)
# Merge the data's fn_hash_to_fn_name structure into
# the global data hash.
for fn_hash, fn_names in data['fn_hash_to_fn_name'].iteritems():
if fn_hash not in global_data['fn_hash_to_fn_name']:
global_data['fn_hash_to_fn_name'][fn_hash] = fn_names[:]
global_data['fn_hash_to_fn_body'][fn_hash] = data['fn_hash_to_fn_body'][fn_hash]
else:
assert(data['fn_hash_to_fn_body'][fn_hash] == global_data['fn_hash_to_fn_body'][fn_hash])
for fn_name in fn_names:
if fn_name not in global_data['fn_hash_to_fn_name'][fn_hash]:
global_data['fn_hash_to_fn_name'][fn_hash].append(fn_name)
# Merge the data's variable_names structure into
# the global data hash.
for variable, value in data['variable_names'].iteritems():
if variable not in global_data['variable_names']:
global_data['variable_names'][variable] = value
variable_names = global_data['variable_names']
# Lets generate the equivalent function hash from the global data set
equivalent_fn_hash = {}
for fn_hash, fn_names in global_data['fn_hash_to_fn_name'].iteritems():
shortest_fn = None
for fn_name in fn_names:
if (fn_name not in variable_names) and (shortest_fn is None or (len(fn_name) < len(shortest_fn))):
shortest_fn = fn_name
if shortest_fn is not None:
for fn_name in fn_names:
if fn_name not in variable_names and fn_name != shortest_fn:
equivalent_fn_hash[fn_name] = shortest_fn
# Dump the sets of equivalent functions if the user desires it
# This comes in handy for debugging
if shared.Settings.ELIMINATE_DUPLICATE_FUNCTIONS_DUMP_EQUIVALENT_FUNCTIONS:
dump_equivalent_functions(passed_in_filename, global_data)
# Now write the equivalent function hash to the last line of the file
f.write('// ' + json.dumps(equivalent_fn_hash, separators=(',',':')))
# gen_hash_info is used to determine whether we are generating
# the global set of function implementation hashes. If set to
# False, we assume that we have to use the global hash info to
# reduce the set of duplicate functions
def run_on_js(filename, gen_hash_info=False):
js_engine=shared.NODE_JS
js = open(filename).read()
if os.linesep != '\n':
js = js.replace(os.linesep, '\n') # we assume \n in the splitting code
equivalentfn_hash_info = None
passed_in_filename = filename
# Find markers
start_funcs = js.find(start_funcs_marker)
end_funcs = js.rfind(end_funcs_marker)
if start_funcs < 0 or end_funcs < start_funcs:
logging.critical('Invalid input file. Did not contain appropriate markers. (start_funcs: %s, end_funcs: %s)' % (start_funcs, end_funcs))
sys.exit(1)
if not gen_hash_info:
equivalentfn_hash_info = js[js.rfind('//'):]
start_asm = js.find(start_asm_marker)
end_asm = js.rfind(end_asm_marker)
assert (start_asm >= 0) == (end_asm >= 0)
# We need to split out the asm shell as well, for minification
pre = js[:start_asm + len(start_asm_marker)]
post = js[end_asm:]
asm_shell = js[start_asm + len(start_asm_marker):start_funcs + len(start_funcs_marker)] + '''
EMSCRIPTEN_FUNCS();
''' + js[end_funcs + len(end_funcs_marker):end_asm + len(end_asm_marker)]
js = js[start_funcs + len(start_funcs_marker):end_funcs]
# we assume there is a maximum of one new name per line
asm_shell_pre, asm_shell_post = process_shell(js, js_engine, asm_shell, equivalentfn_hash_info).split('EMSCRIPTEN_FUNCS();');
asm_shell_post = asm_shell_post.replace('});', '})');
pre += asm_shell_pre + '\n' + start_funcs_marker
post = end_funcs_marker + asm_shell_post + post
if not gen_hash_info:
# We don't need the extra info at the end
post = post[:post.rfind('//')].strip()
else:
pre = js[:start_funcs + len(start_funcs_marker)]
post = js[end_funcs + len(end_funcs_marker):]
js = js[start_funcs + len(start_funcs_marker):end_funcs]
post = end_funcs_marker + post
total_size = len(js)
funcs = split_funcs(js, False)
js = None
# if we are making source maps, we want our debug numbering to start from the
# top of the file, so avoid breaking the JS into chunks
cores = int(os.environ.get('EMCC_CORES') or multiprocessing.cpu_count())
intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
chunk_size = min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))
chunks = shared.chunkify(funcs, chunk_size)
chunks = filter(lambda chunk: len(chunk) > 0, chunks)
if DEBUG and len(chunks) > 0: print >> sys.stderr, 'chunkification: num funcs:', len(funcs), 'actual num chunks:', len(chunks), 'chunk size range:', max(map(len, chunks)), '-', min(map(len, chunks))
funcs = None
if len(chunks) > 0:
def write_chunk(chunk, i):
temp_file = temp_files.get('.jsfunc_%d.js' % i).name
f = open(temp_file, 'w')
f.write(chunk)
if not gen_hash_info:
f.write('\n')
f.write(equivalentfn_hash_info)
f.close()
return temp_file
filenames = [write_chunk(chunks[i], i) for i in range(len(chunks))]
else:
filenames = []
old_filenames = filenames[:]
if len(filenames) > 0:
commands = map(lambda filename: js_engine + [DUPLICATE_FUNCTION_ELIMINATOR, filename, '--gen-hash-info' if gen_hash_info else '--use-hash-info', '--no-minimize-whitespace'], filenames)
if DEBUG and commands is not None:
print >> sys.stderr, [' '.join(command if command is not None else '(null)') for command in commands]
cores = min(cores, len(filenames))
if len(chunks) > 1 and cores >= 2:
# We can parallelize
if DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks, using %d cores (total: %.2f MB)' % (len(chunks), cores, total_size/(1024*1024.))
pool = multiprocessing.Pool(processes=cores)
filenames = pool.map(run_on_chunk, commands, chunksize=1)
else:
      # We can't parallelize, but still break into chunks to avoid uglify/node memory issues
if len(chunks) > 1 and DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks' % (len(chunks))
filenames = [run_on_chunk(command) for command in commands]
else:
filenames = []
json_files = []
# We're going to be coalescing the files back at the end
# Just replace the file list with the ones provided in
# the command list - and save off the generated Json
if gen_hash_info:
json_files = filenames[:]
filenames = old_filenames[:]
for filename in filenames: temp_files.note(filename)
filename += '.jo.js'
f = open(filename, 'w')
f.write(pre);
pre = None
# sort functions by size, to make diffing easier and to improve aot times
funcses = []
for out_file in filenames:
funcses.append(split_funcs(open(out_file).read(), False))
funcs = [item for sublist in funcses for item in sublist]
funcses = None
def sorter(x, y):
diff = len(y[1]) - len(x[1])
if diff != 0: return diff
if x[0] < y[0]: return 1
elif x[0] > y[0]: return -1
return 0
if not os.environ.get('EMCC_NO_OPT_SORT'):
funcs.sort(sorter)
for func in funcs:
f.write(func[1])
funcs = None
f.write('\n')
f.write(post);
# No need to write suffix: if there was one, it is inside post which exists when suffix is there
f.write('\n')
if gen_hash_info and len(json_files) > 0:
write_equivalent_fn_hash_to_file(f, json_files, passed_in_filename)
f.close()
return filename
def save_temp_file(file_to_process):
if os.environ.get('EMSCRIPTEN_SAVE_TEMP_FILES') and os.environ.get('EMSCRIPTEN_TEMP_FILES_DIR'):
destinationFile = file_to_process
temp_dir_name = os.environ.get('TEMP_DIR')
destinationFile = destinationFile.replace(temp_dir_name, os.environ.get('EMSCRIPTEN_TEMP_FILES_DIR'))
if not os.path.exists(os.path.dirname(destinationFile)):
os.makedirs(os.path.dirname(destinationFile))
print >> sys.stderr, "Copying {} to {}".format(file_to_process, destinationFile)
shutil.copyfile(file_to_process, destinationFile)
def get_func_names(javascript_file):
func_names = []
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open (javascript_file, 'rt') as fin:
blob = "".join(fin.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
for match in re.finditer('function (\S+?)\s*\(', asm_chunk):
func_names.append(match.groups(1)[0])
return func_names
def eliminate_duplicate_funcs(file_name):
if shared.Settings.ELIMINATE_DUPLICATE_FUNCTIONS_DUMP_EQUIVALENT_FUNCTIONS != 0:
# Remove previous log file if it exists
equivalent_fn_json_file = file_name + ".equivalent_functions.json"
if os.path.isfile(equivalent_fn_json_file):
print >> sys.stderr, "Deleting old json: " + equivalent_fn_json_file
os.remove(equivalent_fn_json_file)
old_funcs = get_func_names(file_name)
for pass_num in range(shared.Settings.ELIMINATE_DUPLICATE_FUNCTIONS_PASSES):
if DEBUG: print >> sys.stderr, "[PASS {}]: eliminating duplicate functions in: {}.".format(pass_num, file_name)
# Generate the JSON for the equivalent hash first
processed_file = run_on_js(filename=file_name, gen_hash_info=True)
save_temp_file(processed_file)
# Use the hash to reduce the JS file
final_file = run_on_js(filename=processed_file, gen_hash_info=False)
save_temp_file(final_file)
shared.safe_move(final_file, file_name)
if shared.Settings.ELIMINATE_DUPLICATE_FUNCTIONS_DUMP_EQUIVALENT_FUNCTIONS != 0:
new_funcs = get_func_names(file_name)
eliminated_funcs_file = file_name + ".eliminated_functions.json"
print >> sys.stderr, "Writing eliminated functions to file: {}".format(eliminated_funcs_file)
with open(eliminated_funcs_file, 'w') as fout:
eliminated_functions = list(set(old_funcs)-set(new_funcs))
eliminated_functions.sort()
for eliminated_function in eliminated_functions:
fout.write('{}\n'.format(eliminated_function))
def run(filename, js_engine=shared.NODE_JS):
js_engine = shared.listify(js_engine)
return temp_files.run_and_clean(lambda: eliminate_duplicate_funcs(filename))
if __name__ == '__main__':
out = run(sys.argv[1], sys.argv[2:])
|
the-stack_0_20956 | # coding: utf-8
# Program by Thomas W. Miller, August 16, 2018
# Previous work involved gathering embeddings via chakin
# Following methods described in
# https://github.com/chakki-works/chakin
# The previous program, run-chakin-to-get-embeddings-v001.py
# downloaded pre-trained GloVe embeddings, saved them in a zip archive,
# and unzipped that archive to create the four word-to-embeddings
# text files for use in language models.
# This program uses word embeddings to set up defaultdict
# dictionary data structures that can then be employed in language
# models. This is demonstrated with a simple RNN model for predicting
# sentiment (thumbs-down versus thumbs-up) for movie reviews.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os # operating system functions
import os.path # for manipulation of file path names
import re # regular expressions
from collections import defaultdict
import nltk
from nltk.tokenize import TreebankWordTokenizer
import tensorflow as tf
RANDOM_SEED = 9999
# To make output stable across runs
def reset_graph(seed= RANDOM_SEED):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
REMOVE_STOPWORDS = False # no stopword removal
EVOCABSIZE = 50000 # specify desired size of pre-defined embedding vocabulary
# -------------------------------------------------------------
# Select the pre-defined embeddings source
# Define vocabulary size for the language model
# Create a word_to_embedding_dict for GloVe.6B.50d
embeddings_directory = 'embeddings/gloVe.6B'
filename = 'glove.6B.50d.txt'
embeddings_filename = os.path.join(embeddings_directory, filename)
# -------------------------------------------------------------
# Utility function for loading embeddings follows methods described in
# https://github.com/guillaume-chevalier/GloVe-as-a-TensorFlow-Embedding-Layer
# Creates the Python defaultdict dictionary word_to_embedding_dict
# for the requested pre-trained word embeddings
#
# Note the use of defaultdict data structure from the Python Standard Library
# collections_defaultdict.py lets the caller specify a default value up front
# The default value will be returned if the key is not a known dictionary key
# That is, unknown words are represented by a vector of zeros
# For word embeddings, this default value is a vector of zeros
# Documentation for the Python standard library:
# Hellmann, D. 2017. The Python 3 Standard Library by Example. Boston:
# Addison-Wesley. [ISBN-13: 978-0-13-429105-5]
def load_embedding_from_disks(embeddings_filename, with_indexes=True):
"""
    Read an embeddings txt file. If `with_indexes=True`,
    we return a tuple of two dictionaries
    `(word_to_index_dict, index_to_embedding_array)`,
    otherwise we return only a direct
    `word_to_embedding_dict` dictionary mapping
    from a string to a numpy array.
"""
if with_indexes:
word_to_index_dict = dict()
index_to_embedding_array = []
else:
word_to_embedding_dict = dict()
with open(embeddings_filename, 'r', encoding='utf-8') as embeddings_file:
for (i, line) in enumerate(embeddings_file):
split = line.split(' ')
word = split[0]
representation = split[1:]
representation = np.array(
[float(val) for val in representation]
)
if with_indexes:
word_to_index_dict[word] = i
index_to_embedding_array.append(representation)
else:
word_to_embedding_dict[word] = representation
# Empty representation for unknown words.
_WORD_NOT_FOUND = [0.0] * len(representation)
if with_indexes:
_LAST_INDEX = i + 1
word_to_index_dict = defaultdict(
lambda: _LAST_INDEX, word_to_index_dict)
index_to_embedding_array = np.array(
index_to_embedding_array + [_WORD_NOT_FOUND])
return word_to_index_dict, index_to_embedding_array
else:
word_to_embedding_dict = defaultdict(lambda: _WORD_NOT_FOUND)
return word_to_embedding_dict
print('\nLoading embeddings from', embeddings_filename)
word_to_index, index_to_embedding = \
load_embedding_from_disks(embeddings_filename, with_indexes=True)
print("Embedding loaded from disks.")
# Note: unknown words have representations with values [0, 0, ..., 0]
# Additional background code from
# https://github.com/guillaume-chevalier/GloVe-as-a-TensorFlow-Embedding-Layer
# shows the general structure of the data structures for word embeddings
# This code is modified for our purposes in language modeling
vocab_size, embedding_dim = index_to_embedding.shape
print("Embedding is of shape: {}".format(index_to_embedding.shape))
print("This means (number of words, number of dimensions per word)\n")
print("The first words are words that tend to occur more often.")
print("Note: for unknown words, the representation is an empty vector,\n"
      "and the index is the last one. The dictionary has a limit:")
print(" {} --> {} --> {}".format("A word", "Index in embedding",
"Representation"))
word = "worsdfkljsdf" # a word obviously not in the vocabulary
idx = word_to_index[word] # index for word obviously not in the vocabulary
complete_vocabulary_size = idx
embd = list(np.array(index_to_embedding[idx], dtype=int)) # "int" compact print
print(" {} --> {} --> {}".format(word, idx, embd))
word = "the"
idx = word_to_index[word]
embd = list(index_to_embedding[idx]) # "int" for compact print only.
print(" {} --> {} --> {}".format(word, idx, embd))
# Show how to use embeddings dictionaries with a test sentence
# This is a famous typing exercise with all letters of the alphabet
# https://en.wikipedia.org/wiki/The_quick_brown_fox_jumps_over_the_lazy_dog
a_typing_test_sentence = 'The quick brown fox jumps over the lazy dog'
print('\nTest sentence: ', a_typing_test_sentence, '\n')
words_in_test_sentence = a_typing_test_sentence.split()
print('Test sentence embeddings from complete vocabulary of',
complete_vocabulary_size, 'words:\n')
for word in words_in_test_sentence:
word_ = word.lower()
embedding = index_to_embedding[word_to_index[word_]]
print(word_ + ": ", embedding)
# -------------------------------------------------------------
# Define vocabulary size for the language model
# To reduce the size of the vocabulary to the n most frequently used words
def default_factory():
return EVOCABSIZE # last/unknown-word row in limited_index_to_embedding
# dictionary has the items() function, returns list of (key, value) tuples
limited_word_to_index = defaultdict(default_factory, \
{k: v for k, v in word_to_index.items() if v < EVOCABSIZE})
# Select the first EVOCABSIZE rows to the index_to_embedding
limited_index_to_embedding = index_to_embedding[0:EVOCABSIZE,:]
# Set the unknown-word row to be all zeros as previously
limited_index_to_embedding = np.append(limited_index_to_embedding,
index_to_embedding[index_to_embedding.shape[0] - 1, :].\
reshape(1,embedding_dim),
axis = 0)
# Delete large numpy array to clear some CPU RAM
del index_to_embedding
# Verify the new vocabulary: should get same embeddings for test sentence
# Note that a small EVOCABSIZE may yield some zero vectors for embeddings
print('\nTest sentence embeddings from vocabulary of', EVOCABSIZE, 'words:\n')
for word in words_in_test_sentence:
word_ = word.lower()
embedding = limited_index_to_embedding[limited_word_to_index[word_]]
print(word_ + ": ", embedding)
# ------------------------------------------------------------
# code for working with movie reviews data
# Source: Miller, T. W. (2016). Web and Network Data Science.
# Upper Saddle River, N.J.: Pearson Education.
# ISBN-13: 978-0-13-388644-3
# This original study used a simple bag-of-words approach
# to sentiment analysis, along with pre-defined lists of
# negative and positive words.
# Code available at: https://github.com/mtpa/wnds
# ------------------------------------------------------------
# Utility function to get file names within a directory
def listdir_no_hidden(path):
start_list = os.listdir(path)
end_list = []
for file in start_list:
if (not file.startswith('.')):
end_list.append(file)
return(end_list)
# define list of codes to be dropped from document
# carriage-returns, line-feeds, tabs
codelist = ['\r', '\n', '\t']
# We will not remove stopwords in this exercise because they are
# important to keeping sentences intact
if REMOVE_STOPWORDS:
print(nltk.corpus.stopwords.words('english'))
# previous analysis of a list of top terms showed a number of words, along
# with contractions and other word strings to drop from further analysis, add
# these to the usual English stopwords to be dropped from a document collection
more_stop_words = ['cant','didnt','doesnt','dont','goes','isnt','hes',\
'shes','thats','theres','theyre','wont','youll','youre','youve', 'br'\
've', 're', 'vs']
some_proper_nouns_to_remove = ['dick','ginger','hollywood','jack',\
'jill','john','karloff','kudrow','orson','peter','tcm','tom',\
'toni','welles','william','wolheim','nikita']
# start with the initial list and add to it for movie text work
stoplist = nltk.corpus.stopwords.words('english') + more_stop_words +\
some_proper_nouns_to_remove
# text parsing function for creating text documents
# there is more we could do for data preparation
# stemming... looking for contractions... possessives...
# but we will work with what we have in this parsing function
# if we want to do stemming at a later time, we can use
# porter = nltk.PorterStemmer()
# in a construction like this
# words_stemmed = [porter.stem(word) for word in initial_words]
def text_parse(string):
# replace non-alphanumeric with space
temp_string = re.sub('[^a-zA-Z]', ' ', string)
# replace codes with space
for i in range(len(codelist)):
stopstring = ' ' + codelist[i] + ' '
temp_string = re.sub(stopstring, ' ', temp_string)
# replace single-character words with space
temp_string = re.sub('\s.\s', ' ', temp_string)
# convert uppercase to lowercase
temp_string = temp_string.lower()
if REMOVE_STOPWORDS:
# replace selected character strings/stop-words with space
for i in range(len(stoplist)):
stopstring = ' ' + str(stoplist[i]) + ' '
temp_string = re.sub(stopstring, ' ', temp_string)
# replace multiple blank characters with one blank character
temp_string = re.sub('\s+', ' ', temp_string)
return(temp_string)
# -----------------------------------------------
# gather data for 500 negative movie reviews
# -----------------------------------------------
dir_name = 'movie-reviews-negative'
filenames = listdir_no_hidden(path=dir_name)
num_files = len(filenames)
for i in range(len(filenames)):
file_exists = os.path.isfile(os.path.join(dir_name, filenames[i]))
assert file_exists
print('\nDirectory:',dir_name)
print('%d files found' % len(filenames))
# Read data for negative movie reviews
# Data will be stored in a list of lists where each list represents
# a document and each document is a list of words.
# We then break the text into words.
def read_data(filename):
with open(filename, encoding='utf-8') as f:
data = tf.compat.as_str(f.read())
data = data.lower()
data = text_parse(data)
data = TreebankWordTokenizer().tokenize(data) # The Penn Treebank
return data
negative_documents = []
print('\nProcessing document files under', dir_name)
for i in range(num_files):
## print(' ', filenames[i])
words = read_data(os.path.join(dir_name, filenames[i]))
negative_documents.append(words)
# print('Data size (Characters) (Document %d) %d' %(i,len(words)))
# print('Sample string (Document %d) %s'%(i,words[:50]))
# -----------------------------------------------
# gather data for 500 positive movie reviews
# -----------------------------------------------
dir_name = 'movie-reviews-positive'
filenames = listdir_no_hidden(path=dir_name)
num_files = len(filenames)
for i in range(len(filenames)):
file_exists = os.path.isfile(os.path.join(dir_name, filenames[i]))
assert file_exists
print('\nDirectory:',dir_name)
print('%d files found' % len(filenames))
# Read data for positive movie reviews
# Data will be stored in a list of lists where each list
# represents a document and each document is a list of words.
# We then break the text into words.
def read_data(filename):
with open(filename, encoding='utf-8') as f:
data = tf.compat.as_str(f.read())
data = data.lower()
data = text_parse(data)
data = TreebankWordTokenizer().tokenize(data) # The Penn Treebank
return data
positive_documents = []
print('\nProcessing document files under', dir_name)
for i in range(num_files):
## print(' ', filenames[i])
words = read_data(os.path.join(dir_name, filenames[i]))
positive_documents.append(words)
# print('Data size (Characters) (Document %d) %d' %(i,len(words)))
# print('Sample string (Document %d) %s'%(i,words[:50]))
# -----------------------------------------------------
# convert positive/negative documents into numpy array
# note that reviews vary from 22 to 1052 words
# so we use the first 20 and last 20 words of each review
# as our word sequences for analysis
# -----------------------------------------------------
max_review_length = 0 # initialize
for doc in negative_documents:
max_review_length = max(max_review_length, len(doc))
for doc in positive_documents:
max_review_length = max(max_review_length, len(doc))
print('max_review_length:', max_review_length)
min_review_length = max_review_length # initialize
for doc in negative_documents:
min_review_length = min(min_review_length, len(doc))
for doc in positive_documents:
min_review_length = min(min_review_length, len(doc))
print('min_review_length:', min_review_length)
# construct list of 1000 lists with 40 words in each list
from itertools import chain
documents = []
for doc in negative_documents:
doc_begin = doc[0:20]
doc_end = doc[len(doc) - 20: len(doc)]
documents.append(list(chain(*[doc_begin, doc_end])))
for doc in positive_documents:
doc_begin = doc[0:20]
doc_end = doc[len(doc) - 20: len(doc)]
documents.append(list(chain(*[doc_begin, doc_end])))
# create list of lists of lists for embeddings
embeddings = []
for doc in documents:
embedding = []
for word in doc:
embedding.append(limited_index_to_embedding[limited_word_to_index[word]])
embeddings.append(embedding)
# -----------------------------------------------------
# Check on the embeddings list of list of lists
# -----------------------------------------------------
# Show the first word in the first document
test_word = documents[0][0]
print('First word in first document:', test_word)
print('Embedding for this word:\n',
limited_index_to_embedding[limited_word_to_index[test_word]])
print('Corresponding embedding from embeddings list of list of lists\n',
embeddings[0][0][:])
# Show the tenth word in the seventh document (index [6][9])
test_word = documents[6][9]
print('Tenth word in seventh document:', test_word)
print('Embedding for this word:\n',
limited_index_to_embedding[limited_word_to_index[test_word]])
print('Corresponding embedding from embeddings list of list of lists\n',
embeddings[6][9][:])
# Show the last word in the last document
test_word = documents[999][39]
print('Last word in last document:', test_word)
print('Embedding for this word:\n',
limited_index_to_embedding[limited_word_to_index[test_word]])
print('Corresponding embedding from embeddings list of list of lists\n',
embeddings[999][39][:])
# -----------------------------------------------------
# Make embeddings a numpy array for use in an RNN
# Create training and test sets with Scikit Learn
# -----------------------------------------------------
embeddings_array = np.array(embeddings)
# Define the labels to be used 500 negative (0) and 500 positive (1)
thumbs_down_up = np.concatenate((np.zeros((500), dtype = np.int32),
np.ones((500), dtype = np.int32)), axis = 0)
# Scikit Learn for random splitting of the data
from sklearn.model_selection import train_test_split
# Random splitting of the data in to training (80%) and test (20%)
X_train, X_test, y_train, y_test = \
train_test_split(embeddings_array, thumbs_down_up, test_size=0.20,
random_state = RANDOM_SEED)
# --------------------------------------------------------------------------
# We use a very simple Recurrent Neural Network for this assignment
# Géron, A. 2017. Hands-On Machine Learning with Scikit-Learn & TensorFlow:
# Concepts, Tools, and Techniques to Build Intelligent Systems.
# Sebastopol, Calif.: O'Reilly. [ISBN-13 978-1-491-96229-9]
# Chapter 14 Recurrent Neural Networks, pages 390-391
# Source code available at https://github.com/ageron/handson-ml
# Jupyter notebook file 14_recurrent_neural_networks.ipynb
# See section on Training an sequence Classifier, # In [34]:
# which uses the MNIST case data... we revise to accommodate
# the movie review data in this assignment
# --------------------------------------------------------------------------
reset_graph()
n_steps = embeddings_array.shape[1] # number of words per document
n_inputs = embeddings_array.shape[2] # dimension of pre-trained embeddings
n_neurons = 20 # analyst specified number of neurons
n_outputs = 2 # thumbs-down or thumbs-up
learning_rate = 0.001
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
logits = tf.layers.dense(states, n_outputs)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
logits=logits)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
n_epochs = 50
batch_size = 100
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
print('\n ---- Epoch ', epoch, ' ----\n')
for iteration in range(y_train.shape[0] // batch_size):
X_batch = X_train[iteration*batch_size:(iteration + 1)*batch_size,:]
y_batch = y_train[iteration*batch_size:(iteration + 1)*batch_size]
print(' Batch ', iteration, ' training observations from ',
iteration*batch_size, ' to ', (iteration + 1)*batch_size-1,)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print('\n Train accuracy:', acc_train, 'Test accuracy:', acc_test)
|
the-stack_0_20957 | from stix_shifter_utils.utils.base_entry_point import BaseEntryPoint
# from stix_shifter_modules.qradar_perf_test.stix_translation.results_translator import ResultsTranslator
class EntryPoint(BaseEntryPoint):
def __init__(self, connection={}, configuration={}, options={}):
super().__init__(connection, configuration, options)
if connection:
self.setup_transmission_basic(connection, configuration)
self.setup_translation_simple(dialect_default='events')
|
the-stack_0_20959 | import logging
import os
import random
import numpy as np
import torch
def set_random_seed(seed=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
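# convert_ddp_model_dict strips the "module." prefix (7 characters) that
# torch.nn.parallel.DistributedDataParallel prepends to every parameter name,
# so a checkpoint saved from a DDP-wrapped model can be loaded into the
# unwrapped module.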
def convert_ddp_model_dict(src_state):
dst_state = {}
for k in src_state.keys():
dst_state[k[7:]] = src_state[k]
return dst_state
def define_logger(logdir, logname):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(logdir, logname))
handler.setFormatter(logging.Formatter('%(asctime)s - %(filename)s - %(message)s'))
logger.addHandler(handler)
return logger
|
the-stack_0_20960 | # Copyright 2015 [email protected]
# Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
LogApiHeader = collections.namedtuple('LogApiHeader', ['name', 'is_required'])
"""Tuple describing a header."""
X_TENANT_ID = LogApiHeader(name='X-Tenant-Id', is_required=False)
X_ROLES = LogApiHeader(name='X-Roles', is_required=False)
X_APPLICATION_TYPE = LogApiHeader(name='X-Application-Type', is_required=False)
X_DIMENSIONS = LogApiHeader(name='X_Dimensions', is_required=False)
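# Hedged usage example (the Falcon request object below is illustrative of
# how these tuples are typically consumed, not part of this module):
#
#   roles = req.get_header(X_ROLES.name, required=X_ROLES.is_required)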
|
the-stack_0_20962 | """Add an UnservedLoad component, which ensures the model is always feasible.
This is often useful when the model is constrained to the edge of infeasibility,
(e.g., when evaluating a pre-defined, just-feasible construction plan) to avoid
spurious reports of infeasibility."""
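# Hedged usage note (module/command names below are illustrative): list this
# module with the other SWITCH modules (e.g. in the scenario's modules.txt)
# and optionally override the penalty on the command line, for example:
#
#   switch solve --unserved-load-penalty 10000
#
# Any unserved load or reserves then appear in the UnservedLoad,
# UnservedUpReserves and UnservedDownReserves variables instead of making
# the model infeasible.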
from pyomo.environ import *
def define_arguments(argparser):
argparser.add_argument("--unserved-load-penalty", type=float, default=None,
help="Penalty to charge per MWh of unserved load. Usually set high enough to force unserved load to zero (default is $10,000/MWh).")
def define_components(m):
# create an unserved load variable with a high penalty cost,
# to avoid infeasibilities when
# evaluating scenarios that are on the edge of infeasibility
# cost per MWh for unserved load (high)
if m.options.unserved_load_penalty is not None:
# always use penalty factor supplied on the command line, if any
m.unserved_load_penalty_per_mwh = Param(initialize=m.options.unserved_load_penalty)
else:
# no penalty on the command line, use whatever is in the parameter files, or 10000
m.unserved_load_penalty_per_mwh = Param(default=10000)
# amount of unserved load during each timepoint
m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals)
# total cost for unserved load
m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp:
m.tp_duration_hrs[tp]
* sum(m.UnservedLoad[z, tp] * m.unserved_load_penalty_per_mwh for z in m.LOAD_ZONES)
)
# add the unserved load to the model's energy balance
m.Zone_Power_Injections.append('UnservedLoad')
# add the unserved load penalty to the model's objective function
m.Cost_Components_Per_TP.append('UnservedLoadPenalty')
# amount of unserved reserves during each timepoint
m.UnservedUpReserves = Var(m.TIMEPOINTS, within=NonNegativeReals)
m.UnservedDownReserves = Var(m.TIMEPOINTS, within=NonNegativeReals)
# total cost for unserved reserves (90% as high as cost of unserved load,
# to make the model prefer to serve load when possible)
m.UnservedReservePenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp:
m.tp_duration_hrs[tp]
* 0.9
* m.unserved_load_penalty_per_mwh
* (m.UnservedUpReserves[tp] + m.UnservedDownReserves[tp])
)
# add the unserved load penalty to the model's objective function
m.Cost_Components_Per_TP.append('UnservedReservePenalty')
|
the-stack_0_20963 | import fnmatch
import os
import sys
from tkinter import StringVar, BooleanVar
from tkinter.ttk import Checkbutton
from idlelib.searchbase import SearchDialogBase
from idlelib import searchengine
def grep(text, io=None, flist=None):
root = text._root()
engine = searchengine.get(root)
if not hasattr(engine, '_grepdialog'):
engine._grepdialog = GrepDialog(root, engine, flist)
dialog = engine._grepdialog
searchphrase = text.get('sel.first', 'sel.last')
dialog.open(text, searchphrase, io)
class GrepDialog(SearchDialogBase):
title = 'Find in Files Dialog'
icon = 'Grep'
needwrapbutton = 0
def __init__(self, root, engine, flist):
SearchDialogBase.__init__(self, root, engine)
self.flist = flist
self.globvar = StringVar(root)
self.recvar = BooleanVar(root)
def open(self, text, searchphrase, io=None):
SearchDialogBase.open(self, text, searchphrase)
if io:
path = io.filename or ''
else:
path = ''
dir, base = os.path.split(path)
head, tail = os.path.splitext(base)
if not tail:
tail = '.py'
self.globvar.set(os.path.join(dir, '*' + tail))
def create_entries(self):
SearchDialogBase.create_entries(self)
self.globent = self.make_entry('In files:', self.globvar)[0]
def create_other_buttons(self):
        btn = Checkbutton(self.make_frame()[0], variable=self.recvar,
                          text='Recurse down subdirectories')
btn.pack(side='top', fill='both')
def create_command_buttons(self):
SearchDialogBase.create_command_buttons(self)
self.make_button('Search Files', self.default_command, 1)
def default_command(self, event=None):
prog = self.engine.getprog()
if not prog:
return
path = self.globvar.get()
if not path:
self.top.bell()
return
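        # Redirect print() output from grep_it() into an IDLE OutputWindow so
        # that hits appear in a clickable results window, then restore the
        # real stdout afterwards.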
from idlelib.outwin import OutputWindow
save = sys.stdout
try:
sys.stdout = OutputWindow(self.flist)
self.grep_it(prog, path)
finally:
sys.stdout = save
def grep_it(self, prog, path):
dir, base = os.path.split(path)
list = self.findfiles(dir, base, self.recvar.get())
list.sort()
self.close()
pat = self.engine.getpat()
print('Searching %r in %s ...' % (pat, path))
hits = 0
try:
for fn in list:
try:
with open(fn, errors='replace') as f:
for lineno, line in enumerate(f, 1):
if line[-1:] == '\n':
line = line[:-1]
if prog.search(line):
sys.stdout.write('%s: %s: %s\n' % (fn,
lineno, line))
hits += 1
except OSError as msg:
print(msg)
            print(('Hits found: %s\n(Hint: right-click to open locations.)'
                   % hits) if hits else 'No hits.')
except AttributeError:
pass
def findfiles(self, dir, base, rec):
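        # Return the files directly under `dir` whose basename matches the
        # glob pattern `base`; when `rec` is true, also recurse into every
        # subdirectory.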
try:
names = os.listdir(dir or os.curdir)
except OSError as msg:
print(msg)
return []
list = []
subdirs = []
for name in names:
fn = os.path.join(dir, name)
if os.path.isdir(fn):
subdirs.append(fn)
elif fnmatch.fnmatch(name, base):
list.append(fn)
if rec:
for subdir in subdirs:
list.extend(self.findfiles(subdir, base, rec))
return list
def close(self, event=None):
if self.top:
self.top.grab_release()
self.top.withdraw()
def _grep_dialog(parent):
from tkinter import Toplevel, Text, SEL, END
from tkinter.ttk import Button
from idlelib.pyshell import PyShellFileList
top = Toplevel(parent)
top.title('Test GrepDialog')
x, y = map(int, parent.geometry().split('+')[1:])
top.geometry('+%d+%d' % (x, y + 175))
flist = PyShellFileList(top)
text = Text(top, height=5)
text.pack()
def show_grep_dialog():
text.tag_add(SEL, '1.0', END)
grep(text, flist=flist)
text.tag_remove(SEL, '1.0', END)
button = Button(top, text='Show GrepDialog', command=show_grep_dialog)
button.pack()
if __name__ == '__main__':
import unittest
unittest.main('idlelib.idle_test.test_grep', verbosity=2, exit=False)
from idlelib.idle_test.htest import run
run(_grep_dialog)
|
the-stack_0_20964 | import numpy as np
import dask
from os import path
from cubequery.tasks import CubeQueryTask, Parameter, DType
from datacube_utilities import import_export
from datacube_utilities.query import (
create_base_query,
create_product_measurement,
is_dataset_empty,
)
class WaterChange(CubeQueryTask):
"""
This task uses changes in water cover to identify water change.
"""
display_name = "Water Change"
description = "Indicates the spatial change in water-covered surface area, between two time periods, taking an input of the water permanency product."
img_url = "https://arcgis01.satapps.org/portal//sharing/rest/content/items/a499849ccd1f4c7fb0403b4c719f9dc1/resources/vF_Water%20Change.png?v=1601648787333"
info_url = "https://arcgis01.satapps.org/portal/apps/sites/?fromEdit=true#/data/pages/data-cube"
parameters = [
Parameter("aoi", "Area Of Interest", DType.WKT, "Area of interest."),
Parameter(
"output_projection",
"Output Projection",
DType.STRING,
"Projection to generate the output in.",
["EPSG:3460"]
),
Parameter(
"baseline_start_date",
"Baseline Start Date",
DType.DATE,
"Start date of the period to use for the baseline.",
),
Parameter(
"baseline_end_date",
"Baseline End Date",
DType.DATE,
"End date of the period to use for the baseline.",
),
Parameter(
"analysis_start_date",
"Analysis Start Date",
DType.DATE,
"Start date of the period to use for the analysis.",
),
Parameter(
"analysis_end_date",
"Analysis End Date",
DType.DATE,
"End date of the period to use for the analysis.",
),
Parameter(
"platform_base",
"Baseline Satellite",
DType.STRING,
"Satellite to use for the baseline.",
["SENTINEL_2", "LANDSAT_4", "LANDSAT_5", "LANDSAT_7", "LANDSAT_8"],
),
Parameter(
"platform_analysis",
"Analysis Satellite",
DType.STRING,
"Satellite to use for the analysis.",
["SENTINEL_2", "LANDSAT_4", "LANDSAT_5", "LANDSAT_7", "LANDSAT_8"],
),
Parameter(
"res",
"Resolution in meters",
DType.INT,
"Pixel resolution in meters.",
[10, 500],
),
Parameter("aoi_crs", "Area Of Interest CRS", DType.STRING, "CRS of the Area of Interest.", ["EPSG:4326"]),
]
CubeQueryTask.cal_significant_kwargs(parameters)
def generate_product(
self,
dc,
path_prefix,
aoi,
output_projection,
baseline_start_date,
baseline_end_date,
analysis_start_date,
analysis_end_date,
platform_base,
platform_analysis,
res,
aoi_crs,
**kwargs,
):
## Create datacube query
dask_chunks = dict(time=1, x=2000, y=2000)
query = create_base_query(aoi, res, output_projection, aoi_crs, dask_chunks)
all_measurements = ["green", "red", "blue", "nir", "swir1", "swir2"]
(
_baseline_product,
_baseline_measurement,
baseline_water_product,
) = create_product_measurement(platform_base, all_measurements)
(
_analysis_product,
_analysis_measurement,
analysis_water_product,
) = create_product_measurement(platform_analysis, all_measurements)
baseline_time_period = (baseline_start_date, baseline_end_date)
analysis_time_period = (analysis_start_date, analysis_end_date)
## Create dask graph
baseline_ds = dc.load(
time=baseline_time_period,
platform=platform_base,
product=baseline_water_product,
measurements=["water_classification"],
**query,
)
analysis_ds = dc.load(
time=analysis_time_period,
platform=platform_analysis,
product=analysis_water_product,
measurements=["water_classification"],
**query,
)
if is_dataset_empty(baseline_ds):
raise Exception(
"DataCube Load returned an empty Dataset."
+ "Please check load parameters for Baseline Dataset!"
)
if is_dataset_empty(analysis_ds):
raise Exception(
"DataCube Load returned an empty Dataset."
+ "Please check load parameters for Analysis Dataset!"
)
wc_baseline = baseline_ds.where(baseline_ds >= 0)
wc_analysis = analysis_ds.where(analysis_ds >= 0)
wc_baseline_mean = wc_baseline.water_classification.mean(dim="time")
wc_analysis_mean = wc_analysis.water_classification.mean(dim="time")
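        # Reclassify the mean water frequency of each period into a binary
        # mask: pixels observed as water in at least `waterpres_prob` (30%) of
        # the scenes become 1 (water), the rest become 0 (not water); nodata
        # (NaN) pixels are preserved by the masks below.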
waterpres_prob = 0.3
T0_nd_water = np.isnan(wc_baseline_mean)
wc_baseline_rc_int = wc_baseline_mean.where(
(wc_baseline_mean < waterpres_prob) | (T0_nd_water == True), 1
) # fix > prob to water
wc_baseline_rc = wc_baseline_rc_int.where(
(wc_baseline_rc_int >= waterpres_prob) | (T0_nd_water == True), 0
) # fix < prob to no water
T1_nd_water = np.isnan(wc_analysis_mean)
wc_analysis_rc_int = wc_analysis_mean.where(
(wc_analysis_mean < waterpres_prob) | (T1_nd_water == True), 1
) # fix > prob to water
wc_analysis_rc = wc_analysis_rc_int.where(
(wc_analysis_rc_int >= waterpres_prob) | (T1_nd_water == True), 0
) # fix < prob to no water
# Outputs
difference = wc_analysis_rc - wc_baseline_rc
difference_range = wc_analysis_mean - wc_baseline_mean
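        # Everything above only builds a lazy dask task graph; the compute()
        # call below loads the data and evaluates both outputs in a single pass.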
## Compute
difference_output, difference_range_output = dask.compute(
difference, difference_range
)
## Write files
result = []
file_name = path.join(path_prefix, "difference_range.tiff")
import_export.export_xarray_to_geotiff(
difference_range_output,
file_name,
crs=output_projection,
x_coord="x",
y_coord="y",
)
result.append(file_name)
file_name = path.join(path_prefix, "difference.tiff")
import_export.export_xarray_to_geotiff(
difference_output,
file_name,
crs=output_projection,
x_coord="x",
y_coord="y",
)
result.append(file_name)
return result
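# Illustrative sketch (not part of the original task): how generate_product()
# might be driven directly from a script, assuming an Open Data Cube
# installation with the required water products indexed. All parameter values
# below are hypothetical examples only.
#
#   import datacube
#   task = WaterChange()
#   outputs = task.generate_product(
#       dc=datacube.Datacube(app="water_change_demo"),
#       path_prefix="/tmp/water_change",
#       aoi="POLYGON ((177.2 -17.9, 177.6 -17.9, 177.6 -18.2, 177.2 -18.2, 177.2 -17.9))",
#       output_projection="EPSG:3460",
#       aoi_crs="EPSG:4326",
#       baseline_start_date="2018-01-01", baseline_end_date="2018-12-31",
#       analysis_start_date="2019-01-01", analysis_end_date="2019-12-31",
#       platform_base="LANDSAT_8", platform_analysis="LANDSAT_8",
#       res=30,
#   )  # -> ["/tmp/water_change/difference_range.tiff", "/tmp/water_change/difference.tiff"]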
|
the-stack_0_20967 | #------------------------------------------------------------------------------
# Copyright (c) 2007, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD license.
# However, when used with the GPL version of PyQt, the additional terms described in the PyQt GPL exception also apply.
#
# Author: Riverbank Computing Limited
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
# Major package imports.
from pyface.qt import QtCore, QtGui
# Enthought library imports.
from traits.api import Bool, Enum, Int, provides, Str, Unicode
# Local imports.
from pyface.i_dialog import IDialog, MDialog
from pyface.constant import OK, CANCEL, YES, NO
from .window import Window
# Map PyQt dialog related constants to the pyface equivalents.
_RESULT_MAP = {
int(QtGui.QDialog.Accepted): OK,
int(QtGui.QDialog.Rejected): CANCEL,
int(QtGui.QMessageBox.Ok): OK,
int(QtGui.QMessageBox.Cancel): CANCEL,
int(QtGui.QMessageBox.Yes): YES,
int(QtGui.QMessageBox.No): NO
}
@provides(IDialog)
class Dialog(MDialog, Window):
""" The toolkit specific implementation of a Dialog. See the IDialog
interface for the API documentation.
"""
#### 'IDialog' interface ##################################################
cancel_label = Unicode
help_id = Str
help_label = Unicode
ok_label = Unicode
resizeable = Bool(True)
return_code = Int(OK)
style = Enum('modal', 'nonmodal')
#### 'IWindow' interface ##################################################
title = Unicode("Dialog")
###########################################################################
# Protected 'IDialog' interface.
###########################################################################
def _create_buttons(self, parent):
buttons = QtGui.QDialogButtonBox()
# 'OK' button.
if self.ok_label:
btn = buttons.addButton(self.ok_label,
QtGui.QDialogButtonBox.AcceptRole)
else:
btn = buttons.addButton(QtGui.QDialogButtonBox.Ok)
btn.setDefault(True)
btn.clicked.connect(self.control.accept)
# 'Cancel' button.
if self.cancel_label:
btn = buttons.addButton(self.cancel_label,
QtGui.QDialogButtonBox.RejectRole)
else:
btn = buttons.addButton(QtGui.QDialogButtonBox.Cancel)
btn.clicked.connect(self.control.reject)
# 'Help' button.
# FIXME v3: In the original code the only possible hook into the help
# was to reimplement self._on_help(). However this was a private
# method. Obviously nobody uses the Help button. For the moment we
# display it but can't actually use it.
if len(self.help_id) > 0:
if self.help_label:
buttons.addButton(self.help_label, QtGui.QDialogButtonBox.HelpRole)
else:
buttons.addButton(QtGui.QDialogButtonBox.Help)
return buttons
def _create_contents(self, parent):
layout = QtGui.QVBoxLayout()
if not self.resizeable:
layout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
layout.addWidget(self._create_dialog_area(parent))
layout.addWidget(self._create_buttons(parent))
parent.setLayout(layout)
def _create_dialog_area(self, parent):
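        # Placeholder content: concrete dialogs normally override this method
        # to supply their own contents; the solid red panel simply makes an
        # un-overridden dialog area obvious during development.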
panel = QtGui.QWidget(parent)
panel.setMinimumSize(QtCore.QSize(100, 200))
palette = panel.palette()
palette.setColor(QtGui.QPalette.Window, QtGui.QColor('red'))
panel.setPalette(palette)
panel.setAutoFillBackground(True)
return panel
def _show_modal(self):
self.control.setWindowModality(QtCore.Qt.ApplicationModal)
retval = self.control.exec_()
return _RESULT_MAP[retval]
###########################################################################
# Protected 'IWidget' interface.
###########################################################################
def _create_control(self, parent):
dlg = QtGui.QDialog(parent)
# Setting return code and firing close events is handled for 'modal' in
# MDialog's open method. For 'nonmodal', we do it here.
if self.style == 'nonmodal':
dlg.finished.connect(self._finished_fired)
if self.size != (-1, -1):
dlg.resize(*self.size)
if self.position != (-1, -1):
dlg.move(*self.position)
dlg.setWindowTitle(self.title)
return dlg
###########################################################################
# Private interface.
###########################################################################
def _finished_fired(self, result):
""" Called when the dialog is closed (and nonmodal). """
self.return_code = _RESULT_MAP[result]
self.close()
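# Illustrative sketch (not part of the original module): typical use from
# application code, via the toolkit-independent pyface.api entry point.
# Shown as a comment because it needs a live Qt event loop; the labels are
# hypothetical.
#
#   from pyface.api import Dialog, OK
#   dialog = Dialog(title="Example", style="modal", ok_label="Apply")
#   if dialog.open() == OK:
#       print("accepted")
#   else:
#       print("cancelled")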
|