""" view.py: Contains the View class. """
import random
import config
from graphics import *
class View:
""" The view class which handles the visual component of the application.
"""
def __init__(self, pygame, master):
""" Set up and initialise the view. Does not start the display. """
self._pygame = pygame
self._master = master
self._display = self._pygame.display
self._interface = None
self._state = None
self._cycle_colour = (200, 0, 0)
self._white = (255, 255, 255)
def start(self):
""" Start the display. """
self._screen = self._display.set_mode((640, 480))
self._display.set_caption('PolyominOhs!')
self._pygame.mouse.set_visible(0)
def update(self):
""" Update the screen. """
# Constantly cycle through a colour
h, s, v = rgb2hsv(self._cycle_colour)
h += 1
self._cycle_colour = hsv2rgb((h, s, v))
if self._state == config.GS_LOADING:
self._screen.blit(self._background, (0, 0))
elif self._state in [config.GS_MENU, config.GS_MENU_ENTER_HIGHSCORE,
config.GS_MENU_HIGHSCORES, config.GS_MENU_HELP]:
# Get current selections
selected = self._interface.get_selection()
settings = {config.MENU_LEVEL: str(self._interface.get_level()),
config.MENU_ORDER: str(self._interface.get_order()),
config.MENU_SFX: self._interface.get_sfx(),
config.MENU_MUSIC: self._interface.get_music()}
# Background and title
self._screen.blit(self._background, (0, 0))
draw_text(self._screen, (120, 25), 'PolyominOhs!', 36,
self._cycle_colour, self._pygame, True)
# Buttons
for button in self._buttons.items():
if button[0] == selected:
button[1].draw(self._screen, config.TXT_HOVER,
self._pygame, self._cycle_colour)
else:
button[1].draw(self._screen, config.TXT_NORMAL,
self._pygame)
# Radio Selections
for radio in self._radios.items():
if radio[0] == selected:
radio[1].draw(self._screen, settings[radio[0]],
config.TXT_HOVER, self._cycle_colour,
self._pygame)
else:
radio[1].draw(self._screen, settings[radio[0]],
config.TXT_NORMAL, self._cycle_colour,
self._pygame)
# Random polyomino
order = self._interface.get_order()
ominoes = self._master._ominoes[order - 1]
n = self._interface.get_random_omino()
shape = ominoes[0][n]
draw_polyomino(self._screen, (400, 160), shape, 21,
self._cycle_colour, self._pygame)
# Highscores
if self._state == config.GS_MENU_HIGHSCORES:
draw_border(self._highscores, self._cycle_colour, self._pygame)
for i, highscore in enumerate(self._master.get_highscores()):
name, score = highscore
name = name.replace('_', ' ')
if self._interface.get_highscore_highlight() == i:
colour = self._cycle_colour
else:
colour = self._white
draw_text(self._highscores, (20, 10 + (i + 1) * 25), name,
10, colour, self._pygame)
draw_text(self._highscores, (175, 10 + (i + 1) * 25),
str(score), 10, colour, self._pygame)
self._screen.blit(self._highscores, (200, 100))
# Enter highscore
if self._state == config.GS_MENU_ENTER_HIGHSCORE:
self._enterhighscore.fill((0, 0, 0))
draw_border(self._enterhighscore, self._cycle_colour,
self._pygame)
draw_text(self._enterhighscore, (60, 20), 'Highscore!', 14,
self._white, self._pygame)
draw_text(self._enterhighscore, (20, 60),
'Please enter your name:', 10, self._white,
self._pygame)
draw_text(self._enterhighscore, (70, 170), 'Press return', 10,
self._white, self._pygame)
self._name_entry.update(self._interface.get_highscore_name())
self._name_entry.draw(self._enterhighscore,
self._interface.get_name_selected(),
self._cycle_colour, self._pygame)
self._screen.blit(self._enterhighscore, (200, 120))
# Help
if self._state == config.GS_MENU_HELP:
draw_border(self._help, self._cycle_colour, self._pygame)
self._screen.blit(self._help, (115, 120))
elif self._state in [config.GS_GAME, config.GS_GAME_PAUSED,
config.GS_GAME_OVER]:
# Get current information
score = str(self._interface.get_score())
lines = str(self._interface.get_lines_cleared())
next_omino = self._interface.get_next_omino()
self._screen.blit(self._background, (0, 0))
# Score and number of lines cleared
draw_text(self._screen, (445, 155), score, 10, self._white,
self._pygame)
draw_text(self._screen, (445, 215), lines, 10, self._white,
self._pygame)
# Draw next polyomino
if self._state == config.GS_GAME:
draw_polyomino(self._screen, (440, 290), next_omino.get_shape(0),
21, next_omino.get_colour(), self._pygame)
# Draw grid of blocks (or pause or game over screen)
grid = self._interface.get_field().get_complete_grid()
self._grid.fill((0, 0, 0))
draw_border(self._grid, self._cycle_colour, self._pygame)
if self._state == config.GS_GAME:
size = config.sizes[self._interface.get_order()]
draw_grid(self._grid, (5, 5), grid, size, self._pygame)
elif self._state == config.GS_GAME_PAUSED:
draw_text(self._grid, (30, 115), 'Game Paused', 14,
self._cycle_colour, self._pygame, True)
draw_text(self._grid, (40, 185), 'Press y to quit', 10,
self._white, self._pygame)
draw_text(self._grid, (30, 215), 'or esc to resume', 10,
self._white, self._pygame)
elif self._state == config.GS_GAME_OVER:
draw_text(self._grid, (42, 115), 'Game Over', 14,
self._cycle_colour, self._pygame, True)
draw_text(self._grid, (47, 185), 'Press return', 10,
self._white, self._pygame)
self._screen.blit(self._grid, (60, 30))
self._display.flip()
def change_state(self, state, interface=None):
""" Change the state of the application and get the new interface
(if given). Set up graphics for the new state if required.
change_state(int, Menu/Game) -> void
"""
self._state = state
        if interface is not None:
self._interface = interface
if self._state == config.GS_LOADING:
# Background with loading text
self._background = self._pygame.Surface(self._screen.get_size())
self._background = self._background.convert()
self._background.fill((0, 0, 0))
draw_text(self._background, (180, 180), 'Loading...', 36,
self._white, self._pygame)
elif self._state == config.GS_GAME:
# Background with static text
self._background = self._pygame.Surface(self._screen.get_size())
self._background = self._background.convert()
self._background.fill((0, 0, 0))
draw_text(self._background, (410, 130), 'Score:', 10,
self._white, self._pygame)
draw_text(self._background, (410, 190), 'Lines Cleared:', 10,
self._white, self._pygame)
next_text = 'Next ' + \
config.names[self._interface.get_order()].title() + ':'
draw_text(self._background, (410, 250), next_text, 10,
self._white, self._pygame)
# Grid
w = 210 + 10 - self._interface.get_field().get_size()[0] + 1
h = 420 + 10 - self._interface.get_field().get_size()[1] + 1
self._grid = self._pygame.Surface((w, h))
self._grid = self._grid.convert()
self._grid.fill((0, 0, 0))
self._grid.set_colorkey((0, 0, 0))
elif self._state in [config.GS_MENU, config.GS_MENU_ENTER_HIGHSCORE,
config.GS_MENU_HIGHSCORES]:
# Background with static text
self._background = self._pygame.Surface(self._screen.get_size())
self._background = self._background.convert()
self._background.fill((0, 0, 0))
draw_text(self._background, (110, 300), 'Settings:', 10,
self._white, self._pygame)
draw_text(self._background, (130, 340), 'Difficulty Level:', 10,
self._white, self._pygame)
draw_text(self._background, (130, 400), 'Polyomino Order:', 10,
self._white, self._pygame)
draw_text(self._background, (370, 300), 'Audio:', 10,
self._white, self._pygame)
draw_text(self._background, (400, 340), 'Sound Effects:', 10,
self._white, self._pygame)
draw_text(self._background, (400, 400), 'Music:', 10,
self._white, self._pygame)
# Buttons
self._buttons = {}
start_game_button = Button('Start Game', 10, (90, 150))
self._buttons.update({config.MENU_START: start_game_button})
view_highscores_button = Button('View Highscores', 10, (90, 180))
self._buttons.update({config.MENU_HIGHSCORES: view_highscores_button})
help_button = Button('Help', 10, (90, 210))
self._buttons.update({config.MENU_HELP: help_button})
quit_button = Button('Quit', 10, (90, 240))
self._buttons.update({config.MENU_QUIT: quit_button})
# Radio Selections
self._radios = {}
level_selection = Radio_Selection([str(n + 1) for n in range(9)],
10, (160, 365))
self._radios.update({config.MENU_LEVEL: level_selection})
order_selection = Radio_Selection([str(n + 1) for n in range(6)],
10, (160, 425))
self._radios.update({config.MENU_ORDER: order_selection})
sfx_selection = Radio_Selection(['On', 'Off'], 10, (435, 365))
self._radios.update({config.MENU_SFX: sfx_selection})
music_selection = Radio_Selection(['On', 'Off'], 10, (435, 425))
self._radios.update({config.MENU_MUSIC: music_selection})
# Highscores Screen
self._highscores = self._pygame.Surface((250, 300))
self._highscores = self._highscores.convert()
self._highscores.fill((0, 0, 0))
draw_text(self._highscores, (15, 10), 'Highscores:', 10,
self._white, self._pygame)
# Enter highscore name screen
self._enterhighscore = self._pygame.Surface((250, 210))
self._enterhighscore = self._enterhighscore.convert()
self._enterhighscore.fill((0, 0, 0))
self._name_entry = Text_Entry(3, ['A', 'A', 'A'], 20, (85, 105))
# Help Screen
self._help = self._pygame.Surface((410, 240))
self._help = self._help.convert()
self._help.fill((0, 0, 0))
draw_text(self._help, (15, 10), 'Controls:', 10, self._white,
self._pygame)
draw_text(self._help, (205, 10), 'Instructions:', 10,
self._white, self._pygame)
draw_text(self._help, (20, 45), 'Up - Rotate', 10, self._white,
self._pygame)
draw_text(self._help, (20, 75), 'Left - Move Left', 10,
self._white, self._pygame)
draw_text(self._help, (20, 105), 'Right - Move Right', 10,
self._white, self._pygame)
draw_text(self._help, (20, 135), 'Down - Move Down', 10,
self._white, self._pygame)
draw_text(self._help, (20, 165), 'Space - Drop', 10, self._white,
self._pygame)
draw_text(self._help, (20, 195), 'Esc - Pause', 10, self._white,
self._pygame)
text = config.instructions
rect = self._pygame.Rect(0, 0, 190, 190)
instructions = render_textrect(text, 8, rect, self._white,
(0, 0, 0), 0, self._pygame)
self._help.blit(instructions, (210, 45))
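# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module). A minimal, hypothetical
# driver showing how View is intended to be used: construct it with the
# pygame module and a master/controller object, call start() once to open
# the display, switch states with change_state(), and call update() every
# frame. _DummyMaster is an illustrative stand-in only; in the real
# application the master is the controller that owns the polyominoes and
# highscores, which the loading state does not touch.
if __name__ == '__main__':
    import pygame

    class _DummyMaster:
        """ Hypothetical stand-in for the controller (assumption). """
        pass

    pygame.init()
    view = View(pygame, _DummyMaster())
    view.start()                            # open the 640x480 window
    view.change_state(config.GS_LOADING)    # draw the 'Loading...' screen

    clock = pygame.time.Clock()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        view.update()                       # redraw and flip the display
        clock.tick(30)                      # cap at 30 frames per second
    pygame.quit()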
_white, self._pygame)\n draw_text(self._background, (400, 340), 'Sound Effects:', 10,\n self._white, self._pygame)\n draw_text(self._background, (400, 400), 'Music:', 10, self.\n _white, self._pygame)\n self._buttons = {}\n start_game_button = Button('Start Game', 10, (90, 150))\n self._buttons.update({config.MENU_START: start_game_button})\n view_highscores_button = Button('View Highscores', 10, (90, 180))\n self._buttons.update({config.MENU_HIGHSCORES:\n view_highscores_button})\n help_button = Button('Help', 10, (90, 210))\n self._buttons.update({config.MENU_HELP: help_button})\n quit_button = Button('Quit', 10, (90, 240))\n self._buttons.update({config.MENU_QUIT: quit_button})\n self._radios = {}\n level_selection = Radio_Selection([str(n + 1) for n in range(9)\n ], 10, (160, 365))\n self._radios.update({config.MENU_LEVEL: level_selection})\n order_selection = Radio_Selection([str(n + 1) for n in range(6)\n ], 10, (160, 425))\n self._radios.update({config.MENU_ORDER: order_selection})\n sfx_selection = Radio_Selection(['On', 'Off'], 10, (435, 365))\n self._radios.update({config.MENU_SFX: sfx_selection})\n music_selection = Radio_Selection(['On', 'Off'], 10, (435, 425))\n self._radios.update({config.MENU_MUSIC: music_selection})\n self._highscores = self._pygame.Surface((250, 300))\n self._highscores = self._highscores.convert()\n self._highscores.fill((0, 0, 0))\n draw_text(self._highscores, (15, 10), 'Highscores:', 10, self.\n _white, self._pygame)\n self._enterhighscore = self._pygame.Surface((250, 210))\n self._enterhighscore = self._enterhighscore.convert()\n self._enterhighscore.fill((0, 0, 0))\n self._name_entry = Text_Entry(3, ['A', 'A', 'A'], 20, (85, 105))\n self._help = self._pygame.Surface((410, 240))\n self._help = self._help.convert()\n self._help.fill((0, 0, 0))\n draw_text(self._help, (15, 10), 'Controls:', 10, self._white,\n self._pygame)\n draw_text(self._help, (205, 10), 'Instructions:', 10, self.\n _white, self._pygame)\n draw_text(self._help, (20, 45), 'Up - Rotate', 10, self._white,\n self._pygame)\n draw_text(self._help, (20, 75), 'Left - Move Left', 10, self.\n _white, self._pygame)\n draw_text(self._help, (20, 105), 'Right - Move Right', 10, self\n ._white, self._pygame)\n draw_text(self._help, (20, 135), 'Down - Move Down', 10, self.\n _white, self._pygame)\n draw_text(self._help, (20, 165), 'Space - Drop', 10, self.\n _white, self._pygame)\n draw_text(self._help, (20, 195), 'Esc - Pause', 10, self._white,\n self._pygame)\n text = config.instructions\n rect = self._pygame.Rect(0, 0, 190, 190)\n instructions = render_textrect(text, 8, rect, self._white, (0, \n 0, 0), 0, self._pygame)\n self._help.blit(instructions, (210, 45))\n",
"step-5": "\n\"\"\" view.py: Contains the View class. \"\"\"\n\n\nimport random\n\nimport config\nfrom graphics import *\n\n\nclass View:\n \n \"\"\" The view class which handles the visual component of the application.\n \"\"\"\n \n def __init__(self, pygame, master):\n \"\"\" Set up and initialise the view. Does not start the display. \"\"\"\n \n self._pygame = pygame\n self._master = master\n self._display = self._pygame.display\n self._interface = None\n self._state = None\n self._cycle_colour = (200, 0, 0)\n self._white = (255, 255, 255)\n \n def start(self):\n \"\"\" Start the display. \"\"\"\n \n self._screen = self._display.set_mode((640, 480))\n self._display.set_caption('PolyominOhs!')\n self._pygame.mouse.set_visible(0)\n \n def update(self):\n \"\"\" Update the screen. \"\"\"\n \n # Constantly cycle through a colour\n h, s, v = rgb2hsv(self._cycle_colour)\n h += 1\n self._cycle_colour = hsv2rgb((h, s, v))\n \n if self._state == config.GS_LOADING:\n self._screen.blit(self._background, (0, 0))\n elif self._state in [config.GS_MENU, config.GS_MENU_ENTER_HIGHSCORE,\n config.GS_MENU_HIGHSCORES, config.GS_MENU_HELP]:\n \n # Get current selections\n selected = self._interface.get_selection()\n settings = {config.MENU_LEVEL: str(self._interface.get_level()),\n config.MENU_ORDER: str(self._interface.get_order()),\n config.MENU_SFX: self._interface.get_sfx(),\n config.MENU_MUSIC: self._interface.get_music()}\n \n # Background and title\n self._screen.blit(self._background, (0, 0))\n draw_text(self._screen, (120, 25), 'PolyominOhs!', 36,\n self._cycle_colour, self._pygame, True)\n \n # Buttons\n for button in self._buttons.items():\n if button[0] == selected:\n button[1].draw(self._screen, config.TXT_HOVER,\n self._pygame, self._cycle_colour)\n else:\n button[1].draw(self._screen, config.TXT_NORMAL,\n self._pygame)\n \n # Radio Selections\n for radio in self._radios.items():\n if radio[0] == selected:\n radio[1].draw(self._screen, settings[radio[0]],\n config.TXT_HOVER, self._cycle_colour,\n self._pygame)\n else:\n radio[1].draw(self._screen, settings[radio[0]],\n config.TXT_NORMAL, self._cycle_colour,\n self._pygame)\n \n # Random polyomino\n order = self._interface.get_order()\n ominoes = self._master._ominoes[order - 1]\n n = self._interface.get_random_omino()\n shape = ominoes[0][n]\n draw_polyomino(self._screen, (400, 160), shape, 21,\n self._cycle_colour, self._pygame)\n \n # Highscores\n if self._state == config.GS_MENU_HIGHSCORES:\n draw_border(self._highscores, self._cycle_colour, self._pygame)\n for i, highscore in enumerate(self._master.get_highscores()):\n name, score = highscore\n name = name.replace('_', ' ')\n if self._interface.get_highscore_highlight() == i:\n colour = self._cycle_colour\n else:\n colour = self._white\n draw_text(self._highscores, (20, 10 + (i + 1) * 25), name,\n 10, colour, self._pygame)\n draw_text(self._highscores, (175, 10 + (i + 1) * 25),\n str(score), 10, colour, self._pygame)\n self._screen.blit(self._highscores, (200, 100))\n \n # Enter highscore\n if self._state == config.GS_MENU_ENTER_HIGHSCORE:\n self._enterhighscore.fill((0, 0, 0))\n draw_border(self._enterhighscore, self._cycle_colour,\n self._pygame)\n draw_text(self._enterhighscore, (60, 20), 'Highscore!', 14,\n self._white, self._pygame)\n draw_text(self._enterhighscore, (20, 60),\n 'Please enter your name:', 10, self._white,\n self._pygame)\n draw_text(self._enterhighscore, (70, 170), 'Press return', 10,\n self._white, self._pygame)\n 
self._name_entry.update(self._interface.get_highscore_name())\n self._name_entry.draw(self._enterhighscore,\n self._interface.get_name_selected(),\n self._cycle_colour, self._pygame)\n self._screen.blit(self._enterhighscore, (200, 120))\n \n # Help\n if self._state == config.GS_MENU_HELP:\n draw_border(self._help, self._cycle_colour, self._pygame)\n self._screen.blit(self._help, (115, 120))\n \n elif self._state in [config.GS_GAME, config.GS_GAME_PAUSED,\n config.GS_GAME_OVER]:\n \n # Get current information\n score = str(self._interface.get_score())\n lines = str(self._interface.get_lines_cleared())\n next_omino = self._interface.get_next_omino()\n \n self._screen.blit(self._background, (0, 0))\n \n # Score and number of lines cleared\n draw_text(self._screen, (445, 155), score, 10, self._white,\n self._pygame)\n draw_text(self._screen, (445, 215), lines, 10, self._white,\n self._pygame)\n \n # Draw next polyomino\n if self._state == config.GS_GAME:\n draw_polyomino(self._screen, (440, 290), next_omino.get_shape(0),\n 21, next_omino.get_colour(), self._pygame)\n \n # Draw grid of blocks (or pause or game over screen)\n grid = self._interface.get_field().get_complete_grid()\n self._grid.fill((0, 0, 0))\n draw_border(self._grid, self._cycle_colour, self._pygame)\n \n if self._state == config.GS_GAME:\n size = config.sizes[self._interface.get_order()]\n draw_grid(self._grid, (5, 5), grid, size, self._pygame)\n elif self._state == config.GS_GAME_PAUSED:\n draw_text(self._grid, (30, 115), 'Game Paused', 14,\n self._cycle_colour, self._pygame, True)\n draw_text(self._grid, (40, 185), 'Press y to quit', 10,\n self._white, self._pygame)\n draw_text(self._grid, (30, 215), 'or esc to resume', 10,\n self._white, self._pygame)\n elif self._state == config.GS_GAME_OVER:\n draw_text(self._grid, (42, 115), 'Game Over', 14,\n self._cycle_colour, self._pygame, True)\n draw_text(self._grid, (47, 185), 'Press return', 10,\n self._white, self._pygame)\n \n self._screen.blit(self._grid, (60, 30))\n \n self._display.flip()\n \n def change_state(self, state, interface=None):\n \"\"\" Change the state of the application and get the new interface\n (if given). 
Set up graphics for the new state if required.\n \n change_state(int, Menu/Game) -> void\n \"\"\"\n \n self._state = state\n if interface != None:\n self._interface = interface\n \n if self._state == config.GS_LOADING:\n \n # Background with loading text\n self._background = self._pygame.Surface(self._screen.get_size())\n self._background = self._background.convert()\n self._background.fill((0, 0, 0))\n draw_text(self._background, (180, 180), 'Loading...', 36,\n self._white, self._pygame)\n \n elif self._state == config.GS_GAME:\n \n # Background with static text\n self._background = self._pygame.Surface(self._screen.get_size())\n self._background = self._background.convert()\n self._background.fill((0, 0, 0))\n \n draw_text(self._background, (410, 130), 'Score:', 10,\n self._white, self._pygame)\n draw_text(self._background, (410, 190), 'Lines Cleared:', 10,\n self._white, self._pygame)\n \n next_text = 'Next ' + \\\n config.names[self._interface.get_order()].title() + ':'\n draw_text(self._background, (410, 250), next_text, 10,\n self._white, self._pygame)\n \n # Grid\n w = 210 + 10 - self._interface.get_field().get_size()[0] + 1\n h = 420 + 10 - self._interface.get_field().get_size()[1] + 1\n self._grid = self._pygame.Surface((w, h))\n self._grid = self._grid.convert()\n self._grid.fill((0, 0, 0))\n self._grid.set_colorkey((0, 0, 0))\n \n elif self._state in [config.GS_MENU, config.GS_MENU_ENTER_HIGHSCORE,\n config.GS_MENU_HIGHSCORES]:\n \n # Background with static text\n self._background = self._pygame.Surface(self._screen.get_size())\n self._background = self._background.convert()\n self._background.fill((0, 0, 0))\n \n draw_text(self._background, (110, 300), 'Settings:', 10,\n self._white, self._pygame)\n draw_text(self._background, (130, 340), 'Difficulty Level:', 10,\n self._white, self._pygame)\n draw_text(self._background, (130, 400), 'Polyomino Order:', 10,\n self._white, self._pygame)\n \n draw_text(self._background, (370, 300), 'Audio:', 10,\n self._white, self._pygame)\n draw_text(self._background, (400, 340), 'Sound Effects:', 10,\n self._white, self._pygame)\n draw_text(self._background, (400, 400), 'Music:', 10,\n self._white, self._pygame)\n \n # Buttons\n self._buttons = {}\n start_game_button = Button('Start Game', 10, (90, 150))\n self._buttons.update({config.MENU_START: start_game_button})\n view_highscores_button = Button('View Highscores', 10, (90, 180))\n self._buttons.update({config.MENU_HIGHSCORES: view_highscores_button})\n help_button = Button('Help', 10, (90, 210))\n self._buttons.update({config.MENU_HELP: help_button})\n quit_button = Button('Quit', 10, (90, 240))\n self._buttons.update({config.MENU_QUIT: quit_button})\n \n # Radio Selections\n self._radios = {}\n level_selection = Radio_Selection([str(n + 1) for n in range(9)],\n 10, (160, 365))\n self._radios.update({config.MENU_LEVEL: level_selection})\n order_selection = Radio_Selection([str(n + 1) for n in range(6)],\n 10, (160, 425))\n self._radios.update({config.MENU_ORDER: order_selection})\n sfx_selection = Radio_Selection(['On', 'Off'], 10, (435, 365))\n self._radios.update({config.MENU_SFX: sfx_selection})\n music_selection = Radio_Selection(['On', 'Off'], 10, (435, 425))\n self._radios.update({config.MENU_MUSIC: music_selection})\n \n # Highscores Screen\n self._highscores = self._pygame.Surface((250, 300))\n self._highscores = self._highscores.convert()\n self._highscores.fill((0, 0, 0))\n \n draw_text(self._highscores, (15, 10), 'Highscores:', 10,\n self._white, self._pygame)\n \n # Enter 
highscore name screen\n self._enterhighscore = self._pygame.Surface((250, 210))\n self._enterhighscore = self._enterhighscore.convert()\n self._enterhighscore.fill((0, 0, 0))\n self._name_entry = Text_Entry(3, ['A', 'A', 'A'], 20, (85, 105))\n \n # Help Screen\n self._help = self._pygame.Surface((410, 240))\n self._help = self._help.convert()\n self._help.fill((0, 0, 0))\n \n draw_text(self._help, (15, 10), 'Controls:', 10, self._white,\n self._pygame)\n draw_text(self._help, (205, 10), 'Instructions:', 10,\n self._white, self._pygame)\n \n draw_text(self._help, (20, 45), 'Up - Rotate', 10, self._white,\n self._pygame)\n draw_text(self._help, (20, 75), 'Left - Move Left', 10,\n self._white, self._pygame)\n draw_text(self._help, (20, 105), 'Right - Move Right', 10,\n self._white, self._pygame)\n draw_text(self._help, (20, 135), 'Down - Move Down', 10,\n self._white, self._pygame)\n draw_text(self._help, (20, 165), 'Space - Drop', 10, self._white,\n self._pygame)\n draw_text(self._help, (20, 195), 'Esc - Pause', 10, self._white,\n self._pygame)\n \n text = config.instructions\n rect = self._pygame.Rect(0, 0, 190, 190)\n instructions = render_textrect(text, 8, rect, self._white,\n (0, 0, 0), 0, self._pygame)\n self._help.blit(instructions, (210, 45))\n ",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
import socket
import struct
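# Raw-socket packet sniffer: captures IPv4 frames on a Linux AF_PACKET socket
# and prints the parsed Ethernet, IP, and TCP/UDP header fields.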
def parsing_ethernet_header(data):
ethernet_header=struct.unpack("!6c6c2s",data)
ether_dest = convert_ethernet_address(ethernet_header[0:6])
ether_src = convert_ethernet_address(ethernet_header[6:12])
ip_header="0x"+ethernet_header[12].hex()
print("=========ethernet header==========")
print("src_mac_address:", ether_src)
print("dest_mac_address:",ether_dest)
print("ip_version",ip_header)
def convert_ethernet_address(data):
ethernet_addr =list()
for i in data:
ethernet_addr.append(i.hex())
ethernet_addr=":".join(ethernet_addr)
return ethernet_addr
def parsing_ip_header(data):
ip_header=struct.unpack("!1c1c2s2s2s1c1c2s4c4c",data)
print("============ip header=============")
    ip_ver_len = int(ip_header[0].hex(), 16)
    print("ip_version:", ip_ver_len // 16)  # high nibble = IP version
    print("ip_length:", ip_ver_len % 16)  # low nibble = header length (IHL, in 32-bit words)
differ_expli=int(ip_header[1].hex(),16)
print("differentiated_service_codepoint:",differ_expli//16)
print("explicit_congestion_notification:",differ_expli%16)
total_length=int(ip_header[2].hex(),16)
print("total_length:",total_length)
identification=ip_header[3].hex()
print("identification:0x",identification)
flags=ip_header[4].hex()
print("flags:0x",flags)
flags_int=int(ip_header[4].hex(),16)
print(">>>reserved_bit:",flags_int>>15)
print(">>>fragments:",(flags_int>>13)& 0x0001)
print(">>>fragments_offset:",flags_int & 0x1fff)
time_to_live=int(ip_header[5].hex(),16)
print("Time to live:",time_to_live)
protocol=ip_header[6].hex()
print("protocol:0x",protocol)
header_check=ip_header[7].hex()
print("header checksum:0x",header_check)
source_addr=convert_ip_address(ip_header[8:12])
print("source_ip_address:",source_addr)
dest_addr=convert_ip_address(ip_header[12:16])
print("dest_ip_address:",dest_addr)
def ch_UDP_TCP(data):
    # Return the IP "protocol" field as an integer (6 = TCP, 17 = UDP).
    temp = struct.unpack("1c", data)
    result = int(temp[0].hex(), 16)
    return result
def convert_ip_address(data):
ip_addr=list()
for i in data:
ip_addr.append(str(int(i.hex(),16)) )
ip_addr=".".join(ip_addr)
return ip_addr
def parsing_TCP_header(data):
print("=============tcp header==============")
TCP_header=struct.unpack("!2s2s1I1I2s2s2s2s",data)
src_port=int(TCP_header[0].hex(),16)
print("src_port:",src_port)
dec_port=int(TCP_header[1].hex(),16)
print("dec_port:",dec_port)
seq_num=TCP_header[2]
print("seq_num:",seq_num)
ack_num=TCP_header[3]
print("ack_num:",ack_num)
header_len=(int(TCP_header[4].hex(),16)>>12)&0x000f
print("header_len:",header_len)
flags=int(TCP_header[4].hex(),16)&0x0fff
print("flags:",flags)
reserved=flags>>9
print(">>>reserved",reserved)
nonce=(flags>>8)&0x001
print(">>>nonce:",nonce)
cwr=(flags>>7)&0x001
print(">>>cwr:",cwr)
urgent=(flags>>5)&0x001
print(">>>urgent:",urgent)
ack=(flags>>4)&0x001
print(">>>ack:",ack)
push=(flags>>3)&0x001
print(">>>push:",push)
reset=(flags>>2)&0x001
print(">>>reset:",reset)
syn=(flags>>1)&0x001
print(">>>syn:",syn)
fin=flags&0x001
print(">>>fin:",fin)
window_size=int(TCP_header[5].hex(),16)
print("Window_size_value:",window_size)
checksum=int(TCP_header[6].hex(),16)
print("checksum:",checksum)
urgent_pointer=int(TCP_header[7].hex(),16)
print("urgent_pointer:",urgent_pointer)
def parsing_UDP_header(data):
UDP_header=struct.unpack("2s2s2s2s",data)
print("=============udp_header=============")
src_port=int(UDP_header[0].hex(),16)
print("src_port:",src_port)
dst_port=int(UDP_header[1].hex(),16)
print("dst_port:",dst_port)
leng=int(UDP_header[2].hex(),16)
print("leng:",leng)
header_checksum=UDP_header[3].hex()
print("header_checksum:0x",header_checksum)
recv_socket = socket.socket(socket.AF_PACKET,socket.SOCK_RAW,socket.ntohs(0x0800))
print("<<<<<<Packet Capture Start>>>>>>>")
while True:
data = recv_socket.recvfrom(20000)
parsing_ethernet_header(data[0][0:14])
parsing_ip_header(data[0][14:34])
    flag = ch_UDP_TCP(data[0][23:24])  # byte 23 of the frame = IP protocol field
    if flag == 6:  # TCP
        parsing_TCP_header(data[0][34:54])
    elif flag == 17:  # UDP
        parsing_UDP_header(data[0][34:42])
|
normal
|
{
"blob_id": "9b715fb95e89804a57ea77a98face673b57220c6",
"index": 4494,
"step-1": "<mask token>\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n print('=============udp_header=============')\n src_port = 
int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n print('=============udp_header=============')\n src_port = 
int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\n<mask token>\nprint('<<<<<<Packet Capture Start>>>>>>>')\nwhile True:\n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n flag = ch_UDP_TCP(data[0][23:24])\n if flag == 6:\n parsing_TCP_header(data[0][34:54])\n elif flag == 17:\n parsing_UDP_header(data[0][34:42])\n",
"step-3": "<mask token>\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n print('=============udp_header=============')\n src_port = 
int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\nrecv_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs\n (2048))\nprint('<<<<<<Packet Capture Start>>>>>>>')\nwhile True:\n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n flag = ch_UDP_TCP(data[0][23:24])\n if flag == 6:\n parsing_TCP_header(data[0][34:54])\n elif flag == 17:\n parsing_UDP_header(data[0][34:42])\n",
"step-4": "import socket\nimport struct\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n 
print('=============udp_header=============')\n src_port = int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\nrecv_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs\n (2048))\nprint('<<<<<<Packet Capture Start>>>>>>>')\nwhile True:\n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n flag = ch_UDP_TCP(data[0][23:24])\n if flag == 6:\n parsing_TCP_header(data[0][34:54])\n elif flag == 17:\n parsing_UDP_header(data[0][34:42])\n",
"step-5": "import socket\nimport struct\n\ndef parsing_ethernet_header(data):\n ethernet_header=struct.unpack(\"!6c6c2s\",data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header=\"0x\"+ethernet_header[12].hex()\n\n print(\"=========ethernet header==========\")\n print(\"src_mac_address:\", ether_src)\n print(\"dest_mac_address:\",ether_dest)\n print(\"ip_version\",ip_header)\n\ndef convert_ethernet_address(data):\n ethernet_addr =list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr=\":\".join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header=struct.unpack(\"!1c1c2s2s2s1c1c2s4c4c\",data)\n \n print(\"============ip header=============\")\n \n ip_ver_len= int(ip_header[0].hex(), 16)\n print(\"ip_version:\",ip_ver_len // 16)\n print(\"ip_length:\", ip_ver_len % 16)\n\n differ_expli=int(ip_header[1].hex(),16)\n print(\"differentiated_service_codepoint:\",differ_expli//16)\n print(\"explicit_congestion_notification:\",differ_expli%16)\n\n total_length=int(ip_header[2].hex(),16)\n print(\"total_length:\",total_length)\n \n identification=ip_header[3].hex()\n print(\"identification:0x\",identification)\n\n flags=ip_header[4].hex()\n print(\"flags:0x\",flags)\n flags_int=int(ip_header[4].hex(),16)\n print(\">>>reserved_bit:\",flags_int>>15)\n print(\">>>fragments:\",(flags_int>>13)& 0x0001)\n print(\">>>fragments_offset:\",flags_int & 0x1fff)\n\n\n time_to_live=int(ip_header[5].hex(),16)\n print(\"Time to live:\",time_to_live)\n\n protocol=ip_header[6].hex()\n print(\"protocol:0x\",protocol)\n\n header_check=ip_header[7].hex()\n print(\"header checksum:0x\",header_check)\n\n source_addr=convert_ip_address(ip_header[8:12])\n print(\"source_ip_address:\",source_addr)\n\n dest_addr=convert_ip_address(ip_header[12:16])\n print(\"dest_ip_address:\",dest_addr)\n\ndef ch_UDP_TCP(data):\n temp=struct.unpack(\"1c\",data)\n result=int(temp[0].hex(),16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr=list()\n for i in data:\n ip_addr.append(str(int(i.hex(),16)) ) \n ip_addr=\".\".join(ip_addr)\n return ip_addr\n\ndef parsing_TCP_header(data):\n print(\"=============tcp header==============\")\n TCP_header=struct.unpack(\"!2s2s1I1I2s2s2s2s\",data)\n\n src_port=int(TCP_header[0].hex(),16)\n print(\"src_port:\",src_port)\n\n dec_port=int(TCP_header[1].hex(),16)\n print(\"dec_port:\",dec_port)\n\n seq_num=TCP_header[2]\n print(\"seq_num:\",seq_num)\n\n ack_num=TCP_header[3]\n print(\"ack_num:\",ack_num)\n\n header_len=(int(TCP_header[4].hex(),16)>>12)&0x000f\n print(\"header_len:\",header_len)\n\n flags=int(TCP_header[4].hex(),16)&0x0fff\n print(\"flags:\",flags)\n\n reserved=flags>>9\n print(\">>>reserved\",reserved)\n\n nonce=(flags>>8)&0x001\n print(\">>>nonce:\",nonce)\n\n cwr=(flags>>7)&0x001\n print(\">>>cwr:\",cwr)\n\n urgent=(flags>>5)&0x001\n print(\">>>urgent:\",urgent)\n\n ack=(flags>>4)&0x001\n print(\">>>ack:\",ack)\n\n push=(flags>>3)&0x001\n print(\">>>push:\",push)\n\n reset=(flags>>2)&0x001\n print(\">>>reset:\",reset)\n\n syn=(flags>>1)&0x001\n print(\">>>syn:\",syn)\n\n fin=flags&0x001\n print(\">>>fin:\",fin)\n\n window_size=int(TCP_header[5].hex(),16)\n print(\"Window_size_value:\",window_size)\n\n checksum=int(TCP_header[6].hex(),16)\n print(\"checksum:\",checksum)\n\n urgent_pointer=int(TCP_header[7].hex(),16)\n print(\"urgent_pointer:\",urgent_pointer)\n\ndef parsing_UDP_header(data):\n UDP_header=struct.unpack(\"2s2s2s2s\",data)\n 
print(\"=============udp_header=============\")\n\n src_port=int(UDP_header[0].hex(),16)\n print(\"src_port:\",src_port)\n\n dst_port=int(UDP_header[1].hex(),16)\n print(\"dst_port:\",dst_port)\n\n leng=int(UDP_header[2].hex(),16)\n print(\"leng:\",leng)\n\n header_checksum=UDP_header[3].hex()\n print(\"header_checksum:0x\",header_checksum)\n\n\n\nrecv_socket = socket.socket(socket.AF_PACKET,socket.SOCK_RAW,socket.ntohs(0x0800))\n\nprint(\"<<<<<<Packet Capture Start>>>>>>>\")\n\nwhile True:\n \n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n\n flag =ch_UDP_TCP(data[0][23:24])\n \n if flag==6:\n parsing_TCP_header(data[0][34:54])\n\n elif flag==17:\n parsing_UDP_header(data[0][34:42])\n \n \n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
class FoodCategory(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
db_table = 'kitchenrock_category'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FoodCategory(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
db_table = 'kitchenrock_category'
def __str__(self):
return self.name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FoodCategory(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, default='')
class Meta:
db_table = 'kitchenrock_category'
def __str__(self):
return self.name
<|reserved_special_token_1|>
from django.db import models
class FoodCategory(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, default='')
class Meta:
db_table = 'kitchenrock_category'
def __str__(self):
return self.name
|
flexible
|
{
"blob_id": "9bb1fc4df80d183c70d70653faa3428964b93a94",
"index": 9494,
"step-1": "<mask token>\n\n\nclass FoodCategory(models.Model):\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'kitchenrock_category'\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FoodCategory(models.Model):\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'kitchenrock_category'\n\n def __str__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass FoodCategory(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=200, default='')\n\n\n class Meta:\n db_table = 'kitchenrock_category'\n\n def __str__(self):\n return self.name\n",
"step-4": "from django.db import models\n\n\nclass FoodCategory(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=200, default='')\n\n\n class Meta:\n db_table = 'kitchenrock_category'\n\n def __str__(self):\n return self.name\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import os
import pprint
import math
import sys
import datetime as dt
from pathlib import Path
import RotateCipher
import ShiftCipher
import TranspositionCipher
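# Command-line utility: encrypts or decrypts a text file with the rotate or
# transposition cipher and appends the timestamped result to an output file.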
def process_textfile(
string_path: str,
encryption_algorithm: str,
algorithm_key: float,
output_folderpath: str = str(
Path(os.path.expandvars("$HOME")).anchor
) + r"/EncryptDecrypt/",
output_filename: str = r"EncryptDecrypt.txt",
to_decrypt=False,
**kwargs
):
encryption_algorithm = encryption_algorithm.lower()
available_algorithms = ["rotate", "transposition"]
if encryption_algorithm not in available_algorithms:
pprint.pprint(
["Enter an algorithm from the list. Not case-sensitive.",
available_algorithms]
)
return None
# A single dictionary may be passed as a **kwarg if it is the
# ONLY KEY-WORD ARGUMENT. Else, error is thrown.
lst_kwargs = list(kwargs.values())
if len(lst_kwargs) == 1 and (isinstance(lst_kwargs[0], dict)):
kwargs = lst_kwargs[0]
# Key in **kwargs overwrites `algorithm_key` function parameter.
if "algorithm_key" in kwargs:
algorithm_key = float(kwargs["algorithm_key"])
# Convert strings saying "True" or "False" to booleans.
for key, value in kwargs.items():
str_value = str(value)
if str_value.lower() == "False":
kwargs[key] = False
elif str_value.lower() == "True":
kwargs[key] = True
output_filename = ('/' + output_filename)
if not (output_filename.endswith(".txt")):
output_filename += ".txt"
full_outputpath = output_folderpath + output_filename
path_input = Path(string_path)
# fileobj_target = open(path_input, 'r') # Only for Python 3.6 and later.
fileobj_target = open(str(path_input), 'r')
lst_input = fileobj_target.readlines()
# str_input = '\n'.join(lst_input)
str_input = "".join(lst_input)
output_string = "None"
print(
"""Started processing.
Key-word arguments for %s algorithm:""" % encryption_algorithm
)
pprint.pprint(kwargs)
if (encryption_algorithm == "transposition") and to_decrypt is True:
output_string = ''.join(
TranspositionCipher.decrypt_transposition(
str_input, int(algorithm_key)
)
)
elif encryption_algorithm == "transposition" and not to_decrypt:
output_string = ''.join(
TranspositionCipher.encrypt_transposition(
str_input, int(algorithm_key)
)
)
elif encryption_algorithm == "rotate":
warning = """
When the algorithm is set to rotate, the "to_decrypt" parameter
        is ignored. To decrypt, set the key-word argument shift_left
        so that it reverses the shift direction used during encryption.
Ex: If the text was shifted left, i.e. values were swapped
with those "higher" up on the list read from left to right, pass
the key-word argument shift_left=False to decrypt.
RotateCipher's methods can return a list. However, it is
forced to always return a string. Passing return_list=True as
a key-word argument will have no effect. The argument is not
passed to RotateCipher.
"""
        # pprint.pprint(warning) # Included literal \n and single quotes.
print(warning)
to_shiftleft = True
if "shift_left" in kwargs:
to_shiftleft = kwargs["shift_left"]
process_numbers = False
if "shift_numbers" in kwargs:
process_numbers = kwargs["shift_numbers"]
output_string = RotateCipher.rot13_e(
string=str_input,
shift_left=to_shiftleft,
rotations=int(algorithm_key),
# return_list=kwargs["return_list"], # Removed for safety.
shift_numbers=process_numbers
)
if not (os.path.exists(output_folderpath)):
os.mkdir(output_folderpath)
fileobj_output = open(
full_outputpath,
'a' # Create a file and open it for writing. Append if exists.
)
fileobj_output.write(
"\n=====\nEncryptDecrypt Output on\n%s\n=====\n" %
dt.datetime.now()
)
fileobj_output.write(output_string)
fileobj_output.close()
print("Done processing. Output folder:\n{}".format(
Path(full_outputpath)
)
)
return {
"output_file": Path(full_outputpath).resolve(),
"output_text": output_string
}
def manual_test():
dict_processedtext = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 1.txt",
encryption_algorithm="rotate",
algorithm_key=1,
shift_left=True
)
print("Encrypt ROT1 with default values.")
# pprint.pprint(
# dict_processedtext
# )
print(dict_processedtext["output_file"])
dict_processedtext2 = process_textfile(
string_path=dict_processedtext["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 1 Decrypted",
shift_left=False
)
print("Decrypt ROT1 with all values user-supplied.")
    print(dict_processedtext2["output_file"])
for i in range(2):
dict_processedtext3a = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 2.txt",
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Encryptions"
)
print(dict_processedtext3a["output_file"])
dict_processedtext3b = process_textfile(
string_path=dict_processedtext3a["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 2 Decrypted",
shift_left=False
)
print(dict_processedtext3b["output_file"])
return None
def main():
while True:
print("Press Enter or New Line to skip entering any input.\t")
task = input("Encrypt or decrypt? Encrypts by default. Press E/D.\t")
algo = input("Algorithm? Uses Rotate by default.\t")
        key_input = input("Key? Uses 1 by default.\t")
        algorithm_key = float(key_input) if key_input else 1.0
input_filepath = input(
"""Mandatory / Required.
Full path of target file. Includes file name and extension.\n""")
output_folder = input(
"Optional. Give the path of the output folder.\n"
)
output_file = input(
"Optional. Default output file name is EncryptDecrypt.txt.\n")
keyword_arguments = input(
"""Last question. Depends on algorithm.
Format: "key=value,key2,value2,...".
Use comma with no space as separator for two or more items.\n"""
)
while len(input_filepath) == 0:
input_filepath = input(
"""Mandatory / Required.
Full path of target file.
Includes file name and extension.\n"""
)
dict_kwargs = dict()
for pair in keyword_arguments.split(','):
try:
key, pair = tuple(pair.split('='))
dict_kwargs[key] = pair
except ValueError:
break
to_decrypt = False
if task.lower().startswith('d'):
to_decrypt = True
if len(output_folder) == 0:
            output_folder = str(Path.cwd().parent / "EncryptDecrypt")
if len(output_file) == 0:
output_file = "EncryptDecrypt.txt"
if len(algo) == 0:
algo = "rotate"
pprint.pprint(
process_textfile(
string_path=input_filepath,
encryption_algorithm=algo,
algorithm_key=algorithm_key,
output_folderpath=output_folder,
output_filename=output_file,
to_decrypt=to_decrypt,
kwargs_dict=dict_kwargs
)
)
print(
"""Done Running.
Press Q to quit, any other key to process another file.""")
to_quit = input()
if to_quit.lower().startswith("q"):
sys.exit()
else:
continue
# manual_test()
return None
if __name__ == "__main__":
main()
"""
Notes:
*
The declared parameter data types in python functions are not enforced as of
version 3.4.
*
For some reason, even if the name "key" was a parameter for process_textfile,
it was being passed to rot13_e as a string. In the function process_textfile,
VS Code also listed "key" as a string when passed to rot13_e even though
the function definition specified its data type as a float and the user input
for "key" was also converted to a float in the main function. This was caused
by a for-loop. When VS Code followed the definition of key (F12) when it
was passed to rot13_e, VS Code pointed to the temporary variable "key" in a
for-loop. The parameter name was changed as a quick fix.
- Adding an else clause to the for-loop did not fix it.
- The for-loop declaration was function-level code while the call to rot13_e
that bugged was inside an else-clause. The else-clause holding the call to
rot13_e was also function-level, same as the for-loop declaration. The call
to RotateCipher.rot13_e was assigned to output_string.
"""
|
normal
|
{
"blob_id": "5dccd015a90927e8d2a9c0ea4b11b24bfd4bb65e",
"index": 5690,
"step-1": "<mask token>\n\n\ndef manual_test():\n dict_processedtext = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 1.txt',\n encryption_algorithm='rotate', algorithm_key=1, shift_left=True)\n print('Encrypt ROT1 with default values.')\n print(dict_processedtext['output_file'])\n dict_processedtext2 = process_textfile(string_path=dict_processedtext[\n 'output_file'], encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions',\n output_filename='Quiz 0 Overwrite Number 1 Decrypted', shift_left=False\n )\n print('Decrypt ROT1 with all values user-supplied.')\n print(dict_processedtext['output_file'])\n for i in range(2):\n dict_processedtext3a = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 2.txt'\n , encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Encryptions')\n print(dict_processedtext3a['output_file'])\n dict_processedtext3b = process_textfile(string_path=\n dict_processedtext3a['output_file'], encryption_algorithm=\n 'rotate', algorithm_key=1, output_folderpath=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions', output_filename=\n 'Quiz 0 Overwrite Number 2 Decrypted', shift_left=False)\n print(dict_processedtext3b['output_file'])\n return None\n\n\ndef main():\n while True:\n print('Press Enter or New Line to skip entering any input.\\t')\n task = input('Encrypt or decrypt? Encrypts by default. Press E/D.\\t')\n algo = input('Algorithm? Uses Rotate by default.\\t')\n algorithm_key = float(input('Key? Uses 1 by default.\\t'))\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file. Includes file name and extension.\n\"\"\"\n )\n output_folder = input('Optional. Give the path of the output folder.\\n'\n )\n output_file = input(\n 'Optional. Default output file name is EncryptDecrypt.txt.\\n')\n keyword_arguments = input(\n \"\"\"Last question. Depends on algorithm.\n Format: \"key=value,key2,value2,...\".\n Use comma with no space as separator for two or more items.\n\"\"\"\n )\n while len(input_filepath) == 0:\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file.\n Includes file name and extension.\n\"\"\"\n )\n dict_kwargs = dict()\n for pair in keyword_arguments.split(','):\n try:\n key, pair = tuple(pair.split('='))\n dict_kwargs[key] = pair\n except ValueError:\n break\n to_decrypt = False\n if task.lower().startswith('d'):\n to_decrypt = True\n if len(output_folder) == 0:\n output_folder = str(Path.cwd().parent / '/EncryptDecrypt/')\n if len(output_file) == 0:\n output_file = 'EncryptDecrypt.txt'\n if len(algo) == 0:\n algo = 'rotate'\n pprint.pprint(process_textfile(string_path=input_filepath,\n encryption_algorithm=algo, algorithm_key=algorithm_key,\n output_folderpath=output_folder, output_filename=output_file,\n to_decrypt=to_decrypt, kwargs_dict=dict_kwargs))\n print(\n 'Done Running.\\n Press Q to quit, any other key to process another file.'\n )\n to_quit = input()\n if to_quit.lower().startswith('q'):\n sys.exit()\n else:\n continue\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_textfile(string_path: str, encryption_algorithm: str,\n algorithm_key: float, output_folderpath: str=str(Path(os.path.\n expandvars('$HOME')).anchor) + '/EncryptDecrypt/', output_filename: str\n ='EncryptDecrypt.txt', to_decrypt=False, **kwargs):\n encryption_algorithm = encryption_algorithm.lower()\n available_algorithms = ['rotate', 'transposition']\n if encryption_algorithm not in available_algorithms:\n pprint.pprint([\n 'Enter an algorithm from the list. Not case-sensitive.',\n available_algorithms])\n return None\n lst_kwargs = list(kwargs.values())\n if len(lst_kwargs) == 1 and isinstance(lst_kwargs[0], dict):\n kwargs = lst_kwargs[0]\n if 'algorithm_key' in kwargs:\n algorithm_key = float(kwargs['algorithm_key'])\n for key, value in kwargs.items():\n str_value = str(value)\n if str_value.lower() == 'False':\n kwargs[key] = False\n elif str_value.lower() == 'True':\n kwargs[key] = True\n output_filename = '/' + output_filename\n if not output_filename.endswith('.txt'):\n output_filename += '.txt'\n full_outputpath = output_folderpath + output_filename\n path_input = Path(string_path)\n fileobj_target = open(str(path_input), 'r')\n lst_input = fileobj_target.readlines()\n str_input = ''.join(lst_input)\n output_string = 'None'\n print(\n \"\"\"Started processing.\n Key-word arguments for %s algorithm:\"\"\" %\n encryption_algorithm)\n pprint.pprint(kwargs)\n if encryption_algorithm == 'transposition' and to_decrypt is True:\n output_string = ''.join(TranspositionCipher.decrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'transposition' and not to_decrypt:\n output_string = ''.join(TranspositionCipher.encrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'rotate':\n warning = \"\"\"\n When the algorithm is set to rotate, the \"to_decrypt\" parameter\n is ignored. To decrypt, set the key-word argument shift left\n so that it reverses the shift direction during encryption.\n Ex: If the text was shifted left, i.e. values were swapped\n with those \"higher\" up on the list read from left to right, pass\n the key-word argument shift_left=False to decrypt.\n\n RotateCipher's methods can return a list. However, it is\n forced to always return a string. Passing return_list=True as\n a key-word argument will have no effect. The argument is not\n passed to RotateCipher.\n \"\"\"\n print(warning)\n to_shiftleft = True\n if 'shift_left' in kwargs:\n to_shiftleft = kwargs['shift_left']\n process_numbers = False\n if 'shift_numbers' in kwargs:\n process_numbers = kwargs['shift_numbers']\n output_string = RotateCipher.rot13_e(string=str_input, shift_left=\n to_shiftleft, rotations=int(algorithm_key), shift_numbers=\n process_numbers)\n if not os.path.exists(output_folderpath):\n os.mkdir(output_folderpath)\n fileobj_output = open(full_outputpath, 'a')\n fileobj_output.write('\\n=====\\nEncryptDecrypt Output on\\n%s\\n=====\\n' %\n dt.datetime.now())\n fileobj_output.write(output_string)\n fileobj_output.close()\n print('Done processing. 
Output folder:\\n{}'.format(Path(full_outputpath)))\n return {'output_file': Path(full_outputpath).resolve(), 'output_text':\n output_string}\n\n\ndef manual_test():\n dict_processedtext = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 1.txt',\n encryption_algorithm='rotate', algorithm_key=1, shift_left=True)\n print('Encrypt ROT1 with default values.')\n print(dict_processedtext['output_file'])\n dict_processedtext2 = process_textfile(string_path=dict_processedtext[\n 'output_file'], encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions',\n output_filename='Quiz 0 Overwrite Number 1 Decrypted', shift_left=False\n )\n print('Decrypt ROT1 with all values user-supplied.')\n print(dict_processedtext['output_file'])\n for i in range(2):\n dict_processedtext3a = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 2.txt'\n , encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Encryptions')\n print(dict_processedtext3a['output_file'])\n dict_processedtext3b = process_textfile(string_path=\n dict_processedtext3a['output_file'], encryption_algorithm=\n 'rotate', algorithm_key=1, output_folderpath=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions', output_filename=\n 'Quiz 0 Overwrite Number 2 Decrypted', shift_left=False)\n print(dict_processedtext3b['output_file'])\n return None\n\n\ndef main():\n while True:\n print('Press Enter or New Line to skip entering any input.\\t')\n task = input('Encrypt or decrypt? Encrypts by default. Press E/D.\\t')\n algo = input('Algorithm? Uses Rotate by default.\\t')\n algorithm_key = float(input('Key? Uses 1 by default.\\t'))\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file. Includes file name and extension.\n\"\"\"\n )\n output_folder = input('Optional. Give the path of the output folder.\\n'\n )\n output_file = input(\n 'Optional. Default output file name is EncryptDecrypt.txt.\\n')\n keyword_arguments = input(\n \"\"\"Last question. Depends on algorithm.\n Format: \"key=value,key2,value2,...\".\n Use comma with no space as separator for two or more items.\n\"\"\"\n )\n while len(input_filepath) == 0:\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file.\n Includes file name and extension.\n\"\"\"\n )\n dict_kwargs = dict()\n for pair in keyword_arguments.split(','):\n try:\n key, pair = tuple(pair.split('='))\n dict_kwargs[key] = pair\n except ValueError:\n break\n to_decrypt = False\n if task.lower().startswith('d'):\n to_decrypt = True\n if len(output_folder) == 0:\n output_folder = str(Path.cwd().parent / '/EncryptDecrypt/')\n if len(output_file) == 0:\n output_file = 'EncryptDecrypt.txt'\n if len(algo) == 0:\n algo = 'rotate'\n pprint.pprint(process_textfile(string_path=input_filepath,\n encryption_algorithm=algo, algorithm_key=algorithm_key,\n output_folderpath=output_folder, output_filename=output_file,\n to_decrypt=to_decrypt, kwargs_dict=dict_kwargs))\n print(\n 'Done Running.\\n Press Q to quit, any other key to process another file.'\n )\n to_quit = input()\n if to_quit.lower().startswith('q'):\n sys.exit()\n else:\n continue\n return None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef process_textfile(string_path: str, encryption_algorithm: str,\n algorithm_key: float, output_folderpath: str=str(Path(os.path.\n expandvars('$HOME')).anchor) + '/EncryptDecrypt/', output_filename: str\n ='EncryptDecrypt.txt', to_decrypt=False, **kwargs):\n encryption_algorithm = encryption_algorithm.lower()\n available_algorithms = ['rotate', 'transposition']\n if encryption_algorithm not in available_algorithms:\n pprint.pprint([\n 'Enter an algorithm from the list. Not case-sensitive.',\n available_algorithms])\n return None\n lst_kwargs = list(kwargs.values())\n if len(lst_kwargs) == 1 and isinstance(lst_kwargs[0], dict):\n kwargs = lst_kwargs[0]\n if 'algorithm_key' in kwargs:\n algorithm_key = float(kwargs['algorithm_key'])\n for key, value in kwargs.items():\n str_value = str(value)\n if str_value.lower() == 'False':\n kwargs[key] = False\n elif str_value.lower() == 'True':\n kwargs[key] = True\n output_filename = '/' + output_filename\n if not output_filename.endswith('.txt'):\n output_filename += '.txt'\n full_outputpath = output_folderpath + output_filename\n path_input = Path(string_path)\n fileobj_target = open(str(path_input), 'r')\n lst_input = fileobj_target.readlines()\n str_input = ''.join(lst_input)\n output_string = 'None'\n print(\n \"\"\"Started processing.\n Key-word arguments for %s algorithm:\"\"\" %\n encryption_algorithm)\n pprint.pprint(kwargs)\n if encryption_algorithm == 'transposition' and to_decrypt is True:\n output_string = ''.join(TranspositionCipher.decrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'transposition' and not to_decrypt:\n output_string = ''.join(TranspositionCipher.encrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'rotate':\n warning = \"\"\"\n When the algorithm is set to rotate, the \"to_decrypt\" parameter\n is ignored. To decrypt, set the key-word argument shift left\n so that it reverses the shift direction during encryption.\n Ex: If the text was shifted left, i.e. values were swapped\n with those \"higher\" up on the list read from left to right, pass\n the key-word argument shift_left=False to decrypt.\n\n RotateCipher's methods can return a list. However, it is\n forced to always return a string. Passing return_list=True as\n a key-word argument will have no effect. The argument is not\n passed to RotateCipher.\n \"\"\"\n print(warning)\n to_shiftleft = True\n if 'shift_left' in kwargs:\n to_shiftleft = kwargs['shift_left']\n process_numbers = False\n if 'shift_numbers' in kwargs:\n process_numbers = kwargs['shift_numbers']\n output_string = RotateCipher.rot13_e(string=str_input, shift_left=\n to_shiftleft, rotations=int(algorithm_key), shift_numbers=\n process_numbers)\n if not os.path.exists(output_folderpath):\n os.mkdir(output_folderpath)\n fileobj_output = open(full_outputpath, 'a')\n fileobj_output.write('\\n=====\\nEncryptDecrypt Output on\\n%s\\n=====\\n' %\n dt.datetime.now())\n fileobj_output.write(output_string)\n fileobj_output.close()\n print('Done processing. 
Output folder:\\n{}'.format(Path(full_outputpath)))\n return {'output_file': Path(full_outputpath).resolve(), 'output_text':\n output_string}\n\n\ndef manual_test():\n dict_processedtext = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 1.txt',\n encryption_algorithm='rotate', algorithm_key=1, shift_left=True)\n print('Encrypt ROT1 with default values.')\n print(dict_processedtext['output_file'])\n dict_processedtext2 = process_textfile(string_path=dict_processedtext[\n 'output_file'], encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions',\n output_filename='Quiz 0 Overwrite Number 1 Decrypted', shift_left=False\n )\n print('Decrypt ROT1 with all values user-supplied.')\n print(dict_processedtext['output_file'])\n for i in range(2):\n dict_processedtext3a = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 2.txt'\n , encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Encryptions')\n print(dict_processedtext3a['output_file'])\n dict_processedtext3b = process_textfile(string_path=\n dict_processedtext3a['output_file'], encryption_algorithm=\n 'rotate', algorithm_key=1, output_folderpath=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions', output_filename=\n 'Quiz 0 Overwrite Number 2 Decrypted', shift_left=False)\n print(dict_processedtext3b['output_file'])\n return None\n\n\ndef main():\n while True:\n print('Press Enter or New Line to skip entering any input.\\t')\n task = input('Encrypt or decrypt? Encrypts by default. Press E/D.\\t')\n algo = input('Algorithm? Uses Rotate by default.\\t')\n algorithm_key = float(input('Key? Uses 1 by default.\\t'))\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file. Includes file name and extension.\n\"\"\"\n )\n output_folder = input('Optional. Give the path of the output folder.\\n'\n )\n output_file = input(\n 'Optional. Default output file name is EncryptDecrypt.txt.\\n')\n keyword_arguments = input(\n \"\"\"Last question. Depends on algorithm.\n Format: \"key=value,key2,value2,...\".\n Use comma with no space as separator for two or more items.\n\"\"\"\n )\n while len(input_filepath) == 0:\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file.\n Includes file name and extension.\n\"\"\"\n )\n dict_kwargs = dict()\n for pair in keyword_arguments.split(','):\n try:\n key, pair = tuple(pair.split('='))\n dict_kwargs[key] = pair\n except ValueError:\n break\n to_decrypt = False\n if task.lower().startswith('d'):\n to_decrypt = True\n if len(output_folder) == 0:\n output_folder = str(Path.cwd().parent / '/EncryptDecrypt/')\n if len(output_file) == 0:\n output_file = 'EncryptDecrypt.txt'\n if len(algo) == 0:\n algo = 'rotate'\n pprint.pprint(process_textfile(string_path=input_filepath,\n encryption_algorithm=algo, algorithm_key=algorithm_key,\n output_folderpath=output_folder, output_filename=output_file,\n to_decrypt=to_decrypt, kwargs_dict=dict_kwargs))\n print(\n 'Done Running.\\n Press Q to quit, any other key to process another file.'\n )\n to_quit = input()\n if to_quit.lower().startswith('q'):\n sys.exit()\n else:\n continue\n return None\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-4": "import os\nimport pprint\nimport math\nimport sys\nimport datetime as dt\nfrom pathlib import Path\nimport RotateCipher\nimport ShiftCipher\nimport TranspositionCipher\n\n\ndef process_textfile(string_path: str, encryption_algorithm: str,\n algorithm_key: float, output_folderpath: str=str(Path(os.path.\n expandvars('$HOME')).anchor) + '/EncryptDecrypt/', output_filename: str\n ='EncryptDecrypt.txt', to_decrypt=False, **kwargs):\n encryption_algorithm = encryption_algorithm.lower()\n available_algorithms = ['rotate', 'transposition']\n if encryption_algorithm not in available_algorithms:\n pprint.pprint([\n 'Enter an algorithm from the list. Not case-sensitive.',\n available_algorithms])\n return None\n lst_kwargs = list(kwargs.values())\n if len(lst_kwargs) == 1 and isinstance(lst_kwargs[0], dict):\n kwargs = lst_kwargs[0]\n if 'algorithm_key' in kwargs:\n algorithm_key = float(kwargs['algorithm_key'])\n for key, value in kwargs.items():\n str_value = str(value)\n if str_value.lower() == 'False':\n kwargs[key] = False\n elif str_value.lower() == 'True':\n kwargs[key] = True\n output_filename = '/' + output_filename\n if not output_filename.endswith('.txt'):\n output_filename += '.txt'\n full_outputpath = output_folderpath + output_filename\n path_input = Path(string_path)\n fileobj_target = open(str(path_input), 'r')\n lst_input = fileobj_target.readlines()\n str_input = ''.join(lst_input)\n output_string = 'None'\n print(\n \"\"\"Started processing.\n Key-word arguments for %s algorithm:\"\"\" %\n encryption_algorithm)\n pprint.pprint(kwargs)\n if encryption_algorithm == 'transposition' and to_decrypt is True:\n output_string = ''.join(TranspositionCipher.decrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'transposition' and not to_decrypt:\n output_string = ''.join(TranspositionCipher.encrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'rotate':\n warning = \"\"\"\n When the algorithm is set to rotate, the \"to_decrypt\" parameter\n is ignored. To decrypt, set the key-word argument shift left\n so that it reverses the shift direction during encryption.\n Ex: If the text was shifted left, i.e. values were swapped\n with those \"higher\" up on the list read from left to right, pass\n the key-word argument shift_left=False to decrypt.\n\n RotateCipher's methods can return a list. However, it is\n forced to always return a string. Passing return_list=True as\n a key-word argument will have no effect. The argument is not\n passed to RotateCipher.\n \"\"\"\n print(warning)\n to_shiftleft = True\n if 'shift_left' in kwargs:\n to_shiftleft = kwargs['shift_left']\n process_numbers = False\n if 'shift_numbers' in kwargs:\n process_numbers = kwargs['shift_numbers']\n output_string = RotateCipher.rot13_e(string=str_input, shift_left=\n to_shiftleft, rotations=int(algorithm_key), shift_numbers=\n process_numbers)\n if not os.path.exists(output_folderpath):\n os.mkdir(output_folderpath)\n fileobj_output = open(full_outputpath, 'a')\n fileobj_output.write('\\n=====\\nEncryptDecrypt Output on\\n%s\\n=====\\n' %\n dt.datetime.now())\n fileobj_output.write(output_string)\n fileobj_output.close()\n print('Done processing. 
Output folder:\\n{}'.format(Path(full_outputpath)))\n return {'output_file': Path(full_outputpath).resolve(), 'output_text':\n output_string}\n\n\ndef manual_test():\n dict_processedtext = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 1.txt',\n encryption_algorithm='rotate', algorithm_key=1, shift_left=True)\n print('Encrypt ROT1 with default values.')\n print(dict_processedtext['output_file'])\n dict_processedtext2 = process_textfile(string_path=dict_processedtext[\n 'output_file'], encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions',\n output_filename='Quiz 0 Overwrite Number 1 Decrypted', shift_left=False\n )\n print('Decrypt ROT1 with all values user-supplied.')\n print(dict_processedtext['output_file'])\n for i in range(2):\n dict_processedtext3a = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 2.txt'\n , encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Encryptions')\n print(dict_processedtext3a['output_file'])\n dict_processedtext3b = process_textfile(string_path=\n dict_processedtext3a['output_file'], encryption_algorithm=\n 'rotate', algorithm_key=1, output_folderpath=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions', output_filename=\n 'Quiz 0 Overwrite Number 2 Decrypted', shift_left=False)\n print(dict_processedtext3b['output_file'])\n return None\n\n\ndef main():\n while True:\n print('Press Enter or New Line to skip entering any input.\\t')\n task = input('Encrypt or decrypt? Encrypts by default. Press E/D.\\t')\n algo = input('Algorithm? Uses Rotate by default.\\t')\n algorithm_key = float(input('Key? Uses 1 by default.\\t'))\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file. Includes file name and extension.\n\"\"\"\n )\n output_folder = input('Optional. Give the path of the output folder.\\n'\n )\n output_file = input(\n 'Optional. Default output file name is EncryptDecrypt.txt.\\n')\n keyword_arguments = input(\n \"\"\"Last question. Depends on algorithm.\n Format: \"key=value,key2,value2,...\".\n Use comma with no space as separator for two or more items.\n\"\"\"\n )\n while len(input_filepath) == 0:\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file.\n Includes file name and extension.\n\"\"\"\n )\n dict_kwargs = dict()\n for pair in keyword_arguments.split(','):\n try:\n key, pair = tuple(pair.split('='))\n dict_kwargs[key] = pair\n except ValueError:\n break\n to_decrypt = False\n if task.lower().startswith('d'):\n to_decrypt = True\n if len(output_folder) == 0:\n output_folder = str(Path.cwd().parent / '/EncryptDecrypt/')\n if len(output_file) == 0:\n output_file = 'EncryptDecrypt.txt'\n if len(algo) == 0:\n algo = 'rotate'\n pprint.pprint(process_textfile(string_path=input_filepath,\n encryption_algorithm=algo, algorithm_key=algorithm_key,\n output_folderpath=output_folder, output_filename=output_file,\n to_decrypt=to_decrypt, kwargs_dict=dict_kwargs))\n print(\n 'Done Running.\\n Press Q to quit, any other key to process another file.'\n )\n to_quit = input()\n if to_quit.lower().startswith('q'):\n sys.exit()\n else:\n continue\n return None\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-5": "import os\r\nimport pprint\r\nimport math\r\nimport sys\r\nimport datetime as dt\r\nfrom pathlib import Path\r\n\r\nimport RotateCipher\r\nimport ShiftCipher\r\nimport TranspositionCipher\r\n\r\n\r\ndef process_textfile(\r\n string_path: str,\r\n encryption_algorithm: str,\r\n algorithm_key: float,\r\n output_folderpath: str = str(\r\n Path(os.path.expandvars(\"$HOME\")).anchor\r\n ) + r\"/EncryptDecrypt/\",\r\n output_filename: str = r\"EncryptDecrypt.txt\",\r\n to_decrypt=False,\r\n **kwargs\r\n ):\r\n\r\n encryption_algorithm = encryption_algorithm.lower()\r\n available_algorithms = [\"rotate\", \"transposition\"]\r\n if encryption_algorithm not in available_algorithms:\r\n pprint.pprint(\r\n [\"Enter an algorithm from the list. Not case-sensitive.\",\r\n available_algorithms]\r\n )\r\n return None\r\n\r\n # A single dictionary may be passed as a **kwarg if it is the\r\n # ONLY KEY-WORD ARGUMENT. Else, error is thrown.\r\n lst_kwargs = list(kwargs.values())\r\n if len(lst_kwargs) == 1 and (isinstance(lst_kwargs[0], dict)):\r\n kwargs = lst_kwargs[0]\r\n\r\n # Key in **kwargs overwrites `algorithm_key` function parameter.\r\n if \"algorithm_key\" in kwargs:\r\n algorithm_key = float(kwargs[\"algorithm_key\"])\r\n\r\n # Convert strings saying \"True\" or \"False\" to booleans.\r\n for key, value in kwargs.items():\r\n str_value = str(value)\r\n if str_value.lower() == \"False\":\r\n kwargs[key] = False\r\n elif str_value.lower() == \"True\":\r\n kwargs[key] = True\r\n\r\n output_filename = ('/' + output_filename)\r\n if not (output_filename.endswith(\".txt\")):\r\n output_filename += \".txt\"\r\n\r\n full_outputpath = output_folderpath + output_filename\r\n path_input = Path(string_path)\r\n\r\n # fileobj_target = open(path_input, 'r') # Only for Python 3.6 and later.\r\n fileobj_target = open(str(path_input), 'r')\r\n lst_input = fileobj_target.readlines()\r\n # str_input = '\\n'.join(lst_input)\r\n str_input = \"\".join(lst_input)\r\n output_string = \"None\"\r\n\r\n print(\r\n \"\"\"Started processing.\r\n Key-word arguments for %s algorithm:\"\"\" % encryption_algorithm\r\n )\r\n pprint.pprint(kwargs)\r\n\r\n if (encryption_algorithm == \"transposition\") and to_decrypt is True:\r\n output_string = ''.join(\r\n TranspositionCipher.decrypt_transposition(\r\n str_input, int(algorithm_key)\r\n )\r\n )\r\n elif encryption_algorithm == \"transposition\" and not to_decrypt:\r\n output_string = ''.join(\r\n TranspositionCipher.encrypt_transposition(\r\n str_input, int(algorithm_key)\r\n )\r\n )\r\n elif encryption_algorithm == \"rotate\":\r\n warning = \"\"\"\r\n When the algorithm is set to rotate, the \"to_decrypt\" parameter\r\n is ignored. To decrypt, set the key-word argument shift left\r\n so that it reverses the shift direction during encryption.\r\n Ex: If the text was shifted left, i.e. values were swapped\r\n with those \"higher\" up on the list read from left to right, pass\r\n the key-word argument shift_left=False to decrypt.\r\n\r\n RotateCipher's methods can return a list. However, it is\r\n forced to always return a string. Passing return_list=True as\r\n a key-word argument will have no effect. 
The argument is not\r\n passed to RotateCipher.\r\n \"\"\"\r\n # pprint.pprint(warning) # Included literl \\n and single quotes.\r\n print(warning)\r\n\r\n to_shiftleft = True\r\n if \"shift_left\" in kwargs:\r\n to_shiftleft = kwargs[\"shift_left\"]\r\n\r\n process_numbers = False\r\n if \"shift_numbers\" in kwargs:\r\n process_numbers = kwargs[\"shift_numbers\"]\r\n\r\n output_string = RotateCipher.rot13_e(\r\n string=str_input,\r\n shift_left=to_shiftleft,\r\n rotations=int(algorithm_key),\r\n # return_list=kwargs[\"return_list\"], # Removed for safety.\r\n shift_numbers=process_numbers\r\n )\r\n\r\n if not (os.path.exists(output_folderpath)):\r\n os.mkdir(output_folderpath)\r\n\r\n fileobj_output = open(\r\n full_outputpath,\r\n 'a' # Create a file and open it for writing. Append if exists.\r\n )\r\n fileobj_output.write(\r\n \"\\n=====\\nEncryptDecrypt Output on\\n%s\\n=====\\n\" %\r\n dt.datetime.now()\r\n )\r\n fileobj_output.write(output_string)\r\n fileobj_output.close()\r\n print(\"Done processing. Output folder:\\n{}\".format(\r\n Path(full_outputpath)\r\n )\r\n )\r\n\r\n return {\r\n \"output_file\": Path(full_outputpath).resolve(),\r\n \"output_text\": output_string\r\n }\r\n\r\n\r\ndef manual_test():\r\n dict_processedtext = process_textfile(\r\n string_path=r\"C:\\Users\\Rives\\Downloads\\Quizzes\\Quiz 0 Overwrite Number 1.txt\",\r\n encryption_algorithm=\"rotate\",\r\n algorithm_key=1,\r\n shift_left=True\r\n )\r\n print(\"Encrypt ROT1 with default values.\")\r\n # pprint.pprint(\r\n # dict_processedtext\r\n # )\r\n print(dict_processedtext[\"output_file\"])\r\n\r\n dict_processedtext2 = process_textfile(\r\n string_path=dict_processedtext[\"output_file\"],\r\n encryption_algorithm=\"rotate\",\r\n algorithm_key=1,\r\n output_folderpath=r\"C:\\Users\\Rives\\Downloads\\Decryptions\",\r\n output_filename=\"Quiz 0 Overwrite Number 1 Decrypted\",\r\n shift_left=False\r\n )\r\n print(\"Decrypt ROT1 with all values user-supplied.\")\r\n print(dict_processedtext[\"output_file\"])\r\n\r\n for i in range(2):\r\n dict_processedtext3a = process_textfile(\r\n string_path=r\"C:\\Users\\Rives\\Downloads\\Quizzes\\Quiz 0 Overwrite Number 2.txt\",\r\n encryption_algorithm=\"rotate\",\r\n algorithm_key=1,\r\n output_folderpath=r\"C:\\Users\\Rives\\Downloads\\Encryptions\"\r\n )\r\n print(dict_processedtext3a[\"output_file\"])\r\n\r\n dict_processedtext3b = process_textfile(\r\n string_path=dict_processedtext3a[\"output_file\"],\r\n encryption_algorithm=\"rotate\",\r\n algorithm_key=1,\r\n output_folderpath=r\"C:\\Users\\Rives\\Downloads\\Decryptions\",\r\n output_filename=\"Quiz 0 Overwrite Number 2 Decrypted\",\r\n shift_left=False\r\n )\r\n print(dict_processedtext3b[\"output_file\"])\r\n\r\n return None\r\n\r\n\r\ndef main():\r\n\r\n while True:\r\n print(\"Press Enter or New Line to skip entering any input.\\t\")\r\n task = input(\"Encrypt or decrypt? Encrypts by default. Press E/D.\\t\")\r\n algo = input(\"Algorithm? Uses Rotate by default.\\t\")\r\n algorithm_key = float(input(\"Key? Uses 1 by default.\\t\"))\r\n input_filepath = input(\r\n \"\"\"Mandatory / Required.\r\n Full path of target file. Includes file name and extension.\\n\"\"\")\r\n output_folder = input(\r\n \"Optional. Give the path of the output folder.\\n\"\r\n )\r\n output_file = input(\r\n \"Optional. Default output file name is EncryptDecrypt.txt.\\n\")\r\n keyword_arguments = input(\r\n \"\"\"Last question. 
Depends on algorithm.\r\n Format: \"key=value,key2,value2,...\".\r\n Use comma with no space as separator for two or more items.\\n\"\"\"\r\n )\r\n\r\n while len(input_filepath) == 0:\r\n input_filepath = input(\r\n \"\"\"Mandatory / Required.\r\n Full path of target file.\r\n Includes file name and extension.\\n\"\"\"\r\n )\r\n\r\n dict_kwargs = dict()\r\n for pair in keyword_arguments.split(','):\r\n try:\r\n key, pair = tuple(pair.split('='))\r\n dict_kwargs[key] = pair\r\n except ValueError:\r\n break\r\n\r\n to_decrypt = False\r\n if task.lower().startswith('d'):\r\n to_decrypt = True\r\n\r\n if len(output_folder) == 0:\r\n output_folder = str(Path.cwd().parent / r\"/EncryptDecrypt/\")\r\n\r\n if len(output_file) == 0:\r\n output_file = \"EncryptDecrypt.txt\"\r\n\r\n if len(algo) == 0:\r\n algo = \"rotate\"\r\n\r\n pprint.pprint(\r\n process_textfile(\r\n string_path=input_filepath,\r\n encryption_algorithm=algo,\r\n algorithm_key=algorithm_key,\r\n output_folderpath=output_folder,\r\n output_filename=output_file,\r\n to_decrypt=to_decrypt,\r\n kwargs_dict=dict_kwargs\r\n )\r\n )\r\n print(\r\n \"\"\"Done Running.\r\n Press Q to quit, any other key to process another file.\"\"\")\r\n\r\n to_quit = input()\r\n if to_quit.lower().startswith(\"q\"):\r\n sys.exit()\r\n else:\r\n continue\r\n # manual_test()\r\n\r\n return None\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\"\"\"\r\nNotes:\r\n\r\n*\r\nThe declared parameter data types in python functions are not enforced as of\r\nversion 3.4.\r\n\r\n*\r\nFor some reason, even if the name \"key\" was a parameter for process_textfile,\r\nit was being passed to rot13_e as a string. In the function process_textfile,\r\nVisual Basic also listed \"key\" as a string when passed to rot13_e even though\r\nthe function definition specified its data type as a float and the user input\r\nfor \"key\" was also converted to a float in the main function. This was caused\r\nby a for-loop. When VS Code followed the definition of key (F12) when it\r\nwas passed to rot13_e, VS Code pointed to the temporary variable \"key\" in a\r\nfor-loop. The parameter name was changed as a quick fix.\r\n\r\n- Adding an else clause to the for-loop did not fix it.\r\n- The for-loop declaration was funciton-level code while the call to rot13_e\r\nthat bugged was inside an else-clause. The else-clause holding the call to\r\nrot13_e was also function-level, same as the for-loop declaration. The call\r\nto RotateCipher.rot13_e was assigned to output_string.\r\n\"\"\"\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def findKeyInFile(word, filepath):
with open(filepath) as f:
for line in f.readlines():
if line.count(word) > 0:
return line
return None
<|reserved_special_token_1|>
'''
Check if a word appears in a file.
'''
# easier solution:
def findKeyInFile(word, filepath):
with open(filepath) as f:
for line in f.readlines():
if line.count(word) > 0:
return line
return None
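
# Minimal usage sketch (assumption: 'notes.txt' is a hypothetical file path,
# not referenced anywhere in the snippet above).
if __name__ == '__main__':
    line = findKeyInFile('python', 'notes.txt')
    if line is None:
        print('word not found')
    else:
        print(line.rstrip())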
|
flexible
|
{
"blob_id": "97fb2388777bcb459b9818495121fdf8318095ca",
"index": 8881,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef findKeyInFile(word, filepath):\n with open(filepath) as f:\n for line in f.readlines():\n if line.count(word) > 0:\n return line\n return None\n",
"step-3": "'''\ncheck if word appear in file\n'''\n# easier solution :\ndef findKeyInFile(word, filepath):\n with open(filepath) as f:\n for line in f.readlines():\n if line.count(word) > 0:\n return line\n return None\n\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from distutils.core import setup, Extension
setup(name='supermodule', version='1.0', \
ext_modules=[Extension('supermodule', ['main.c'])])
|
normal
|
{
"blob_id": "78c8f953b924f3e664570b844bf736a788e9cfb7",
"index": 3607,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='supermodule', version='1.0', ext_modules=[Extension(\n 'supermodule', ['main.c'])])\n",
"step-3": "from distutils.core import setup, Extension\nsetup(name='supermodule', version='1.0', ext_modules=[Extension(\n 'supermodule', ['main.c'])])\n",
"step-4": "from distutils.core import setup, Extension\nsetup(name='supermodule', version='1.0', \\\n ext_modules=[Extension('supermodule', ['main.c'])])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def optional_argument_func(arg1='', arg2=''):
"""
Function with two optional arguments
"""
print('arg1:{0}'.format(arg1))
print('arg2:{0}'.format(arg2))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def optional_argument_func(arg1='', arg2=''):
"""
Function with two optional arguments
"""
print('arg1:{0}'.format(arg1))
print('arg2:{0}'.format(arg2))
def arbitrary_argument_func(*args):
"""
just use "*" to collect all remaining arguments into a tuple
"""
numargs = len(args)
print('Number of arguments:{0}'.format(numargs))
for i, arg in enumerate(args):
print('Argument {0} is : {1}'.format(i, arg))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def optional_argument_func(arg1='', arg2=''):
"""
Function with two optional arguments
"""
print('arg1:{0}'.format(arg1))
print('arg2:{0}'.format(arg2))
def arbitrary_argument_func(*args):
"""
just use "*" to collect all remaining arguments into a tuple
"""
numargs = len(args)
print('Number of arguments:{0}'.format(numargs))
for i, arg in enumerate(args):
print('Argument {0} is : {1}'.format(i, arg))
if __name__ == '__main__':
optional_argument_func('Hello', 'World')
arbitrary_argument_func()
arbitrary_argument_func('hello')
arbitrary_argument_func('hello', 'world', 'again')
<|reserved_special_token_1|>
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# Copyright © YXC
# CreateTime: 2016-03-09 10:06:02
"""
Example of functions with arbitrary number arguments
"""
def optional_argument_func(arg1='', arg2=''):
"""
Function with two optional arguments
"""
print("arg1:{0}".format(arg1))
print("arg2:{0}".format(arg2))
def arbitrary_argument_func(*args):
"""
just use "*" to collect all remaining arguments into a tuple
"""
numargs = len(args)
print("Number of arguments:{0}".format(numargs))
for i, arg in enumerate(args):
print("Argument {0} is : {1}".format(i, arg))
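
# Companion sketch (added for illustration; not part of the original file):
# "**" collects all remaining key-word arguments into a dict, mirroring how
# "*" collects remaining positional arguments into a tuple.
def arbitrary_keyword_func(**kwargs):
    """
    just use "**" to collect all remaining key-word arguments into a dict
    """
    print("Number of keyword arguments:{0}".format(len(kwargs)))
    for key, value in kwargs.items():
        print("Argument {0} is : {1}".format(key, value))

# Example call: arbitrary_keyword_func(greeting="hello", target="world")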
if __name__ == "__main__":
optional_argument_func("Hello", "World")
arbitrary_argument_func()
arbitrary_argument_func("hello")
arbitrary_argument_func("hello", "world", "again")
|
flexible
|
{
"blob_id": "061a78650e2abf6a9d1e4796dd349174a8df5cb8",
"index": 8747,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print('arg1:{0}'.format(arg1))\n print('arg2:{0}'.format(arg2))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print('arg1:{0}'.format(arg1))\n print('arg2:{0}'.format(arg2))\n\n\ndef arbitrary_argument_func(*args):\n \"\"\"\n just use \"*\" to collect all remaining arguments into a tuple\n \"\"\"\n numargs = len(args)\n print('Number of arguments:{0}'.format(numargs))\n for i, arg in enumerate(args):\n print('Argument {0} is : {1}'.format(i, arg))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print('arg1:{0}'.format(arg1))\n print('arg2:{0}'.format(arg2))\n\n\ndef arbitrary_argument_func(*args):\n \"\"\"\n just use \"*\" to collect all remaining arguments into a tuple\n \"\"\"\n numargs = len(args)\n print('Number of arguments:{0}'.format(numargs))\n for i, arg in enumerate(args):\n print('Argument {0} is : {1}'.format(i, arg))\n\n\nif __name__ == '__main__':\n optional_argument_func('Hello', 'World')\n arbitrary_argument_func()\n arbitrary_argument_func('hello')\n arbitrary_argument_func('hello', 'world', 'again')\n",
"step-5": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n# Copyright © YXC\n# CreateTime: 2016-03-09 10:06:02\n\n\"\"\"\nExample of functions with arbitrary number arguments\n\"\"\"\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print(\"arg1:{0}\".format(arg1))\n print(\"arg2:{0}\".format(arg2))\n\n\ndef arbitrary_argument_func(*args):\n \"\"\"\n just use \"*\" to collect all remaining arguments into a tuple\n \"\"\"\n numargs = len(args)\n print(\"Number of arguments:{0}\".format(numargs))\n for i, arg in enumerate(args):\n print(\"Argument {0} is : {1}\".format(i, arg))\n\n\nif __name__ == \"__main__\":\n optional_argument_func(\"Hello\", \"World\")\n arbitrary_argument_func()\n arbitrary_argument_func(\"hello\")\n arbitrary_argument_func(\"hello\", \"world\", \"again\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
API = management.ManagementApi('https://rmq.amqpstorm.io:15671',
'guest', 'guest', verify=True)
try:
result = API.aliveness_test('/')
if result['status'] == 'ok':
print('RabbitMQ is alive!')
else:
print('RabbitMQ is not alive! :(')
except management.ApiConnectionError as why:
print('Connection Error: %s' % why)
except management.ApiError as why:
print('ApiError: %s' % why)
<|reserved_special_token_1|>
from amqpstorm import management
if __name__ == '__main__':
API = management.ManagementApi('https://rmq.amqpstorm.io:15671',
'guest', 'guest', verify=True)
try:
result = API.aliveness_test('/')
if result['status'] == 'ok':
print('RabbitMQ is alive!')
else:
print('RabbitMQ is not alive! :(')
except management.ApiConnectionError as why:
print('Connection Error: %s' % why)
except management.ApiError as why:
print('ApiError: %s' % why)
<|reserved_special_token_1|>
from amqpstorm import management
if __name__ == '__main__':
# If using a self-signed certificate, change verify=True to point at your CA bundle.
# You can disable certificate verification for testing by passing in verify=False.
API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',
'guest', verify=True)
try:
result = API.aliveness_test('/')
if result['status'] == 'ok':
print('RabbitMQ is alive!')
else:
print('RabbitMQ is not alive! :(')
except management.ApiConnectionError as why:
print('Connection Error: %s' % why)
except management.ApiError as why:
print('ApiError: %s' % why)
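
# Hedged note (assumptions: the CA bundle path below is hypothetical, and
# verify is forwarded to the underlying HTTP layer, so a path to a bundle
# should also be accepted in place of a boolean):
# API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',
#                                'guest',
#                                verify='/etc/ssl/certs/my_ca_bundle.pem')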
|
flexible
|
{
"blob_id": "0279057b3962e4b9839a86fc2e2683ac1da11b1a",
"index": 8665,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n API = management.ManagementApi('https://rmq.amqpstorm.io:15671',\n 'guest', 'guest', verify=True)\n try:\n result = API.aliveness_test('/')\n if result['status'] == 'ok':\n print('RabbitMQ is alive!')\n else:\n print('RabbitMQ is not alive! :(')\n except management.ApiConnectionError as why:\n print('Connection Error: %s' % why)\n except management.ApiError as why:\n print('ApiError: %s' % why)\n",
"step-3": "from amqpstorm import management\nif __name__ == '__main__':\n API = management.ManagementApi('https://rmq.amqpstorm.io:15671',\n 'guest', 'guest', verify=True)\n try:\n result = API.aliveness_test('/')\n if result['status'] == 'ok':\n print('RabbitMQ is alive!')\n else:\n print('RabbitMQ is not alive! :(')\n except management.ApiConnectionError as why:\n print('Connection Error: %s' % why)\n except management.ApiError as why:\n print('ApiError: %s' % why)\n",
"step-4": "from amqpstorm import management\n\nif __name__ == '__main__':\n # If using a self-signed certificate, change verify=True to point at your CA bundle.\n # You can disable certificate verification for testing by passing in verify=False.\n API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',\n 'guest', verify=True)\n try:\n result = API.aliveness_test('/')\n if result['status'] == 'ok':\n print('RabbitMQ is alive!')\n else:\n print('RabbitMQ is not alive! :(')\n except management.ApiConnectionError as why:\n print('Connection Error: %s' % why)\n except management.ApiError as why:\n print('ApiError: %s' % why)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import tensorflow as tf
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
_, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})

# The Session looks at all trainable variables that loss depends on and updates them.
# tf.Variable(initializer=None, trainable=True, collections=None, validate_shape=True, caching_device=None,
#             name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None)

# List of optimizers in TF:
# 1. tf.train.GradientDescentOptimizer
# 2. tf.train.AdagradOptimizer
# 3. tf.train.MomentumOptimizer
# 4. tf.train.AdamOptimizer
# 5. tf.train.ProximalGradientDescentOptimizer
# 6. tf.train.ProximalAdagradOptimizer
# 7. tf.train.RMSPropOptimizer
# And more
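
# Minimal end-to-end sketch (assumes TensorFlow 1.x; X, Y, w, b and the data
# points are illustrative names, not defined in the notes above). It shows the
# optimizer updating the trainable variables that loss depends on.
import tensorflow as tf

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
w = tf.Variable(0.0)
b = tf.Variable(0.0)
loss = tf.square(Y - (w * X + b))
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for x, y in [(1.0, 2.0), (2.0, 4.0), (3.0, 6.0)]:
        _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
        print(l)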
|
normal
|
{
"blob_id": "edb206a8cd5bc48e831142d5632fd7eb90abd209",
"index": 72,
"step-1": "import tensorflow as tf\noptimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)\n_, l = sess.run([optimizer, loss], feed_dict={X:x, Y:y})\n\nSession looks at all trainable variables that loss depends on and update them\ntf.Variable(initializer=None, trainable=True, collections=None, validate_shape=True, caching_device=None,\n name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None)\n\nList of optimizers in TF\n1. tf.train.GradientDescentOptimizer\n2. tf.train.AdagradOptimizer\n3. tf.train.MomentumOptimizer\n4. tf.train.AdamOptimizer\n5. tf.train.ProximalGradientDescentOptimizer\n6. tf.train.ProximalAdagradOptimizer\n7. tf.train.RMSPropOptimizer\nAnd more",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Connection:
def __init__(self):
self.connection = pymssql.connect(server='gditsn033\\SQLPROD',
database='ProdigiousDB', user='sa', password='sgrh@2016')
def __enter__(self):
self.cursor = self.connection.cursor()
return self.cursor
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Connection:
def __init__(self):
self.connection = pymssql.connect(server='gditsn033\\SQLPROD',
database='ProdigiousDB', user='sa', password='sgrh@2016')
def __enter__(self):
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
self.cursor.close()
self.connection.close()
<|reserved_special_token_1|>
__author__ = 'gaa8664'
<|reserved_special_token_0|>
class Connection:
def __init__(self):
self.connection = pymssql.connect(server='gditsn033\\SQLPROD',
database='ProdigiousDB', user='sa', password='sgrh@2016')
def __enter__(self):
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
self.cursor.close()
self.connection.close()
<|reserved_special_token_1|>
__author__ = 'gaa8664'
import pymssql
class Connection:
def __init__(self):
self.connection = pymssql.connect(server='gditsn033\\SQLPROD',
database='ProdigiousDB', user='sa', password='sgrh@2016')
def __enter__(self):
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
self.cursor.close()
self.connection.close()
<|reserved_special_token_1|>
__author__ = 'gaa8664'
import pymssql
class Connection:
def __init__(self):
self.connection = pymssql.connect(server = 'gditsn033\SQLPROD', database='ProdigiousDB', user='sa', password='sgrh@2016')
def __enter__(self):
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
self.cursor.close()
self.connection.close()
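
# Minimal usage sketch (assumptions: the query and the table name "Employees"
# are hypothetical; a reachable SQL Server with the credentials above is
# required for this to run).
if __name__ == '__main__':
    with Connection() as cursor:
        cursor.execute('SELECT TOP 5 * FROM Employees')
        for row in cursor.fetchall():
            print(row)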
|
flexible
|
{
"blob_id": "12dc248a95a84603065e23ce8fd33163bfcd2d3e",
"index": 9295,
"step-1": "<mask token>\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()\n",
"step-3": "__author__ = 'gaa8664'\n<mask token>\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()\n",
"step-4": "__author__ = 'gaa8664'\nimport pymssql\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()\n",
"step-5": "__author__ = 'gaa8664'\nimport pymssql\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server = 'gditsn033\\SQLPROD', database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/2/18 22:27
# @Author : name
# @File : 01.requests第一血.py
import requests
if __name__ == "__main__":
    # step1: specify the url
url = r'https://www.sogou.com/'
    # step2: send the request
    reponse = requests.get(url=url)
    # step3: get the response data; .text returns the response body as a string
page_text = reponse.text
print(page_text)
    # step4: persist the data to a file
with open('./sogou.html', 'w', encoding='utf-8') as fp:
fp.write(page_text)
print('爬取数据结束!')
|
normal
|
{
"blob_id": "7ae6ed8797d6ee02effd04750e243c5a59840177",
"index": 8444,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n url = 'https://www.sogou.com/'\n reponse = requests.get(url=url)\n page_text = reponse.text\n print(page_text)\n with open('./sogou.html', 'w', encoding='utf-8') as fp:\n fp.write(page_text)\n print('爬取数据结束!')\n",
"step-3": "import requests\nif __name__ == '__main__':\n url = 'https://www.sogou.com/'\n reponse = requests.get(url=url)\n page_text = reponse.text\n print(page_text)\n with open('./sogou.html', 'w', encoding='utf-8') as fp:\n fp.write(page_text)\n print('爬取数据结束!')\n",
"step-4": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2021/2/18 22:27\r\n# @Author : name\r\n# @File : 01.requests第一血.py\r\n\r\nimport requests\r\n\r\nif __name__ == \"__main__\":\r\n # step1:指定url\r\n url = r'https://www.sogou.com/'\r\n # step2:发起请求\r\n reponse = requests.get(url = url)\r\n # setp3:获取响应数据 text返回的是字符串形式的响应数据\r\n page_text = reponse.text\r\n print(page_text)\r\n # step4:持久化存储\r\n with open('./sogou.html', 'w', encoding='utf-8') as fp:\r\n fp.write(page_text)\r\n print('爬取数据结束!')\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def login():
while True:
name = input('请输入昵称(不能重复)')
msg = 'LOGIN' + '##' + name
udp_socket.sendto(msg.encode(), ADDR)
data, addr = udp_socket.recvfrom(1024)
if data.decode() == '0':
print('昵称已存在,请重新输入')
continue
else:
print('你已进入聊天室')
return name
def chat(name):
p = Process(target=receive, daemon=True)
p.start()
while True:
try:
content = input('>>>>')
except KeyboardInterrupt:
print('程序退出')
content = ''
if not content:
my_exit(name)
msg = 'CHAT' + '##' + f'{name}:' + content
udp_socket.sendto(msg.encode(), ADDR)
print('你发送了一条消息')
<|reserved_special_token_0|>
def receive():
while True:
data, addr = udp_socket.recvfrom(1024)
print('\n' + data.decode() + '\n>>>', end='')
def main():
name = login()
chat(name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def login():
while True:
name = input('请输入昵称(不能重复)')
msg = 'LOGIN' + '##' + name
udp_socket.sendto(msg.encode(), ADDR)
data, addr = udp_socket.recvfrom(1024)
if data.decode() == '0':
print('昵称已存在,请重新输入')
continue
else:
print('你已进入聊天室')
return name
def chat(name):
p = Process(target=receive, daemon=True)
p.start()
while True:
try:
content = input('>>>>')
except KeyboardInterrupt:
print('程序退出')
content = ''
if not content:
my_exit(name)
msg = 'CHAT' + '##' + f'{name}:' + content
udp_socket.sendto(msg.encode(), ADDR)
print('你发送了一条消息')
def my_exit(name):
msg = 'EXIT' + '##' + name
print('您已退出聊天室')
udp_socket.sendto(msg.encode(), ADDR)
sys.exit()
def receive():
while True:
data, addr = udp_socket.recvfrom(1024)
print('\n' + data.decode() + '\n>>>', end='')
def main():
name = login()
chat(name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ADDR = '127.0.0.1', 8888
udp_socket = socket(AF_INET, SOCK_DGRAM)
def login():
while True:
name = input('请输入昵称(不能重复)')
msg = 'LOGIN' + '##' + name
udp_socket.sendto(msg.encode(), ADDR)
data, addr = udp_socket.recvfrom(1024)
if data.decode() == '0':
print('昵称已存在,请重新输入')
continue
else:
print('你已进入聊天室')
return name
def chat(name):
p = Process(target=receive, daemon=True)
p.start()
while True:
try:
content = input('>>>>')
except KeyboardInterrupt:
print('程序退出')
content = ''
if not content:
my_exit(name)
msg = 'CHAT' + '##' + f'{name}:' + content
udp_socket.sendto(msg.encode(), ADDR)
print('你发送了一条消息')
def my_exit(name):
msg = 'EXIT' + '##' + name
print('您已退出聊天室')
udp_socket.sendto(msg.encode(), ADDR)
sys.exit()
def receive():
while True:
data, addr = udp_socket.recvfrom(1024)
print('\n' + data.decode() + '\n>>>', end='')
def main():
name = login()
chat(name)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from socket import *
from multiprocessing import Process
import sys
ADDR = '127.0.0.1', 8888
udp_socket = socket(AF_INET, SOCK_DGRAM)
def login():
while True:
name = input('请输入昵称(不能重复)')
msg = 'LOGIN' + '##' + name
udp_socket.sendto(msg.encode(), ADDR)
data, addr = udp_socket.recvfrom(1024)
if data.decode() == '0':
print('昵称已存在,请重新输入')
continue
else:
print('你已进入聊天室')
return name
def chat(name):
p = Process(target=receive, daemon=True)
p.start()
while True:
try:
content = input('>>>>')
except KeyboardInterrupt:
print('程序退出')
content = ''
if not content:
my_exit(name)
msg = 'CHAT' + '##' + f'{name}:' + content
udp_socket.sendto(msg.encode(), ADDR)
print('你发送了一条消息')
def my_exit(name):
msg = 'EXIT' + '##' + name
print('您已退出聊天室')
udp_socket.sendto(msg.encode(), ADDR)
sys.exit()
def receive():
while True:
data, addr = udp_socket.recvfrom(1024)
print('\n' + data.decode() + '\n>>>', end='')
def main():
name = login()
chat(name)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from socket import *
from multiprocessing import Process
import sys
ADDR = ("127.0.0.1", 8888)
udp_socket = socket(AF_INET, SOCK_DGRAM)
# udp_socket.bind(("0.0.0.0",6955)) # a UDP socket left idle for a while gets its port re-assigned automatically, so binding is needed
def login():
while True:
name = input("请输入昵称(不能重复)")
msg = "LOGIN" + "##" + name
udp_socket.sendto(msg.encode(), ADDR)
data, addr = udp_socket.recvfrom(1024)
if data.decode() == "0":
print("昵称已存在,请重新输入")
continue
else:
print("你已进入聊天室")
return name
def chat(name):
p = Process(target=receive, daemon=True)
p.start()
while True:
try:
content = input(">>>>")
except KeyboardInterrupt:
print("程序退出")
            content = "" # if Ctrl-C is pressed while blocked on input, fall through and call my_exit
if not content:
my_exit(name)
msg = "CHAT" + "##" + f"{name}:" + content
udp_socket.sendto(msg.encode(), ADDR)
print("你发送了一条消息")
def my_exit(name):
msg = "EXIT" + "##" + name
print("您已退出聊天室")
udp_socket.sendto(msg.encode(), ADDR)
sys.exit()
def receive(): # runs as a child process: receives messages and prints their content
while True:
data, addr = udp_socket.recvfrom(1024)
print("\n" + data.decode() + "\n>>>", end="")
def main():
name = login()
chat(name)
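
# Server-side parsing sketch (an assumption for illustration; the matching
# server is not part of this file). It splits the "TYPE##payload" messages
# that login(), chat() and my_exit() send above.
def parse_message(raw: bytes):
    kind, _, payload = raw.decode().partition("##")
    return kind, payload

# e.g. parse_message(b"CHAT##alice:hi") -> ("CHAT", "alice:hi")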
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "fd6cf903490ff4352e4721282354a68437ecb1e0",
"index": 8314,
"step-1": "<mask token>\n\n\ndef login():\n while True:\n name = input('请输入昵称(不能重复)')\n msg = 'LOGIN' + '##' + name\n udp_socket.sendto(msg.encode(), ADDR)\n data, addr = udp_socket.recvfrom(1024)\n if data.decode() == '0':\n print('昵称已存在,请重新输入')\n continue\n else:\n print('你已进入聊天室')\n return name\n\n\ndef chat(name):\n p = Process(target=receive, daemon=True)\n p.start()\n while True:\n try:\n content = input('>>>>')\n except KeyboardInterrupt:\n print('程序退出')\n content = ''\n if not content:\n my_exit(name)\n msg = 'CHAT' + '##' + f'{name}:' + content\n udp_socket.sendto(msg.encode(), ADDR)\n print('你发送了一条消息')\n\n\n<mask token>\n\n\ndef receive():\n while True:\n data, addr = udp_socket.recvfrom(1024)\n print('\\n' + data.decode() + '\\n>>>', end='')\n\n\ndef main():\n name = login()\n chat(name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef login():\n while True:\n name = input('请输入昵称(不能重复)')\n msg = 'LOGIN' + '##' + name\n udp_socket.sendto(msg.encode(), ADDR)\n data, addr = udp_socket.recvfrom(1024)\n if data.decode() == '0':\n print('昵称已存在,请重新输入')\n continue\n else:\n print('你已进入聊天室')\n return name\n\n\ndef chat(name):\n p = Process(target=receive, daemon=True)\n p.start()\n while True:\n try:\n content = input('>>>>')\n except KeyboardInterrupt:\n print('程序退出')\n content = ''\n if not content:\n my_exit(name)\n msg = 'CHAT' + '##' + f'{name}:' + content\n udp_socket.sendto(msg.encode(), ADDR)\n print('你发送了一条消息')\n\n\ndef my_exit(name):\n msg = 'EXIT' + '##' + name\n print('您已退出聊天室')\n udp_socket.sendto(msg.encode(), ADDR)\n sys.exit()\n\n\ndef receive():\n while True:\n data, addr = udp_socket.recvfrom(1024)\n print('\\n' + data.decode() + '\\n>>>', end='')\n\n\ndef main():\n name = login()\n chat(name)\n\n\n<mask token>\n",
"step-3": "<mask token>\nADDR = '127.0.0.1', 8888\nudp_socket = socket(AF_INET, SOCK_DGRAM)\n\n\ndef login():\n while True:\n name = input('请输入昵称(不能重复)')\n msg = 'LOGIN' + '##' + name\n udp_socket.sendto(msg.encode(), ADDR)\n data, addr = udp_socket.recvfrom(1024)\n if data.decode() == '0':\n print('昵称已存在,请重新输入')\n continue\n else:\n print('你已进入聊天室')\n return name\n\n\ndef chat(name):\n p = Process(target=receive, daemon=True)\n p.start()\n while True:\n try:\n content = input('>>>>')\n except KeyboardInterrupt:\n print('程序退出')\n content = ''\n if not content:\n my_exit(name)\n msg = 'CHAT' + '##' + f'{name}:' + content\n udp_socket.sendto(msg.encode(), ADDR)\n print('你发送了一条消息')\n\n\ndef my_exit(name):\n msg = 'EXIT' + '##' + name\n print('您已退出聊天室')\n udp_socket.sendto(msg.encode(), ADDR)\n sys.exit()\n\n\ndef receive():\n while True:\n data, addr = udp_socket.recvfrom(1024)\n print('\\n' + data.decode() + '\\n>>>', end='')\n\n\ndef main():\n name = login()\n chat(name)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from socket import *\nfrom multiprocessing import Process\nimport sys\nADDR = '127.0.0.1', 8888\nudp_socket = socket(AF_INET, SOCK_DGRAM)\n\n\ndef login():\n while True:\n name = input('请输入昵称(不能重复)')\n msg = 'LOGIN' + '##' + name\n udp_socket.sendto(msg.encode(), ADDR)\n data, addr = udp_socket.recvfrom(1024)\n if data.decode() == '0':\n print('昵称已存在,请重新输入')\n continue\n else:\n print('你已进入聊天室')\n return name\n\n\ndef chat(name):\n p = Process(target=receive, daemon=True)\n p.start()\n while True:\n try:\n content = input('>>>>')\n except KeyboardInterrupt:\n print('程序退出')\n content = ''\n if not content:\n my_exit(name)\n msg = 'CHAT' + '##' + f'{name}:' + content\n udp_socket.sendto(msg.encode(), ADDR)\n print('你发送了一条消息')\n\n\ndef my_exit(name):\n msg = 'EXIT' + '##' + name\n print('您已退出聊天室')\n udp_socket.sendto(msg.encode(), ADDR)\n sys.exit()\n\n\ndef receive():\n while True:\n data, addr = udp_socket.recvfrom(1024)\n print('\\n' + data.decode() + '\\n>>>', end='')\n\n\ndef main():\n name = login()\n chat(name)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from socket import *\nfrom multiprocessing import Process\nimport sys\n\nADDR = (\"127.0.0.1\", 8888)\nudp_socket = socket(AF_INET, SOCK_DGRAM)\n# udp_socket.bind((\"0.0.0.0\",6955)) # udp套接字在一段时间不链接后,会自动重新分配端口,所以需要绑定\n\n\ndef login():\n while True:\n name = input(\"请输入昵称(不能重复)\")\n msg = \"LOGIN\" + \"##\" + name\n udp_socket.sendto(msg.encode(), ADDR)\n data, addr = udp_socket.recvfrom(1024)\n if data.decode() == \"0\":\n print(\"昵称已存在,请重新输入\")\n continue\n else:\n print(\"你已进入聊天室\")\n return name\n\n\ndef chat(name):\n p = Process(target=receive, daemon=True)\n p.start()\n while True:\n try:\n content = input(\">>>>\")\n except KeyboardInterrupt:\n print(\"程序退出\")\n content = \"\" # 如果阻塞在input ctrl c 退出的话,调用my_exit函数\n if not content:\n my_exit(name)\n msg = \"CHAT\" + \"##\" + f\"{name}:\" + content\n udp_socket.sendto(msg.encode(), ADDR)\n print(\"你发送了一条消息\")\n\n\ndef my_exit(name):\n msg = \"EXIT\" + \"##\" + name\n print(\"您已退出聊天室\")\n udp_socket.sendto(msg.encode(), ADDR)\n sys.exit()\n\n\ndef receive(): # 作为子进程,收到消息然后打印出收到的内容\n while True:\n data, addr = udp_socket.recvfrom(1024)\n print(\"\\n\" + data.decode() + \"\\n>>>\", end=\"\")\n\n\ndef main():\n name = login()\n chat(name)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_version():
ver_file = None
try:
ver_file, pathname, description = imp.find_module('__version__', [
'cmakelint'])
vermod = imp.load_module('__version__', ver_file, pathname, description
)
version = vermod.VERSION
return version
finally:
if ver_file is not None:
ver_file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_version():
ver_file = None
try:
ver_file, pathname, description = imp.find_module('__version__', [
'cmakelint'])
vermod = imp.load_module('__version__', ver_file, pathname, description
)
version = vermod.VERSION
return version
finally:
if ver_file is not None:
ver_file.close()
setup(name='cmakelint', version=get_version(), packages=['cmakelint'],
scripts=['bin/cmakelint'], entry_points={'console_scripts': [
'cmakelint = cmakelint.main:main']}, install_requires=[''], author=
'Richard Quirk', author_email='[email protected]', url=
'https://github.com/richq/cmake-lint', download_url=
'https://github.com/richq/cmake-lint', keywords=['cmake', 'lint'],
classifiers=['Topic :: Software Development',
'Programming Language :: Other', 'Programming Language :: Python',
'License :: OSI Approved :: Apache Software License'], description=
'Static code checker for CMake files', long_description=
'cmakelint parses CMake files and reports style issues.', license=
'Apache 2.0')
<|reserved_special_token_1|>
from setuptools import setup
import imp
def get_version():
ver_file = None
try:
ver_file, pathname, description = imp.find_module('__version__', [
'cmakelint'])
vermod = imp.load_module('__version__', ver_file, pathname, description
)
version = vermod.VERSION
return version
finally:
if ver_file is not None:
ver_file.close()
setup(name='cmakelint', version=get_version(), packages=['cmakelint'],
scripts=['bin/cmakelint'], entry_points={'console_scripts': [
'cmakelint = cmakelint.main:main']}, install_requires=[''], author=
'Richard Quirk', author_email='[email protected]', url=
'https://github.com/richq/cmake-lint', download_url=
'https://github.com/richq/cmake-lint', keywords=['cmake', 'lint'],
classifiers=['Topic :: Software Development',
'Programming Language :: Other', 'Programming Language :: Python',
'License :: OSI Approved :: Apache Software License'], description=
'Static code checker for CMake files', long_description=
'cmakelint parses CMake files and reports style issues.', license=
'Apache 2.0')
<|reserved_special_token_1|>
from setuptools import setup
import imp
def get_version():
ver_file = None
try:
ver_file, pathname, description = imp.find_module('__version__', ['cmakelint'])
vermod = imp.load_module('__version__', ver_file, pathname, description)
version = vermod.VERSION
return version
finally:
if ver_file is not None:
ver_file.close()
setup(name='cmakelint',
version=get_version(),
packages=['cmakelint'],
scripts=['bin/cmakelint'],
entry_points={
'console_scripts': [
'cmakelint = cmakelint.main:main'
]
},
install_requires=[''],
author="Richard Quirk",
author_email="[email protected]",
url="https://github.com/richq/cmake-lint",
download_url="https://github.com/richq/cmake-lint",
keywords=["cmake", "lint"],
classifiers=[
"Topic :: Software Development",
"Programming Language :: Other",
"Programming Language :: Python",
"License :: OSI Approved :: Apache Software License"],
description="Static code checker for CMake files",
long_description="""cmakelint parses CMake files and reports style issues.""",
license="Apache 2.0")
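# Usage note (a minimal sketch, not part of the original setup script): after an
# install such as `pip install .`, the console_scripts entry point above exposes a
# `cmakelint` command, e.g. `cmakelint CMakeLists.txt`.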
|
flexible
|
{
"blob_id": "b3d9013ab6facb8dd9361e2a0715a8ed0cdfeaba",
"index": 342,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_version():\n ver_file = None\n try:\n ver_file, pathname, description = imp.find_module('__version__', [\n 'cmakelint'])\n vermod = imp.load_module('__version__', ver_file, pathname, description\n )\n version = vermod.VERSION\n return version\n finally:\n if ver_file is not None:\n ver_file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_version():\n ver_file = None\n try:\n ver_file, pathname, description = imp.find_module('__version__', [\n 'cmakelint'])\n vermod = imp.load_module('__version__', ver_file, pathname, description\n )\n version = vermod.VERSION\n return version\n finally:\n if ver_file is not None:\n ver_file.close()\n\n\nsetup(name='cmakelint', version=get_version(), packages=['cmakelint'],\n scripts=['bin/cmakelint'], entry_points={'console_scripts': [\n 'cmakelint = cmakelint.main:main']}, install_requires=[''], author=\n 'Richard Quirk', author_email='[email protected]', url=\n 'https://github.com/richq/cmake-lint', download_url=\n 'https://github.com/richq/cmake-lint', keywords=['cmake', 'lint'],\n classifiers=['Topic :: Software Development',\n 'Programming Language :: Other', 'Programming Language :: Python',\n 'License :: OSI Approved :: Apache Software License'], description=\n 'Static code checker for CMake files', long_description=\n 'cmakelint parses CMake files and reports style issues.', license=\n 'Apache 2.0')\n",
"step-4": "from setuptools import setup\nimport imp\n\n\ndef get_version():\n ver_file = None\n try:\n ver_file, pathname, description = imp.find_module('__version__', [\n 'cmakelint'])\n vermod = imp.load_module('__version__', ver_file, pathname, description\n )\n version = vermod.VERSION\n return version\n finally:\n if ver_file is not None:\n ver_file.close()\n\n\nsetup(name='cmakelint', version=get_version(), packages=['cmakelint'],\n scripts=['bin/cmakelint'], entry_points={'console_scripts': [\n 'cmakelint = cmakelint.main:main']}, install_requires=[''], author=\n 'Richard Quirk', author_email='[email protected]', url=\n 'https://github.com/richq/cmake-lint', download_url=\n 'https://github.com/richq/cmake-lint', keywords=['cmake', 'lint'],\n classifiers=['Topic :: Software Development',\n 'Programming Language :: Other', 'Programming Language :: Python',\n 'License :: OSI Approved :: Apache Software License'], description=\n 'Static code checker for CMake files', long_description=\n 'cmakelint parses CMake files and reports style issues.', license=\n 'Apache 2.0')\n",
"step-5": "from setuptools import setup\n\nimport imp\n\n\ndef get_version():\n ver_file = None\n try:\n ver_file, pathname, description = imp.find_module('__version__', ['cmakelint'])\n vermod = imp.load_module('__version__', ver_file, pathname, description)\n version = vermod.VERSION\n return version\n finally:\n if ver_file is not None:\n ver_file.close()\n\n\nsetup(name='cmakelint',\n version=get_version(),\n packages=['cmakelint'],\n scripts=['bin/cmakelint'],\n entry_points={\n 'console_scripts': [\n 'cmakelint = cmakelint.main:main'\n ]\n },\n install_requires=[''],\n author=\"Richard Quirk\",\n author_email=\"[email protected]\",\n url=\"https://github.com/richq/cmake-lint\",\n download_url=\"https://github.com/richq/cmake-lint\",\n keywords=[\"cmake\", \"lint\"],\n classifiers=[\n \"Topic :: Software Development\",\n \"Programming Language :: Other\",\n \"Programming Language :: Python\",\n \"License :: OSI Approved :: Apache Software License\"],\n description=\"Static code checker for CMake files\",\n long_description=\"\"\"cmakelint parses CMake files and reports style issues.\"\"\",\n license=\"Apache 2.0\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class View1(LoginRequiredMixin, View):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class View2(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return response
def get(self, request, *args, **kwargs):
return HttpResponse('Contenu view2')
@method_decorator(login_required, name='dispatch')
class View3(View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse('Contenu view2')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class View1(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
<|reserved_special_token_0|>
class View2(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return response
def get(self, request, *args, **kwargs):
return HttpResponse('Contenu view2')
@method_decorator(login_required, name='dispatch')
class View3(View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse('Contenu view2')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class View1(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse('Contenu view1')
class View2(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return response
def get(self, request, *args, **kwargs):
return HttpResponse('Contenu view2')
@method_decorator(login_required, name='dispatch')
class View3(View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse('Contenu view2')
<|reserved_special_token_1|>
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import View
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
class View1(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse('Contenu view1')
class View2(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return response
def get(self, request, *args, **kwargs):
return HttpResponse('Contenu view2')
@method_decorator(login_required, name='dispatch')
class View3(View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse('Contenu view2')
<|reserved_special_token_1|>
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import View
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
class View1(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse("Contenu view1")
class View2(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return response
def get(self, request, *args, **kwargs):
return HttpResponse("Contenu view2")
@method_decorator(login_required, name='dispatch')
class View3(View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse("Contenu view2")
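# A minimal routing sketch for these views (module and URL names here are assumptions,
# not part of the original app):
#
# from django.urls import path
# from .views import View1, View2, View3
#
# urlpatterns = [
#     path('v1/', View1.as_view(), name='view1'),
#     path('v2/', View2.as_view(), name='view2'),
#     path('v3/', View3.as_view(), name='view3'),
# ]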
|
flexible
|
{
"blob_id": "826abb18b11afd7a010e2bfc5a29ba068218c23a",
"index": 7550,
"step-1": "<mask token>\n\n\nclass View1(LoginRequiredMixin, View):\n <mask token>\n <mask token>\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-2": "<mask token>\n\n\nclass View1(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n <mask token>\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-3": "<mask token>\n\n\nclass View1(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view1')\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-4": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\n\n\nclass View1(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view1')\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-5": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\n\nclass View1(LoginRequiredMixin, View):\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\"Contenu view1\")\n\nclass View2(LoginRequiredMixin, View):\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\"Contenu view2\")\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\"Contenu view2\")\n\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
def metas_to_json(req, q):
def flatten(arr):
if len(arr) == 1:
return arr[0]
else:
return arr
for page, metas in iter_metas(req, q):
flattened = [(key, flatten(val)) for key, val in metas.items()]
yield json.dumps(dict(flattened + [('gwikipagename', page)]))
class MetaStreamer(object):
def __init__(self, iterator):
self.iterator = iterator
self.done = False
def read(self, *args):
if not self.done:
try:
row = self.iterator.next()
return 'data: ' + row + '\n\n'
except StopIteration:
self.done = True
return 'event: done\ndata: \n\n'
else:
return None
def close(self):
self.done = True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def metas_to_json(req, q):
def flatten(arr):
if len(arr) == 1:
return arr[0]
else:
return arr
for page, metas in iter_metas(req, q):
flattened = [(key, flatten(val)) for key, val in metas.items()]
yield json.dumps(dict(flattened + [('gwikipagename', page)]))
class MetaStreamer(object):
def __init__(self, iterator):
self.iterator = iterator
self.done = False
def read(self, *args):
if not self.done:
try:
row = self.iterator.next()
return 'data: ' + row + '\n\n'
except StopIteration:
self.done = True
return 'event: done\ndata: \n\n'
else:
return None
def close(self):
self.done = True
def execute(pagename, request):
form = values_to_form(request.values)
query = form.get('q', [None])[0]
output_format = form.get('type', [''])[0]
try:
json_rows = metas_to_json(request, query)
accepts = unicode(request.request.accept_mimetypes)
if output_format == 'stream' or 'text/event-stream' in accepts:
request.content_type = 'text/event-stream'
request.send_file(MetaStreamer(json_rows))
else:
request.content_type = 'application/json;boundary=NL'
for row in json_rows:
request.write(row + '\n')
except ImportError:
request.status_code = 501
request.write(u'abusehelper package not available')
except ValueError:
request.status_code = 400
request.write(u"invalid query '" + query + u"'")
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
import simplejson as json
except ImportError:
import json
def metas_to_json(req, q):
def flatten(arr):
if len(arr) == 1:
return arr[0]
else:
return arr
for page, metas in iter_metas(req, q):
flattened = [(key, flatten(val)) for key, val in metas.items()]
yield json.dumps(dict(flattened + [('gwikipagename', page)]))
class MetaStreamer(object):
def __init__(self, iterator):
self.iterator = iterator
self.done = False
def read(self, *args):
if not self.done:
try:
row = self.iterator.next()
return 'data: ' + row + '\n\n'
except StopIteration:
self.done = True
return 'event: done\ndata: \n\n'
else:
return None
def close(self):
self.done = True
def execute(pagename, request):
form = values_to_form(request.values)
query = form.get('q', [None])[0]
output_format = form.get('type', [''])[0]
try:
json_rows = metas_to_json(request, query)
accepts = unicode(request.request.accept_mimetypes)
if output_format == 'stream' or 'text/event-stream' in accepts:
request.content_type = 'text/event-stream'
request.send_file(MetaStreamer(json_rows))
else:
request.content_type = 'application/json;boundary=NL'
for row in json_rows:
request.write(row + '\n')
except ImportError:
request.status_code = 501
request.write(u'abusehelper package not available')
except ValueError:
request.status_code = 400
request.write(u"invalid query '" + query + u"'")
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from graphingwiki import values_to_form
from graphingwiki.editing import iter_metas
try:
import simplejson as json
except ImportError:
import json
def metas_to_json(req, q):
def flatten(arr):
if len(arr) == 1:
return arr[0]
else:
return arr
for page, metas in iter_metas(req, q):
flattened = [(key, flatten(val)) for key, val in metas.items()]
yield json.dumps(dict(flattened + [('gwikipagename', page)]))
class MetaStreamer(object):
def __init__(self, iterator):
self.iterator = iterator
self.done = False
def read(self, *args):
if not self.done:
try:
row = self.iterator.next()
return 'data: ' + row + '\n\n'
except StopIteration:
self.done = True
return 'event: done\ndata: \n\n'
else:
return None
def close(self):
self.done = True
def execute(pagename, request):
form = values_to_form(request.values)
query = form.get('q', [None])[0]
output_format = form.get('type', [''])[0]
try:
json_rows = metas_to_json(request, query)
accepts = unicode(request.request.accept_mimetypes)
if output_format == 'stream' or 'text/event-stream' in accepts:
request.content_type = 'text/event-stream'
request.send_file(MetaStreamer(json_rows))
else:
request.content_type = 'application/json;boundary=NL'
for row in json_rows:
request.write(row + '\n')
except ImportError:
request.status_code = 501
request.write(u'abusehelper package not available')
except ValueError:
request.status_code = 400
request.write(u"invalid query '" + query + u"'")
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-"
"""
getMetaStream action for graphingwiki
- alternative meta retrieval action that uses
abuse-sa query language for filtering metas
and returns Line Delimeted JSON or event-stream
@copyright: 2015 Lauri Pokka <[email protected]>
@license: MIT <http://www.opensource.org/licenses/mit-license.php>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from graphingwiki import values_to_form
from graphingwiki.editing import iter_metas
try:
import simplejson as json
except ImportError:
import json
def metas_to_json(req, q):
def flatten(arr):
if len(arr) == 1:
return arr[0]
else:
return arr
for page, metas in iter_metas(req, q):
flattened = [(key, flatten(val)) for key, val in metas.items()]
yield json.dumps(dict(flattened + [('gwikipagename', page)]))
class MetaStreamer(object):
def __init__(self, iterator):
self.iterator = iterator
self.done = False
def read(self, *args):
if not self.done:
try:
row = self.iterator.next()
return "data: " + row + "\n\n"
except StopIteration:
self.done = True
return "event: done\ndata: \n\n"
else:
return None
def close(self):
self.done = True
def execute(pagename, request):
form = values_to_form(request.values)
query = form.get('q', [None])[0]
output_format = form.get('type', [""])[0]
try:
json_rows = metas_to_json(request, query)
accepts = unicode(request.request.accept_mimetypes)
if output_format == "stream" or "text/event-stream" in accepts:
request.content_type = "text/event-stream"
## send_file seems to be the least hacky way
## for sending streamed content in MoinMoin
request.send_file(MetaStreamer(json_rows))
else:
request.content_type = "application/json;boundary=NL"
for row in json_rows:
request.write(row + "\n")
except ImportError:
request.status_code = 501
request.write(u"abusehelper package not available")
except ValueError:
request.status_code = 400
request.write(u"invalid query '" + query + u"'")
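# Example request (assuming the module is installed as the getMetaStream action;
# page and query values are illustrative only):
# GET /SomePage?action=getMetaStream&q=CategoryTest&type=stream
# streams one JSON object per matching page as server-sent events; without
# type=stream the rows come back as newline-delimited JSON
# (Content-Type: application/json;boundary=NL).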
|
flexible
|
{
"blob_id": "c67cd3c16c15d6aab02a07736c83bbdd5bd98514",
"index": 1839,
"step-1": "<mask token>\n\n\ndef metas_to_json(req, q):\n\n def flatten(arr):\n if len(arr) == 1:\n return arr[0]\n else:\n return arr\n for page, metas in iter_metas(req, q):\n flattened = [(key, flatten(val)) for key, val in metas.items()]\n yield json.dumps(dict(flattened + [('gwikipagename', page)]))\n\n\nclass MetaStreamer(object):\n\n def __init__(self, iterator):\n self.iterator = iterator\n self.done = False\n\n def read(self, *args):\n if not self.done:\n try:\n row = self.iterator.next()\n return 'data: ' + row + '\\n\\n'\n except StopIteration:\n self.done = True\n return 'event: done\\ndata: \\n\\n'\n else:\n return None\n\n def close(self):\n self.done = True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef metas_to_json(req, q):\n\n def flatten(arr):\n if len(arr) == 1:\n return arr[0]\n else:\n return arr\n for page, metas in iter_metas(req, q):\n flattened = [(key, flatten(val)) for key, val in metas.items()]\n yield json.dumps(dict(flattened + [('gwikipagename', page)]))\n\n\nclass MetaStreamer(object):\n\n def __init__(self, iterator):\n self.iterator = iterator\n self.done = False\n\n def read(self, *args):\n if not self.done:\n try:\n row = self.iterator.next()\n return 'data: ' + row + '\\n\\n'\n except StopIteration:\n self.done = True\n return 'event: done\\ndata: \\n\\n'\n else:\n return None\n\n def close(self):\n self.done = True\n\n\ndef execute(pagename, request):\n form = values_to_form(request.values)\n query = form.get('q', [None])[0]\n output_format = form.get('type', [''])[0]\n try:\n json_rows = metas_to_json(request, query)\n accepts = unicode(request.request.accept_mimetypes)\n if output_format == 'stream' or 'text/event-stream' in accepts:\n request.content_type = 'text/event-stream'\n request.send_file(MetaStreamer(json_rows))\n else:\n request.content_type = 'application/json;boundary=NL'\n for row in json_rows:\n request.write(row + '\\n')\n except ImportError:\n request.status_code = 501\n request.write(u'abusehelper package not available')\n except ValueError:\n request.status_code = 400\n request.write(u\"invalid query '\" + query + u\"'\")\n",
"step-3": "<mask token>\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\n\ndef metas_to_json(req, q):\n\n def flatten(arr):\n if len(arr) == 1:\n return arr[0]\n else:\n return arr\n for page, metas in iter_metas(req, q):\n flattened = [(key, flatten(val)) for key, val in metas.items()]\n yield json.dumps(dict(flattened + [('gwikipagename', page)]))\n\n\nclass MetaStreamer(object):\n\n def __init__(self, iterator):\n self.iterator = iterator\n self.done = False\n\n def read(self, *args):\n if not self.done:\n try:\n row = self.iterator.next()\n return 'data: ' + row + '\\n\\n'\n except StopIteration:\n self.done = True\n return 'event: done\\ndata: \\n\\n'\n else:\n return None\n\n def close(self):\n self.done = True\n\n\ndef execute(pagename, request):\n form = values_to_form(request.values)\n query = form.get('q', [None])[0]\n output_format = form.get('type', [''])[0]\n try:\n json_rows = metas_to_json(request, query)\n accepts = unicode(request.request.accept_mimetypes)\n if output_format == 'stream' or 'text/event-stream' in accepts:\n request.content_type = 'text/event-stream'\n request.send_file(MetaStreamer(json_rows))\n else:\n request.content_type = 'application/json;boundary=NL'\n for row in json_rows:\n request.write(row + '\\n')\n except ImportError:\n request.status_code = 501\n request.write(u'abusehelper package not available')\n except ValueError:\n request.status_code = 400\n request.write(u\"invalid query '\" + query + u\"'\")\n",
"step-4": "<mask token>\nfrom graphingwiki import values_to_form\nfrom graphingwiki.editing import iter_metas\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\n\ndef metas_to_json(req, q):\n\n def flatten(arr):\n if len(arr) == 1:\n return arr[0]\n else:\n return arr\n for page, metas in iter_metas(req, q):\n flattened = [(key, flatten(val)) for key, val in metas.items()]\n yield json.dumps(dict(flattened + [('gwikipagename', page)]))\n\n\nclass MetaStreamer(object):\n\n def __init__(self, iterator):\n self.iterator = iterator\n self.done = False\n\n def read(self, *args):\n if not self.done:\n try:\n row = self.iterator.next()\n return 'data: ' + row + '\\n\\n'\n except StopIteration:\n self.done = True\n return 'event: done\\ndata: \\n\\n'\n else:\n return None\n\n def close(self):\n self.done = True\n\n\ndef execute(pagename, request):\n form = values_to_form(request.values)\n query = form.get('q', [None])[0]\n output_format = form.get('type', [''])[0]\n try:\n json_rows = metas_to_json(request, query)\n accepts = unicode(request.request.accept_mimetypes)\n if output_format == 'stream' or 'text/event-stream' in accepts:\n request.content_type = 'text/event-stream'\n request.send_file(MetaStreamer(json_rows))\n else:\n request.content_type = 'application/json;boundary=NL'\n for row in json_rows:\n request.write(row + '\\n')\n except ImportError:\n request.status_code = 501\n request.write(u'abusehelper package not available')\n except ValueError:\n request.status_code = 400\n request.write(u\"invalid query '\" + query + u\"'\")\n",
"step-5": "# -*- coding: utf-8 -*-\"\n\"\"\"\n getMetaStream action for graphingwiki\n - alternative meta retrieval action that uses\n abuse-sa query language for filtering metas\n and returns Line Delimeted JSON or event-stream\n\n @copyright: 2015 Lauri Pokka <[email protected]>\n @license: MIT <http://www.opensource.org/licenses/mit-license.php>\n\n Permission is hereby granted, free of charge, to any person\n obtaining a copy of this software and associated documentation\n files (the \"Software\"), to deal in the Software without\n restriction, including without limitation the rights to use, copy,\n modify, merge, publish, distribute, sublicense, and/or sell copies\n of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n DEALINGS IN THE SOFTWARE.\n\n\"\"\"\n\nfrom graphingwiki import values_to_form\nfrom graphingwiki.editing import iter_metas\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\n\ndef metas_to_json(req, q):\n def flatten(arr):\n if len(arr) == 1:\n return arr[0]\n else:\n return arr\n\n for page, metas in iter_metas(req, q):\n flattened = [(key, flatten(val)) for key, val in metas.items()]\n yield json.dumps(dict(flattened + [('gwikipagename', page)]))\n\n\nclass MetaStreamer(object):\n def __init__(self, iterator):\n self.iterator = iterator\n self.done = False\n\n def read(self, *args):\n if not self.done:\n try:\n row = self.iterator.next()\n return \"data: \" + row + \"\\n\\n\"\n except StopIteration:\n self.done = True\n return \"event: done\\ndata: \\n\\n\"\n else:\n return None\n\n def close(self):\n self.done = True\n\n\ndef execute(pagename, request):\n form = values_to_form(request.values)\n query = form.get('q', [None])[0]\n output_format = form.get('type', [\"\"])[0]\n try:\n json_rows = metas_to_json(request, query)\n\n accepts = unicode(request.request.accept_mimetypes)\n\n if output_format == \"stream\" or \"text/event-stream\" in accepts:\n request.content_type = \"text/event-stream\"\n\n ## send_file seems to be the least hacky way\n ## for sending streamed content in MoinMoin\n request.send_file(MetaStreamer(json_rows))\n else:\n request.content_type = \"application/json;boundary=NL\"\n\n for row in json_rows:\n request.write(row + \"\\n\")\n\n except ImportError:\n request.status_code = 501\n request.write(u\"abusehelper package not available\")\n except ValueError:\n request.status_code = 400\n request.write(u\"invalid query '\" + query + u\"'\")",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import numpy as np
# read data from file
theFile = open('datapri.txt','r')
temp = []
# n is the number of elements in the sample array
n = int(theFile.readline().format())
for val in theFile.read().split():
temp.append(int(val))
theFile.close()
arr = np.random.rand(n,n)
k = 0
for i in range(n):
for j in range(n):
arr[i,j] = temp[k]
k = k+1
# print(arr)
# create an array to hold the adjacency lists of the vertices
ke = []
for i in range(n):
ke.append([])
trongso = []
for i in range(n):
trongso.append([])
# put the adjacent vertices into the ke array
for i in range(n):
for j in range(n):
if(arr[i,j] != 0):
ke[i].append(j)
trongso[i].append(arr[i,j])
print(trongso[1])
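# For example, with n = 2 and the matrix [[0, 2], [2, 0]], the loops above give
# ke = [[1], [0]] and trongso = [[2.0], [2.0]]: each vertex's list of adjacent
# vertices together with the matching edge weights.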
# available = [False for i in range(n)]
# vertex = [0 for i in range(n)]
#
# def CorlorGraph():
# #initialize the first vertex with the first color
# vertex[0] = 0
#
# #initialize the remaining vertices as not yet colored
# for i in range(1,n):
# vertex[i] = -1
#
# #color the remaining vertices
# for i in range(1,n):
# for j in (ke[i]):
# if(vertex[j] != -1):
# available[vertex[j]] = True
#
# crz = 0
# for k in range(n):
# if (available[k] == False):
# break
# crz = crz + 1
# vertex[i] = crz
# for j in (ke[i]):
# if (vertex[j] != -1):
# available[vertex[j]] = False
# for i in range(n):
# print("ke",i,"-",ke[i])
# CorlorGraph()
# print("Cac dinh da duoc to mau: ")
# for i in range(n):
# print(i,vertex[i])
|
normal
|
{
"blob_id": "aa801bc8398cdf69a15d04188dd8429e4624150e",
"index": 5574,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\n<mask token>\nfor i in range(n):\n for j in range(n):\n arr[i, j] = temp[k]\n k = k + 1\n<mask token>\nfor i in range(n):\n ke.append([])\n<mask token>\nfor i in range(n):\n trongso.append([])\nfor i in range(n):\n for j in range(n):\n if arr[i, j] != 0:\n ke[i].append(j)\n trongso[i].append(arr[i, j])\nprint(trongso[1])\n",
"step-3": "<mask token>\ntheFile = open('datapri.txt', 'r')\ntemp = []\nn = int(theFile.readline().format())\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\narr = np.random.rand(n, n)\nk = 0\nfor i in range(n):\n for j in range(n):\n arr[i, j] = temp[k]\n k = k + 1\nke = []\nfor i in range(n):\n ke.append([])\ntrongso = []\nfor i in range(n):\n trongso.append([])\nfor i in range(n):\n for j in range(n):\n if arr[i, j] != 0:\n ke[i].append(j)\n trongso[i].append(arr[i, j])\nprint(trongso[1])\n",
"step-4": "import numpy as np\ntheFile = open('datapri.txt', 'r')\ntemp = []\nn = int(theFile.readline().format())\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\narr = np.random.rand(n, n)\nk = 0\nfor i in range(n):\n for j in range(n):\n arr[i, j] = temp[k]\n k = k + 1\nke = []\nfor i in range(n):\n ke.append([])\ntrongso = []\nfor i in range(n):\n trongso.append([])\nfor i in range(n):\n for j in range(n):\n if arr[i, j] != 0:\n ke[i].append(j)\n trongso[i].append(arr[i, j])\nprint(trongso[1])\n",
"step-5": "import numpy as np\n#read data from file\n#read data from file\n\ntheFile = open('datapri.txt','r')\ntemp = []\n#n la so phan tu cua mang mau\nn = int(theFile.readline().format())\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\n\narr = np.random.rand(n,n)\nk = 0\nfor i in range(n):\n for j in range(n):\n arr[i,j] = temp[k]\n k = k+1\n# print(arr)\n#tao 1 mang de chua ma tran cac dinh ke\nke = []\nfor i in range(n):\n ke.append([])\ntrongso = []\nfor i in range(n):\n trongso.append([])\n#dua cac dinh vao mang ke\nfor i in range(n):\n for j in range(n):\n if(arr[i,j] != 0):\n ke[i].append(j)\n trongso[i].append(arr[i,j])\nprint(trongso[1])\n# available = [False for i in range(n)]\n# vertex = [0 for i in range(n)]\n#\n# def CorlorGraph():\n# #khoi tao dinh dau tien duoc to mau dau tien\n# vertex[0] = 0\n#\n# #khoi tao cac dinh con lai chua duoc to mau\n# for i in range(1,n):\n# vertex[i] = -1\n#\n# #to mau cac dinh con lai\n# for i in range(1,n):\n# for j in (ke[i]):\n# if(vertex[j] != -1):\n# available[vertex[j]] = True\n#\n# crz = 0\n# for k in range(n):\n# if (available[k] == False):\n# break\n# crz = crz + 1\n# vertex[i] = crz\n# for j in (ke[i]):\n# if (vertex[j] != -1):\n# available[vertex[j]] = False\n# for i in range(n):\n# print(\"ke\",i,\"-\",ke[i])\n# CorlorGraph()\n# print(\"Cac dinh da duoc to mau: \")\n# for i in range(n):\n# print(i,vertex[i])\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.apps import AppConfig
class LaughsappConfig(AppConfig):
name = 'laughsApp'
|
normal
|
{
"blob_id": "6b785502e8a8983c164ebdffdd304da47c926acb",
"index": 774,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LaughsappConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LaughsappConfig(AppConfig):\n name = 'laughsApp'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass LaughsappConfig(AppConfig):\n name = 'laughsApp'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df.to_csv('linkedin_jobs.csv', index=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
chrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')
df = get_jobs('Data Scientist', 40, False, chrome_driver_path)
df.to_csv('linkedin_jobs.csv', index=False)
<|reserved_special_token_1|>
import os
from linkedin_scraper import get_jobs
chrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')
df = get_jobs('Data Scientist', 40, False, chrome_driver_path)
df.to_csv('linkedin_jobs.csv', index=False)
|
flexible
|
{
"blob_id": "6ae529a5e5658ba409ec3e7284d8b2911c60dd00",
"index": 906,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndf.to_csv('linkedin_jobs.csv', index=False)\n",
"step-3": "<mask token>\nchrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')\ndf = get_jobs('Data Scientist', 40, False, chrome_driver_path)\ndf.to_csv('linkedin_jobs.csv', index=False)\n",
"step-4": "import os\nfrom linkedin_scraper import get_jobs\nchrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')\ndf = get_jobs('Data Scientist', 40, False, chrome_driver_path)\ndf.to_csv('linkedin_jobs.csv', index=False)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''Load the data and preprocess it'''
# step 1: load the data
import pandas as pd
with open('D:\\Desktop\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:
df = pd.read_csv(data_obj)
# Step 2: preprocess the data
# One-hot encode the discrete attributes, turning qualitative values into quantitative ones,
# so that every value a feature can take becomes a new feature
# (this adds features: Categorical Variable -> Dummy Variable)
# Two approaches: Dummy Encoding vs One Hot Encoding
# Both convert a categorical variable into quantitative features
# The difference: dummy encoding turns a categorical variable into n-1 feature variables,
# while one-hot encoding turns it into n feature variables (which risks the dummy-variable trap)
# pandas' own get_dummies() can convert every nominal variable in the dataset into dummy variables
# sklearn's OneHotEncoder can also turn nominal variables into dummy variables (non-numeric columns
# must first be encoded to numbers with LabelEncoder, and it only handles a single column at a time)
# pybrain's _convertToOneOfMany() converts the target classes to a 1-of-k representation, retaining the old targets as a field class,
#     i.e. it one-hot encodes the target class while keeping the original target as a field
'''
dataset = pd.get_dummies(df, columns=df.columns[:6]) # turn the discrete attributes into dummy variables
dataset = pd.get_dummies(dataset, columns=[df.columns[8]]) # turn the label into dummy variables
                                                           # columns expects a sequence-like object, a single string will not work
'''
dataset = pd.get_dummies(df)
pd.set_option('display.max_columns', 1000) # show all of the columns
X = dataset[dataset.columns[:-2]]
Y = dataset[dataset.columns[-2:]]
labels = dataset.columns._data[-2:]
# Step 3: convert the data into a SupervisedDataSet/ClassificationDataSet object
from pybrain.datasets import ClassificationDataSet
ds = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
for i in range(len(Y)):
y = 0
if Y['好瓜_是'][i] == 1:
y = 1
ds.appendLinked(X.ix[i], y)
ds.calculateStatistics() # returns a class histogram? not sure what this is doing
# Step 4: split the test set and the training set
testdata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
testdata_temp, traindata_temp = ds.splitWithProportion(0.25)
for n in range(testdata_temp.getLength()):
testdata.appendLinked(testdata_temp.getSample(n)[0],testdata_temp.getSample(n)[1])
print(testdata)
testdata._convertToOneOfMany()
print(testdata)
traindata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
for n in range(traindata_temp.getLength()):
traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.getSample(n)[1])
traindata._convertToOneOfMany()
'''
# using sklearn's OneHotEncoder
# drawback: it only works on one column at a time, so the results must be recombined afterwards, which is tedious
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
a = LabelEncoder().fit_transform(df[df.columns[0]])
# dataset_One = OneHotEncoder.fit(df.values[])
# print(df['色泽']) # a standalone Series?
print(a)
aaa = OneHotEncoder(sparse=False).fit_transform(a.reshape(-1, 1))
print(aaa)
# how to recombine the columns is not written yet
'''
'''Now build the neural network'''
# Step 1: create the neural network skeleton
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import SoftmaxLayer
# the input is 19-dimensional, the output is 2-dimensional, and the hidden layer size is set to 5
# the output layer uses Softmax activation; other options: learning rate (learningrate=0.01),
# learning-rate decay (lrdecay=1.0, the learning rate is multiplied by this after each training step),
# verbose (verbose=False), momentum factor (momentum=0, gradient of the last time step?), weight decay? (weightdecay=0.0)
n_h = 5
net = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)
# Step 2: build the feed-forward network with the standard BP algorithm
from pybrain.supervised import BackpropTrainer
trainer_sd = BackpropTrainer(net, traindata)
# # or use the accumulated (batch) BP algorithm, training for 50 epochs
# trainer_ac = BackpropTrainer(net, traindata, batchlearning=True)
# trainer_ac.trainEpochs(50)
# err_train, err_valid = trainer_ac.trainUntilConvergence(maxEpochs=50)
for i in range(50): # train 50 times, printing the training and test results each time
    trainer_sd.trainEpochs(1) # train the network for one epoch
    # compute the training error and the test error
from pybrain.utilities import percentError
trainresult = percentError(trainer_sd.testOnClassData(), traindata['class'])
testresult = percentError(trainer_sd.testOnClassData(dataset=testdata), testdata['class'])
    # print the error rates
print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult, 'test error: ', testresult)
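# Note: buildNetwork(19, n_h, 2, outclass=SoftmaxLayer) above yields a 19-5-2
# feed-forward network, matching the 19 dummy-coded input features and the two
# one-hot target classes produced by _convertToOneOfMany().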
|
normal
|
{
"blob_id": "682b3e1d6d40f4b279052ac27df19268d227fef8",
"index": 6899,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('D:\\\\Desktop\\\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:\n df = pd.read_csv(data_obj)\n<mask token>\npd.set_option('display.max_columns', 1000)\n<mask token>\nfor i in range(len(Y)):\n y = 0\n if Y['好瓜_是'][i] == 1:\n y = 1\n ds.appendLinked(X.ix[i], y)\nds.calculateStatistics()\n<mask token>\nfor n in range(testdata_temp.getLength()):\n testdata.appendLinked(testdata_temp.getSample(n)[0], testdata_temp.\n getSample(n)[1])\nprint(testdata)\ntestdata._convertToOneOfMany()\nprint(testdata)\n<mask token>\nfor n in range(traindata_temp.getLength()):\n traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.\n getSample(n)[1])\ntraindata._convertToOneOfMany()\n<mask token>\nfor i in range(50):\n trainer_sd.trainEpochs(1)\n from pybrain.utilities import percentError\n trainresult = percentError(trainer_sd.testOnClassData(), traindata['class']\n )\n testresult = percentError(trainer_sd.testOnClassData(dataset=testdata),\n testdata['class'])\n print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult,\n 'test error: ', testresult)\n",
"step-3": "<mask token>\nwith open('D:\\\\Desktop\\\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:\n df = pd.read_csv(data_obj)\n<mask token>\ndataset = pd.get_dummies(df)\npd.set_option('display.max_columns', 1000)\nX = dataset[dataset.columns[:-2]]\nY = dataset[dataset.columns[-2:]]\nlabels = dataset.columns._data[-2:]\n<mask token>\nds = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor i in range(len(Y)):\n y = 0\n if Y['好瓜_是'][i] == 1:\n y = 1\n ds.appendLinked(X.ix[i], y)\nds.calculateStatistics()\ntestdata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\ntestdata_temp, traindata_temp = ds.splitWithProportion(0.25)\nfor n in range(testdata_temp.getLength()):\n testdata.appendLinked(testdata_temp.getSample(n)[0], testdata_temp.\n getSample(n)[1])\nprint(testdata)\ntestdata._convertToOneOfMany()\nprint(testdata)\ntraindata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor n in range(traindata_temp.getLength()):\n traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.\n getSample(n)[1])\ntraindata._convertToOneOfMany()\n<mask token>\nn_h = 5\nnet = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)\n<mask token>\ntrainer_sd = BackpropTrainer(net, traindata)\nfor i in range(50):\n trainer_sd.trainEpochs(1)\n from pybrain.utilities import percentError\n trainresult = percentError(trainer_sd.testOnClassData(), traindata['class']\n )\n testresult = percentError(trainer_sd.testOnClassData(dataset=testdata),\n testdata['class'])\n print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult,\n 'test error: ', testresult)\n",
"step-4": "<mask token>\nimport pandas as pd\nwith open('D:\\\\Desktop\\\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:\n df = pd.read_csv(data_obj)\n<mask token>\ndataset = pd.get_dummies(df)\npd.set_option('display.max_columns', 1000)\nX = dataset[dataset.columns[:-2]]\nY = dataset[dataset.columns[-2:]]\nlabels = dataset.columns._data[-2:]\nfrom pybrain.datasets import ClassificationDataSet\nds = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor i in range(len(Y)):\n y = 0\n if Y['好瓜_是'][i] == 1:\n y = 1\n ds.appendLinked(X.ix[i], y)\nds.calculateStatistics()\ntestdata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\ntestdata_temp, traindata_temp = ds.splitWithProportion(0.25)\nfor n in range(testdata_temp.getLength()):\n testdata.appendLinked(testdata_temp.getSample(n)[0], testdata_temp.\n getSample(n)[1])\nprint(testdata)\ntestdata._convertToOneOfMany()\nprint(testdata)\ntraindata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor n in range(traindata_temp.getLength()):\n traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.\n getSample(n)[1])\ntraindata._convertToOneOfMany()\n<mask token>\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.structure import SoftmaxLayer\nn_h = 5\nnet = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)\nfrom pybrain.supervised import BackpropTrainer\ntrainer_sd = BackpropTrainer(net, traindata)\nfor i in range(50):\n trainer_sd.trainEpochs(1)\n from pybrain.utilities import percentError\n trainresult = percentError(trainer_sd.testOnClassData(), traindata['class']\n )\n testresult = percentError(trainer_sd.testOnClassData(dataset=testdata),\n testdata['class'])\n print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult,\n 'test error: ', testresult)\n",
"step-5": "'''引入数据,并对数据进行预处理'''\n\n# step 1 引入数据\nimport pandas as pd\nwith open('D:\\\\Desktop\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:\n df = pd.read_csv(data_obj)\n\n# Step 2 对数据进行预处理\n# 对离散属性进行独热编码,定性转为定量,使每一个特征的取值作为一个新的特征\n# 增加特征量 Catagorical Variable -> Dummy Variable\n# 两种方法:Dummy Encoding VS One Hot Encoding\n# 相同点:将Catagorical Variable转换为定量特征\n# 不同点:Dummy Variable将Catagorical Variable转为n-1个特征变量\n# One Hot Encoding 将其转换为n个特征变量,但会存在哑变量陷阱问题\n# pandas自带的get_dummies()函数,可以将数据集中的所有标称变量转为哑变量\n# sklearn 中的OneHotEncoder 也可以实现标称变量转为哑变量(注意要将非数字型提前通过LabelEncoder编码为数字类型,再进行转换,且只能处理单列属性)\n# pybrain中的_convertToOneOfMany()可以Converts the target classes to a 1-of-k representation, retaining the old targets as a field class.\n # 对target class独热编码,并且保留原target为字段类\n'''\ndataset = pd.get_dummies(df, columns=df.columns[:6]) # 将离散属性变为哑变量\ndataset = pd.get_dummies(dataset, columns=[df.columns[8]]) # 将标签转为哑变量\n # columns接受序列形式的对象,单个字符串不行\n'''\ndataset = pd.get_dummies(df)\npd.set_option('display.max_columns', 1000) # 把所有的列全部显示出来\n\nX = dataset[dataset.columns[:-2]]\nY = dataset[dataset.columns[-2:]]\nlabels = dataset.columns._data[-2:]\n\n# Step 3:将数据转换为SupervisedDataSet/ClassificationDtaSet对象\nfrom pybrain.datasets import ClassificationDataSet\nds = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor i in range(len(Y)):\n y = 0\n if Y['好瓜_是'][i] == 1:\n y = 1\n ds.appendLinked(X.ix[i], y)\nds.calculateStatistics() # 返回一个类直方图?搞不懂在做什么\n\n# Step 4: 分开测试集和训练集\ntestdata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\ntestdata_temp, traindata_temp = ds.splitWithProportion(0.25)\nfor n in range(testdata_temp.getLength()):\n testdata.appendLinked(testdata_temp.getSample(n)[0],testdata_temp.getSample(n)[1])\nprint(testdata)\ntestdata._convertToOneOfMany()\nprint(testdata)\ntraindata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor n in range(traindata_temp.getLength()):\n traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.getSample(n)[1])\ntraindata._convertToOneOfMany()\n'''\n# 使用sklean的OneHotEncoder\n# 缺点是只能单列进行操作,最后再复合,麻烦\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import LabelEncoder\na = LabelEncoder().fit_transform(df[df.columns[0]])\n# dataset_One = OneHotEncoder.fit(df.values[])\n# print(df['色泽']) # 单独的Series?\nprint(a)\naaa = OneHotEncoder(sparse=False).fit_transform(a.reshape(-1, 1))\nprint(aaa)\n# 怎么复合暂时没写\n'''\n\n'''开始整神经网络'''\n\n# Step 1 :创建神经网络框架\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.structure import SoftmaxLayer\n# 输入数据是 19维,输出是两维,隐层设置为5层\n# 输出层使用Softmax激活,其他:学习率(learningrate=0.01),学习率衰减(lrdecay=1.0,每次训练一步学习率乘以),\n# 详细(verbose=False)动量因子(momentum=0最后时步的梯度?),权值衰减?(weightdecay=0.0)\n\nn_h = 5\nnet = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)\n\n# Step 2 : 构建前馈网络标准BP算法\nfrom pybrain.supervised import BackpropTrainer\ntrainer_sd = BackpropTrainer(net, traindata)\n\n# # 或者使用累积BP算法,训练次数50次\n# trainer_ac = BackpropTrainer(net, traindata, batchlearning=True)\n# trainer_ac.trainEpochs(50)\n# err_train, err_valid = trainer_ac.trainUntilConvergence(maxEpochs=50)\n\nfor i in range(50): # 训练50次,每及测试结果次打印训练结果\n trainer_sd.trainEpochs(1) # 训练网络一次,\n\n # 引入训练误差和测试误差\n from pybrain.utilities import percentError\n trainresult = percentError(trainer_sd.testOnClassData(), traindata['class'])\n testresult = percentError(trainer_sd.testOnClassData(dataset=testdata), testdata['class'])\n # 打印错误率\n print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult, 
'test error: ', testresult)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
from Ejercicio1 import norma_l2
def sorting_l2(mat):
mat_l2 = norma_l2(mat)
mat_sort_index = np.argsort(mat_l2)
mat_sort_l2 = mat[mat_sort_index, :]
return mat_sort_l2[::-1]
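# Example (assuming norma_l2 returns the per-row L2 norms):
# sorting_l2(np.array([[1, 0], [3, 4]])) reorders the rows by descending norm,
# giving [[3, 4], [1, 0]].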
|
normal
|
{
"blob_id": "e280b003c95681ed4a887b0939077efeac9deefe",
"index": 1377,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sorting_l2(mat):\n mat_l2 = norma_l2(mat)\n mat_sort_index = np.argsort(mat_l2)\n mat_sort_l2 = mat[mat_sort_index, :]\n return mat_sort_l2[::-1]\n",
"step-3": "import numpy as np\nfrom Ejercicio1 import norma_l2\n\n\ndef sorting_l2(mat):\n mat_l2 = norma_l2(mat)\n mat_sort_index = np.argsort(mat_l2)\n mat_sort_l2 = mat[mat_sort_index, :]\n return mat_sort_l2[::-1]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
"""
Use version of DriverSlave that has pixmap and pixheights
"""
import threading
# import base classes and driver
from bibliopixel import LEDStrip, LEDMatrix
# from bibliopixel.drivers.LPD8806 import DriverLPD8806, ChannelOrder
from bibliopixel.drivers.visualizer import DriverVisualizer, ChannelOrder
from bibliopixel.drivers.slave_driver import DriverSlave
# import colors
import bibliopixel.colors
from bibliopixel.animation import BaseStripAnim
from logging import DEBUG, INFO, WARNING, CRITICAL, ERROR
from bibliopixel import log
log.setLogLevel(WARNING)
import re
import time
from operator import or_, ior, ixor
import matplotlib.pyplot as plt
import BiblioPixelAnimations.matrix.bloom as BA
class MasterAnimation(BaseStripAnim):
"""
Takes copies of fake leds, combines using heights and mixing to fill and update
a led
NEED now ledcopies is list of the leds associated with each animation
NEED also mapping of the leds into master led (i.e. path list)
NEED also height of each animations and merging method if same height
"""
def __init__(self, led, animcopies, start=0, end=-1):
super(MasterAnimation, self).__init__(led, start, end)
if not isinstance(animcopies, list):
animcopies = [animcopies]
self._animcopies = animcopies
self._ledcopies = [a._led for a, f in animcopies]
self._idlelist = []
        self.timedata = [[] for _ in range(len(self._ledcopies))] # note: [[]] * 5 would NOT create 5 distinct lists!
self._led.pixheights = [0] * self._led.numLEDs
# def preRun(self, amt=1):
# self._led.all_off()
# for w, f in self._animcopies:
# w.run(fps=f, max_steps=runtime * f, threaded = True)
def preStep(self, amt=1):
#print 'prestep {}'.format(self._step)
# only step the master thread when something from ledcopies
# has been done i.e. its event _wait must be false (I THINK)
# TODO is this good code???? or is there a better way to block
        self._idlelist = [True] # to ensure we go through the while loop at least once
while all(self._idlelist):
self._idlelist = [not ledcopy.driver[0]._updatenow.isSet() for ledcopy in self._ledcopies]
if self._stopEvent.isSet():
self.animComplete = True
print 'breaking out'
break
#
def postStep(self, amt=1):
# clear the ones found in preStep
activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
[self._ledcopies[i].driver[0]._updatenow.clear() for i in activewormind]
def step(self, amt=1):
"""
        combines the buffers from the slave leds,
        which then get sent to the master led via update
        """
        # For checking if all the animations have had their frames looked at
#activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
#print "Worm {} at {:5g}".format(activewormind, 1000*(time.time() - starttime))
# save times activated for each worm
[self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]
#self._led.buffer = [0] * 480
self._led.pixheights = [-100] * self._led.numLEDs
#print type(self._led.buffer)
for ledcopy in self._ledcopies:
# self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)
# use pixheights but assume all buffers same size
# print ledcopy.driver[0].pixheights
for pix in range(self._led.numLEDs):
#for ledcopy in self._ledcopies:
if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]
elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]
self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix]
self._step += 1
def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, joinThread = False, callback=None):
#def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, threaded = True, joinThread = False, callback=None):
# self.fps = fps
# self.untilComplete = untilComplete
super(MasterAnimation, self).run(amt = 1, fps=fps, sleep=None, max_steps = 0, untilComplete = untilComplete, max_cycles = 0, threaded = True, joinThread = joinThread, callback=callback)
class Worm(BaseStripAnim):
"""
colors a list the worm segment (starting with head) colors
path a list of the LED indices over which the worm will travel
cyclelen controls speed, worm movement only when LED upload
cycles == 0 mod cyclelen
height (of worm segments) is same length as colors: higher
value worms segments go over top of lower value worms
"""
def __init__(self, led, colors, path, cyclelen, direction=1,
height=None, start=0, end=-1):
super(Worm, self).__init__(led, start, end)
if height is None:
height = [0]*len(colors)
elif type(height) == int:
height = [height]*len(colors)
self._colors = colors
self._colors.append((0, 0, 0)) # add blank seqment to end worm
self._path = path
self._cyclelen = cyclelen
self._height = height
self._height.append(-1) # add lowest value for height
self._activecount = 0
self._direction = direction
self._headposition = -self._direction
#print self._colors
#print self._height
def step(self, amt=1):
if self._activecount == 0:
self._headposition += amt*self._direction
self._headposition %= len(self._path)
# Put worm into strip and blank end
segpos = self._headposition
for x in range(len(self._colors)):
if True: #self._height[x] >= LEDsegheights[self._path[segpos]]: # or x == len(self.colors) - 1:
#if self._height[x] >= self._led.driver[0].pixheights[self._path[segpos]]: # or x == len(self.colors) - 1:
self._led.set(self._path[segpos], self._colors[x])
self._led.driver[0].pixheights[self._path[segpos]] = self._height[x]
segpos -= self._direction
segpos %= len(self._path)
self._activecount += amt
self._activecount %= self._cyclelen
self._step += amt
def pathgen(nleft=0, nright=15, nbot=0, ntop=9, shift=0, turns=10, rounds=16):
"""
    A path around a rectangle on a strip wound helically,
    turns high by rounds around (defaults: 10 turns by 16 rounds).
    rounds * turns must equal the number of pixels on the strip;
    nleft and nright are from 0 to rounds-1,
    nbot and ntop from 0 to turns-1
"""
def ind(x, y):
return x + y * rounds
    assert 0 <= nleft <= nright - 1 <= rounds and 0 <= nbot <= ntop - 1 <= turns
nled = rounds*turns
sleft = range(ind(nleft, nbot), ind(nleft, ntop), rounds)
tp = range(ind(nleft, ntop), ind(nright, ntop), 1)
sright = range(ind(nright, ntop), ind(nright, nbot), -rounds)
bt = range(ind(nright, nbot), ind(nleft, nbot), -1)
path = sleft+tp+sright+bt
if len(path) == 0:
path = [ind(nleft, nbot)]
path = map(lambda x: (shift+x) % nled, path)
log.logger.info("pathgen({}, {}, {}, {}, {}) is {}".format(nleft, nright, nbot, ntop, shift, path))
return path
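# Editorial worked example (not in the original).  On a small strip wound
# 3 turns high by 4 rounds around, the path walks the left side up, the top
# row right, the right side down and the bottom row left:
#
#   >>> pathgen(nleft=0, nright=3, nbot=0, ntop=2, shift=0, turns=3, rounds=4)
#   [0, 4, 8, 9, 10, 11, 7, 3, 2, 1]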
if True: #__name__ == '__main__':
drivermaster = DriverVisualizer(160, pixelSize=62, stayTop=False, maxWindowWidth=1024)
# using pixelSize 62 and changed code of visualizer.py to have maxWindowWidth=1024
#drivermaster = DriverVisualizer(160, pixelSize=31, stayTop=False)
#ledmaster = LEDStrip(drivermaster, threadedUpdate=True)
ledmaster = LEDStrip(drivermaster)
lnin = [255, 222, 200, 150, 125]
bluedimming = [(0, 0, i) for i in lnin]
bluedimming = [(0, 0, 0) for i in lnin]
reddimming = [(i, 0, 0) for i in lnin]
greendimming = [(0, i, 0) for i in lnin]
cyandimming = [(0, i, i) for i in lnin]
whitedimming = [(i, i, i) for i in lnin]
# Worm arguments
wormblue = (bluedimming, pathgen(5, 10, 0, 9), 1, 1, 6)
wormred = (reddimming, pathgen(1, 14, 1, 8), 1, 1, 2)
wormgreen = (greendimming, pathgen(2, 13, 2, 7), 1, 1, 3)
wormcyan = (cyandimming, pathgen(3, 12, 3, 6), 1, 1, 4)
wormwhite = (whitedimming, pathgen(4, 11, 4, 5), 1, 1, 5)
    # List of pairs (animation arguments, fps)
wormdatalist = [(wormblue, 24), (wormred, 20), (wormgreen, 16), (wormcyan, 12), (wormwhite, 8)]
#wormdatalist = [(wormwhite, 8)]
#wormdatalist = []
    # dummy strips must each have their own slave driver, as the update
    # thread is attached to the driver
ledslaves = [LEDStrip(DriverSlave(160, pixheights=-1), threadedUpdate=True) for _ in range(len(wormdatalist))]
    # Make the Worm animations a list of pairs (animation, fps)
wormlist = [(Worm(ledslaves[i], *d[0]), d[1]) for i, d in enumerate(wormdatalist)]
ledslaveb = LEDMatrix(DriverSlave(160, None, 0), width=16, height=10, threadedUpdate=True)
bloom = BA.Bloom(ledslaveb)
wormlist.append((bloom, 10))
#masteranimation = MasterAnimation(ledmaster, [w._led for w, f in wormlist])
masteranimation = MasterAnimation(ledmaster, wormlist)
starttime = time.time()
runtime = 1
    # Master steps when it gets a go-ahead signal from one of the
    # concurrent animations
    masteranimation.run(fps=None)  # if an fps is given for the master it will skip faster frames
    # Run all the slave animations threaded (the master above is also threaded).
    # The slave animations update their buffers at the correct time and,
    # rather than pushing to the LEDs themselves, just signal the master
    # that they are ready to be combined and sent to the actual leds
for w, f in wormlist:
w.run(fps=f, max_steps=runtime * f, threaded = True)
#print threading.enumerate()
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
    # idle here (busy-wait) while the threaded animations run, until they have all finished
while not all([w.stopped() for w, f in wormlist]):
pass
# stop the master
masteranimation.stopThread(True) # need True
print "Master Animation Step Count {}".format(masteranimation._step)
ledmaster.waitForUpdate()
ledmaster.stopUpdateThreads()
[w._led.stopUpdateThreads() for w, f in wormlist]
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
plt.clf()
col = 'brgcwk'
    for i in range(len(wormlist)):
        plt.plot(masteranimation.timedata[i], [i] * len(masteranimation.timedata[i]), col[i % 6] + 'o')
ax = plt.axis()
delx = .01 * (ax[1] - ax[0])
plt.axis([ax[0]-delx, ax[1]+delx, ax[2]-1, ax[3]+1])
|
normal
|
{
"blob_id": "307e7a059f9b0b1131f8a57d0f55cf0ee05173e8",
"index": 9822,
"step-1": "#!/usr/bin/env python\n\"\"\"\nUse version of DriverSlave that has pixmap and pixheights\n\"\"\"\nimport threading\n# import base classes and driver\nfrom bibliopixel import LEDStrip, LEDMatrix\n# from bibliopixel.drivers.LPD8806 import DriverLPD8806, ChannelOrder\nfrom bibliopixel.drivers.visualizer import DriverVisualizer, ChannelOrder\nfrom bibliopixel.drivers.slave_driver import DriverSlave\n# import colors\nimport bibliopixel.colors\nfrom bibliopixel.animation import BaseStripAnim\nfrom logging import DEBUG, INFO, WARNING, CRITICAL, ERROR\nfrom bibliopixel import log\nlog.setLogLevel(WARNING)\nimport re\nimport time\nfrom operator import or_, ior, ixor\nimport matplotlib.pyplot as plt\nimport BiblioPixelAnimations.matrix.bloom as BA\n\n\nclass MasterAnimation(BaseStripAnim):\n \"\"\"\n Takes copies of fake leds, combines using heights and mixing to fill and update\n a led\n NEED now ledcopies is list of the leds associated with each animation\n NEED also mapping of the leds into master led (i.e. path list)\n NEED also height of each animations and merging method if same height\n \"\"\"\n def __init__(self, led, animcopies, start=0, end=-1):\n super(MasterAnimation, self).__init__(led, start, end)\n if not isinstance(animcopies, list):\n animcopies = [animcopies]\n self._animcopies = animcopies\n self._ledcopies = [a._led for a, f in animcopies]\n self._idlelist = []\n self.timedata = [[] for _ in range(len(self._ledcopies))] # [[]] * 5 NOT define 5 different lists!\n self._led.pixheights = [0] * self._led.numLEDs\n\n# def preRun(self, amt=1): \n# self._led.all_off()\n# for w, f in self._animcopies:\n# w.run(fps=f, max_steps=runtime * f, threaded = True)\n\n\t\n def preStep(self, amt=1):\n #print 'prestep {}'.format(self._step)\n # only step the master thread when something from ledcopies\n # has been done i.e. its event _wait must be false (I THINK)\n # TODO is this good code???? 
or is there a better way to block\n self._idlelist = [True] # to insure goes thru while loop at least once\n while all(self._idlelist):\n self._idlelist = [not ledcopy.driver[0]._updatenow.isSet() for ledcopy in self._ledcopies]\n if self._stopEvent.isSet():\n self.animComplete = True\n print 'breaking out'\n break\n# \n def postStep(self, amt=1):\n # clear the ones found in preStep\n activewormind = [i for i, x in enumerate(self._idlelist) if x == False]\n [self._ledcopies[i].driver[0]._updatenow.clear() for i in activewormind]\n \n def step(self, amt=1):\n \"\"\"\n combines the buffers from the slave led's\n which then gets sent to led via update\n \"\"\"\n \n # For checking if all the animations have their framse looked at\n #activewormind = [i for i, x in enumerate(self._idlelist) if x == False]\n #print \"Worm {} at {:5g}\".format(activewormind, 1000*(time.time() - starttime))\n # save times activated for each worm \n [self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]\n \n #self._led.buffer = [0] * 480\n self._led.pixheights = [-100] * self._led.numLEDs\n #print type(self._led.buffer)\n for ledcopy in self._ledcopies:\n # self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)\n # use pixheights but assume all buffers same size\n # print ledcopy.driver[0].pixheights\n for pix in range(self._led.numLEDs):\n #for ledcopy in self._ledcopies:\n if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:\n for i in range(3):\n self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]\n elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:\n for i in range(3):\n self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]\n self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix] \n self._step += 1\n \n def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, joinThread = False, callback=None):\n #def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, threaded = True, joinThread = False, callback=None):\n # self.fps = fps\n # self.untilComplete = untilComplete\n super(MasterAnimation, self).run(amt = 1, fps=fps, sleep=None, max_steps = 0, untilComplete = untilComplete, max_cycles = 0, threaded = True, joinThread = joinThread, callback=callback)\n \n\nclass Worm(BaseStripAnim):\n \"\"\"\n colors a list the worm segment (starting with head) colors\n path a list of the LED indices over which the worm will travel\n cyclelen controls speed, worm movement only when LED upload\n cycles == 0 mod cyclelen\n height (of worm segments) is same length as colors: higher\n value worms segments go over top of lower value worms\n \"\"\"\n def __init__(self, led, colors, path, cyclelen, direction=1,\n height=None, start=0, end=-1):\n super(Worm, self).__init__(led, start, end)\n if height is None:\n height = [0]*len(colors)\n elif type(height) == int:\n height = [height]*len(colors)\n self._colors = colors\n self._colors.append((0, 0, 0)) # add blank seqment to end worm\n self._path = path\n self._cyclelen = cyclelen\n self._height = height\n self._height.append(-1) # add lowest value for height\n self._activecount = 0\n self._direction = direction\n self._headposition = -self._direction\n #print self._colors\n #print self._height\n\n def step(self, amt=1):\n if self._activecount == 0:\n self._headposition += amt*self._direction\n self._headposition %= len(self._path)\n # Put worm into strip and blank end\n segpos = self._headposition\n 
for x in range(len(self._colors)):\n if True: #self._height[x] >= LEDsegheights[self._path[segpos]]: # or x == len(self.colors) - 1:\n #if self._height[x] >= self._led.driver[0].pixheights[self._path[segpos]]: # or x == len(self.colors) - 1:\n self._led.set(self._path[segpos], self._colors[x])\n self._led.driver[0].pixheights[self._path[segpos]] = self._height[x]\n segpos -= self._direction\n segpos %= len(self._path)\n self._activecount += amt\n self._activecount %= self._cyclelen\n self._step += amt\n\ndef pathgen(nleft=0, nright=15, nbot=0, ntop=9, shift=0, turns=10, rounds=16):\n \"\"\"\n A path around a rectangle from strip wound helically\n 10 turns high by 16 round.\n rounds * turns must be number of pixels on strip\n nleft and nright is from 0 to rounds-1, \n nbot and ntop from 0 to turns-1\n \"\"\"\n def ind(x, y):\n return x + y * rounds\n \n assert 0 <= nleft <= nright -1 <= rounds and 0 <= nbot <= ntop -1 <= turns\n \n nled = rounds*turns\n sleft = range(ind(nleft, nbot), ind(nleft, ntop), rounds)\n tp = range(ind(nleft, ntop), ind(nright, ntop), 1)\n sright = range(ind(nright, ntop), ind(nright, nbot), -rounds)\n bt = range(ind(nright, nbot), ind(nleft, nbot), -1)\n path = sleft+tp+sright+bt\n if len(path) == 0:\n path = [ind(nleft, nbot)]\n path = map(lambda x: (shift+x) % nled, path)\n log.logger.info(\"pathgen({}, {}, {}, {}, {}) is {}\".format(nleft, nright, nbot, ntop, shift, path))\n return path \n\nif True: #__name__ == '__main__': \n drivermaster = DriverVisualizer(160, pixelSize=62, stayTop=False, maxWindowWidth=1024)\n # using pixelSize 62 and changed code of visualizer.py to have maxWindowWidth=1024\n #drivermaster = DriverVisualizer(160, pixelSize=31, stayTop=False)\n #ledmaster = LEDStrip(drivermaster, threadedUpdate=True)\n ledmaster = LEDStrip(drivermaster)\n \n lnin = [255, 222, 200, 150, 125]\n bluedimming = [(0, 0, i) for i in lnin]\n bluedimming = [(0, 0, 0) for i in lnin]\n reddimming = [(i, 0, 0) for i in lnin]\n greendimming = [(0, i, 0) for i in lnin]\n cyandimming = [(0, i, i) for i in lnin]\n whitedimming = [(i, i, i) for i in lnin]\n \n # Worm arguments\n wormblue = (bluedimming, pathgen(5, 10, 0, 9), 1, 1, 6)\n wormred = (reddimming, pathgen(1, 14, 1, 8), 1, 1, 2)\n wormgreen = (greendimming, pathgen(2, 13, 2, 7), 1, 1, 3)\n wormcyan = (cyandimming, pathgen(3, 12, 3, 6), 1, 1, 4)\n wormwhite = (whitedimming, pathgen(4, 11, 4, 5), 1, 1, 5)\n\n # List of pair (animation arguments, fps)\n wormdatalist = [(wormblue, 24), (wormred, 20), (wormgreen, 16), (wormcyan, 12), (wormwhite, 8)]\n #wormdatalist = [(wormwhite, 8)]\n #wormdatalist = []\n \n # dummy strips must each have their own slavedriver as thread is attached\n # to the driver\n ledslaves = [LEDStrip(DriverSlave(160, pixheights=-1), threadedUpdate=True) for _ in range(len(wormdatalist))]\n \n # Make the Worm animations an list pairs (animation, fps)\n wormlist = [(Worm(ledslaves[i], *d[0]), d[1]) for i, d in enumerate(wormdatalist)]\n \n ledslaveb = LEDMatrix(DriverSlave(160, None, 0), width=16, height=10, threadedUpdate=True)\n bloom = BA.Bloom(ledslaveb)\n wormlist.append((bloom, 10))\n \n #masteranimation = MasterAnimation(ledmaster, [w._led for w, f in wormlist])\n masteranimation = MasterAnimation(ledmaster, wormlist)\n\n starttime = time.time()\n runtime = 1\n \n # Master steps when it gets a go ahdead signal from one of the\n # concurrent annimations\n masteranimation.run(fps=None) # if give fps for master will skip faster frames \n \n # Run all the slave animations and master threaded\n # 
The slave animations update their buffers at the correct\n # time and rather than update, just signal the master they \n # are ready to be combined and sent to the actual leds\n\n for w, f in wormlist:\n w.run(fps=f, max_steps=runtime * f, threaded = True)\n \n\n #print threading.enumerate()\n print \"THREADS: \" + \",\".join([re.sub('<class |,|bibliopixel.\\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])\n\n \n # idle and threaded animations will run jointly\n while not all([w.stopped() for w, f in wormlist]):\n pass \n \n # stop the master\n masteranimation.stopThread(True) # need True\n \n print \"Master Animation Step Count {}\".format(masteranimation._step)\n ledmaster.waitForUpdate()\n ledmaster.stopUpdateThreads()\n \n [w._led.stopUpdateThreads() for w, f in wormlist]\n \n print \"THREADS: \" + \",\".join([re.sub('<class |,|bibliopixel.\\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])\n\n plt.clf()\n col = 'brgcwk'\n [plt.plot(masteranimation.timedata[i], [i] * len(masteranimation.timedata[i]), col[i%6]+'o') for i in range(len(wormlist))]\n ax = plt.axis()\n delx = .01 * (ax[1] - ax[0])\n plt.axis([ax[0]-delx, ax[1]+delx, ax[2]-1, ax[3]+1]) \n \n \n\n\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def even(n):
if n == 0 or n == 1:
return
elif n == 2:
return 2
else:
for i in reversed(range(n + 1)):
if 2 ** i < n:
return 2 ** i
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def even(n):
if n == 0 or n == 1:
return
elif n == 2:
return 2
else:
for i in reversed(range(n + 1)):
if 2 ** i < n:
return 2 ** i
<|reserved_special_token_0|>
for i in range(t):
n = int(input())
ans = even(n)
arr.append(ans)
for i in range(len(arr)):
print(arr[i], end=' ')
<|reserved_special_token_0|>
for i in range(t):
n = int(input())
print(pow(2, int(math.log(n, 2))))
<|reserved_special_token_1|>
def even(n):
if n == 0 or n == 1:
return
elif n == 2:
return 2
else:
for i in reversed(range(n + 1)):
if 2 ** i < n:
return 2 ** i
t = int(input('Enter number of test cases:'))
arr = []
for i in range(t):
n = int(input())
ans = even(n)
arr.append(ans)
for i in range(len(arr)):
print(arr[i], end=' ')
<|reserved_special_token_0|>
t = int(input())
for i in range(t):
n = int(input())
print(pow(2, int(math.log(n, 2))))
<|reserved_special_token_1|>
def even(n):
if n == 0 or n == 1:
return
elif n == 2:
return 2
else:
for i in reversed(range(n + 1)):
if 2 ** i < n:
return 2 ** i
t = int(input('Enter number of test cases:'))
arr = []
for i in range(t):
n = int(input())
ans = even(n)
arr.append(ans)
for i in range(len(arr)):
print(arr[i], end=' ')
import math
t = int(input())
for i in range(t):
n = int(input())
print(pow(2, int(math.log(n, 2))))
<|reserved_special_token_1|>
#Some people are standing in a queue. A selection process follows a rule where people standing on even positions are selected. Of the selected people a queue is formed and again out of these only people on even position are selected. This continues until we are left with one person. Find out the position of that person in the original queue.
#Input:
#The first line of input contains an integer T denoting the number of test cases.The first line of each test case is N,number of people standing in a queue.
#Output:
#Print the position(original queue) of that person who is left.
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def even(n):
if n == 0 or n == 1:
return
elif n == 2:
return 2
else:
for i in reversed(range(n+1)):
if 2**i < n:
return 2**i
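# Editorial note (not in the original): for N = 6 the surviving positions are
# 2, 4, 6 and then 4, so the answer is the largest power of two <= N.  Because
# the loop above uses a strict "<", even(N) returns N // 2 rather than N when
# N is itself a power of two (e.g. even(8) == 4).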
t = int(input("Enter number of test cases:"))
arr = []
for i in range(t):
n = int(input())
ans = even(n)
arr.append(ans)
for i in range(len(arr)):
print(arr[i], end = ' ')
# --------------------------------------------------------------------------------------------------------------------
import math
t = int(input())
for i in range(t):
n =int(input())
print(pow(2,int(math.log(n,2))))
|
flexible
|
{
"blob_id": "358fd8efd5c3823255ab64d5f8b88b343415ed0e",
"index": 2708,
"step-1": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\n<mask token>\n",
"step-2": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\n<mask token>\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)):\n print(arr[i], end=' ')\n<mask token>\nfor i in range(t):\n n = int(input())\n print(pow(2, int(math.log(n, 2))))\n",
"step-3": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\nt = int(input('Enter number of test cases:'))\narr = []\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)):\n print(arr[i], end=' ')\n<mask token>\nt = int(input())\nfor i in range(t):\n n = int(input())\n print(pow(2, int(math.log(n, 2))))\n",
"step-4": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\nt = int(input('Enter number of test cases:'))\narr = []\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)):\n print(arr[i], end=' ')\nimport math\nt = int(input())\nfor i in range(t):\n n = int(input())\n print(pow(2, int(math.log(n, 2))))\n",
"step-5": "#Some people are standing in a queue. A selection process follows a rule where people standing on even positions are selected. Of the selected people a queue is formed and again out of these only people on even position are selected. This continues until we are left with one person. Find out the position of that person in the original queue.\n\n#Input:\n#The first line of input contains an integer T denoting the number of test cases.The first line of each test case is N,number of people standing in a queue.\n\n#Output:\n#Print the position(original queue) of that person who is left.\n#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef even(n):\n if n == 0 or n == 1:\n return \n elif n == 2:\n return 2\n else: \n for i in reversed(range(n+1)):\n if 2**i < n:\n return 2**i\nt = int(input(\"Enter number of test cases:\"))\narr = []\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)): \n print(arr[i], end = ' ')\n# --------------------------------------------------------------------------------------------------------------------\n\nimport math\nt = int(input())\nfor i in range(t):\n n =int(input())\n print(pow(2,int(math.log(n,2))))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rc.write_network()
<|reserved_special_token_0|>
comp.set_solar_like()
rc.plot(outfile='cno_extras.png', rho=1000000.0, T=100000000.0, comp=comp,
Z_range=[1, 13], N_range=[1, 13])
rc.plot(outfile='cno_extras_hide_alpha.png', rho=1000000.0, T=100000000.0,
comp=comp, Z_range=[1, 13], N_range=[1, 13], rotated=True,
highlight_filter_function=lambda r: r.Q > 0, curved_edges=True,
hide_xalpha=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rl = pyna.ReacLibLibrary()
h_burn = rl.linking_nuclei(['h1', 'he4', 'c12', 'c13', 'n13', 'n14', 'n15',
'o14', 'o15', 'o16', 'o17', 'o18', 'f17', 'f18', 'f19', 'ne18', 'ne19',
'ne20', 'mg22', 'mg24'], with_reverse=False)
rc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=['fe56'])
rc.write_network()
comp = pyna.Composition(rc.get_nuclei())
comp.set_solar_like()
rc.plot(outfile='cno_extras.png', rho=1000000.0, T=100000000.0, comp=comp,
Z_range=[1, 13], N_range=[1, 13])
rc.plot(outfile='cno_extras_hide_alpha.png', rho=1000000.0, T=100000000.0,
comp=comp, Z_range=[1, 13], N_range=[1, 13], rotated=True,
highlight_filter_function=lambda r: r.Q > 0, curved_edges=True,
hide_xalpha=True)
<|reserved_special_token_1|>
import pynucastro as pyna
rl = pyna.ReacLibLibrary()
h_burn = rl.linking_nuclei(['h1', 'he4', 'c12', 'c13', 'n13', 'n14', 'n15',
'o14', 'o15', 'o16', 'o17', 'o18', 'f17', 'f18', 'f19', 'ne18', 'ne19',
'ne20', 'mg22', 'mg24'], with_reverse=False)
rc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=['fe56'])
rc.write_network()
comp = pyna.Composition(rc.get_nuclei())
comp.set_solar_like()
rc.plot(outfile='cno_extras.png', rho=1000000.0, T=100000000.0, comp=comp,
Z_range=[1, 13], N_range=[1, 13])
rc.plot(outfile='cno_extras_hide_alpha.png', rho=1000000.0, T=100000000.0,
comp=comp, Z_range=[1, 13], N_range=[1, 13], rotated=True,
highlight_filter_function=lambda r: r.Q > 0, curved_edges=True,
hide_xalpha=True)
<|reserved_special_token_1|>
import pynucastro as pyna
rl = pyna.ReacLibLibrary()
h_burn = rl.linking_nuclei(["h1", "he4",
"c12", "c13",
"n13", "n14", "n15",
"o14", "o15", "o16","o17","o18",
"f17", "f18","f19",
"ne18", "ne19", "ne20",
"mg22", "mg24"],
with_reverse=False)
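# Editorial note (not in the original, and based on my reading of pynucastro):
# linking_nuclei() keeps only the ReacLib rates whose reactants and products
# all lie in the list above, and with_reverse=False leaves the derived
# reverse rates out, giving a forward-only hot-CNO/breakout network.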
rc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=["fe56"])
rc.write_network()
comp = pyna.Composition(rc.get_nuclei())
comp.set_solar_like()
rc.plot(outfile="cno_extras.png", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13])
rc.plot(outfile="cno_extras_hide_alpha.png", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13],
rotated=True, highlight_filter_function=lambda r: r.Q > 0,
curved_edges=True, hide_xalpha=True)
|
flexible
|
{
"blob_id": "39b07f1a515787e80a1fb822e67e19e2301b894a",
"index": 3285,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrc.write_network()\n<mask token>\ncomp.set_solar_like()\nrc.plot(outfile='cno_extras.png', rho=1000000.0, T=100000000.0, comp=comp,\n Z_range=[1, 13], N_range=[1, 13])\nrc.plot(outfile='cno_extras_hide_alpha.png', rho=1000000.0, T=100000000.0,\n comp=comp, Z_range=[1, 13], N_range=[1, 13], rotated=True,\n highlight_filter_function=lambda r: r.Q > 0, curved_edges=True,\n hide_xalpha=True)\n",
"step-3": "<mask token>\nrl = pyna.ReacLibLibrary()\nh_burn = rl.linking_nuclei(['h1', 'he4', 'c12', 'c13', 'n13', 'n14', 'n15',\n 'o14', 'o15', 'o16', 'o17', 'o18', 'f17', 'f18', 'f19', 'ne18', 'ne19',\n 'ne20', 'mg22', 'mg24'], with_reverse=False)\nrc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=['fe56'])\nrc.write_network()\ncomp = pyna.Composition(rc.get_nuclei())\ncomp.set_solar_like()\nrc.plot(outfile='cno_extras.png', rho=1000000.0, T=100000000.0, comp=comp,\n Z_range=[1, 13], N_range=[1, 13])\nrc.plot(outfile='cno_extras_hide_alpha.png', rho=1000000.0, T=100000000.0,\n comp=comp, Z_range=[1, 13], N_range=[1, 13], rotated=True,\n highlight_filter_function=lambda r: r.Q > 0, curved_edges=True,\n hide_xalpha=True)\n",
"step-4": "import pynucastro as pyna\nrl = pyna.ReacLibLibrary()\nh_burn = rl.linking_nuclei(['h1', 'he4', 'c12', 'c13', 'n13', 'n14', 'n15',\n 'o14', 'o15', 'o16', 'o17', 'o18', 'f17', 'f18', 'f19', 'ne18', 'ne19',\n 'ne20', 'mg22', 'mg24'], with_reverse=False)\nrc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=['fe56'])\nrc.write_network()\ncomp = pyna.Composition(rc.get_nuclei())\ncomp.set_solar_like()\nrc.plot(outfile='cno_extras.png', rho=1000000.0, T=100000000.0, comp=comp,\n Z_range=[1, 13], N_range=[1, 13])\nrc.plot(outfile='cno_extras_hide_alpha.png', rho=1000000.0, T=100000000.0,\n comp=comp, Z_range=[1, 13], N_range=[1, 13], rotated=True,\n highlight_filter_function=lambda r: r.Q > 0, curved_edges=True,\n hide_xalpha=True)\n",
"step-5": "import pynucastro as pyna\n\nrl = pyna.ReacLibLibrary()\n\nh_burn = rl.linking_nuclei([\"h1\", \"he4\",\n \"c12\", \"c13\",\n \"n13\", \"n14\", \"n15\",\n \"o14\", \"o15\", \"o16\",\"o17\",\"o18\",\n \"f17\", \"f18\",\"f19\",\n \"ne18\", \"ne19\", \"ne20\",\n \"mg22\", \"mg24\"],\n with_reverse=False)\n\n\nrc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=[\"fe56\"])\n\nrc.write_network()\n\ncomp = pyna.Composition(rc.get_nuclei())\ncomp.set_solar_like()\n\nrc.plot(outfile=\"cno_extras.png\", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13])\nrc.plot(outfile=\"cno_extras_hide_alpha.png\", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13],\n rotated=True, highlight_filter_function=lambda r: r.Q > 0,\n curved_edges=True, hide_xalpha=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from ..translators.translator import Translator
|
normal
|
{
"blob_id": "ab844143ceddf32982682f5092762af0c97db577",
"index": 391,
"step-1": "<mask token>\n",
"step-2": "from ..translators.translator import Translator\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
class Area:
<|reserved_special_token_0|>
def square(self):
side = int(input('Enter the length of a side:'))
area = side ** 2
print('Area is :', area, 'cm square')
def rect(self):
print('Enter length and breadth of rectangle:')
le = int(input())
br = int(input())
area = le * br
print('Area is :', area, 'cm square')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def cylinder(self):
rad = int(input('Enter the radius:'))
he = int(input('Enter the height:'))
area = 22 / 7 * rad ** 2 * he
print('Area is :', area, 'cm square')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Area:
<|reserved_special_token_0|>
def square(self):
side = int(input('Enter the length of a side:'))
area = side ** 2
print('Area is :', area, 'cm square')
def rect(self):
print('Enter length and breadth of rectangle:')
le = int(input())
br = int(input())
area = le * br
print('Area is :', area, 'cm square')
def cube(self):
side = int(input('Enter length of a side:'))
area = 6 * side ** 2
print('Area is :', area, 'cm square')
<|reserved_special_token_0|>
def cylinder(self):
rad = int(input('Enter the radius:'))
he = int(input('Enter the height:'))
area = 22 / 7 * rad ** 2 * he
print('Area is :', area, 'cm square')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Area:
def circle(self):
rad = int(input('Enter the radius:'))
area = 22 / 7 * rad ** 2
print('Area is :', area, 'cm square')
def square(self):
side = int(input('Enter the length of a side:'))
area = side ** 2
print('Area is :', area, 'cm square')
def rect(self):
print('Enter length and breadth of rectangle:')
le = int(input())
br = int(input())
area = le * br
print('Area is :', area, 'cm square')
def cube(self):
side = int(input('Enter length of a side:'))
area = 6 * side ** 2
print('Area is :', area, 'cm square')
<|reserved_special_token_0|>
def cylinder(self):
rad = int(input('Enter the radius:'))
he = int(input('Enter the height:'))
area = 22 / 7 * rad ** 2 * he
print('Area is :', area, 'cm square')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Area:
def circle(self):
rad = int(input('Enter the radius:'))
area = 22 / 7 * rad ** 2
print('Area is :', area, 'cm square')
def square(self):
side = int(input('Enter the length of a side:'))
area = side ** 2
print('Area is :', area, 'cm square')
def rect(self):
print('Enter length and breadth of rectangle:')
le = int(input())
br = int(input())
area = le * br
print('Area is :', area, 'cm square')
def cube(self):
side = int(input('Enter length of a side:'))
area = 6 * side ** 2
print('Area is :', area, 'cm square')
def cuboid(self):
print('Enter length , breadth and height :')
le = int(input())
br = int(input())
he = int(input())
area = 2 * (le * br + br * he + he * le)
print('Area is :', area, 'cm square')
def cylinder(self):
rad = int(input('Enter the radius:'))
he = int(input('Enter the height:'))
area = 22 / 7 * rad ** 2 * he
print('Area is :', area, 'cm square')
<|reserved_special_token_0|>
shape.circle()
shape.square()
shape.rect()
shape.cube()
shape.cuboid()
shape.cylinder()
<|reserved_special_token_1|>
class Area :
def circle(self):
rad = int(input("Enter the radius:"))
area = (22/7)*(rad**2)
print("Area is :" , area , "cm square")
def square(self):
side = int(input("Enter the length of a side:"))
area = side**2
print("Area is :" , area , "cm square")
def rect(self):
print("Enter length and breadth of rectangle:")
le = int(input())
br = int(input())
area = le * br
print("Area is :" , area , "cm square")
def cube(self):
side = int(input("Enter length of a side:"))
area = 6 * (side**2)
print("Area is :" , area , "cm square")
def cuboid(self):
print("Enter length , breadth and height :")
le = int(input())
br= int(input())
he= int(input())
area = 2*(le*br + br*he + he*le)
print("Area is :" , area , "cm square")
def cylinder(self):
rad = int(input("Enter the radius:"))
he = int(input("Enter the height:"))
area = (22/7)*(rad**2)*(he)
print("Area is :" , area , "cm square")
shape = Area()
shape.circle()
shape.square()
shape.rect()
shape.cube()
shape.cuboid()
shape.cylinder()
|
flexible
|
{
"blob_id": "4f36c7e98c54d38aaef9f2ebdafd0c34a157fcd7",
"index": 8268,
"step-1": "class Area:\n <mask token>\n\n def square(self):\n side = int(input('Enter the length of a side:'))\n area = side ** 2\n print('Area is :', area, 'cm square')\n\n def rect(self):\n print('Enter length and breadth of rectangle:')\n le = int(input())\n br = int(input())\n area = le * br\n print('Area is :', area, 'cm square')\n <mask token>\n <mask token>\n\n def cylinder(self):\n rad = int(input('Enter the radius:'))\n he = int(input('Enter the height:'))\n area = 22 / 7 * rad ** 2 * he\n print('Area is :', area, 'cm square')\n\n\n<mask token>\n",
"step-2": "class Area:\n <mask token>\n\n def square(self):\n side = int(input('Enter the length of a side:'))\n area = side ** 2\n print('Area is :', area, 'cm square')\n\n def rect(self):\n print('Enter length and breadth of rectangle:')\n le = int(input())\n br = int(input())\n area = le * br\n print('Area is :', area, 'cm square')\n\n def cube(self):\n side = int(input('Enter length of a side:'))\n area = 6 * side ** 2\n print('Area is :', area, 'cm square')\n <mask token>\n\n def cylinder(self):\n rad = int(input('Enter the radius:'))\n he = int(input('Enter the height:'))\n area = 22 / 7 * rad ** 2 * he\n print('Area is :', area, 'cm square')\n\n\n<mask token>\n",
"step-3": "class Area:\n\n def circle(self):\n rad = int(input('Enter the radius:'))\n area = 22 / 7 * rad ** 2\n print('Area is :', area, 'cm square')\n\n def square(self):\n side = int(input('Enter the length of a side:'))\n area = side ** 2\n print('Area is :', area, 'cm square')\n\n def rect(self):\n print('Enter length and breadth of rectangle:')\n le = int(input())\n br = int(input())\n area = le * br\n print('Area is :', area, 'cm square')\n\n def cube(self):\n side = int(input('Enter length of a side:'))\n area = 6 * side ** 2\n print('Area is :', area, 'cm square')\n <mask token>\n\n def cylinder(self):\n rad = int(input('Enter the radius:'))\n he = int(input('Enter the height:'))\n area = 22 / 7 * rad ** 2 * he\n print('Area is :', area, 'cm square')\n\n\n<mask token>\n",
"step-4": "class Area:\n\n def circle(self):\n rad = int(input('Enter the radius:'))\n area = 22 / 7 * rad ** 2\n print('Area is :', area, 'cm square')\n\n def square(self):\n side = int(input('Enter the length of a side:'))\n area = side ** 2\n print('Area is :', area, 'cm square')\n\n def rect(self):\n print('Enter length and breadth of rectangle:')\n le = int(input())\n br = int(input())\n area = le * br\n print('Area is :', area, 'cm square')\n\n def cube(self):\n side = int(input('Enter length of a side:'))\n area = 6 * side ** 2\n print('Area is :', area, 'cm square')\n\n def cuboid(self):\n print('Enter length , breadth and height :')\n le = int(input())\n br = int(input())\n he = int(input())\n area = 2 * (le * br + br * he + he * le)\n print('Area is :', area, 'cm square')\n\n def cylinder(self):\n rad = int(input('Enter the radius:'))\n he = int(input('Enter the height:'))\n area = 22 / 7 * rad ** 2 * he\n print('Area is :', area, 'cm square')\n\n\n<mask token>\nshape.circle()\nshape.square()\nshape.rect()\nshape.cube()\nshape.cuboid()\nshape.cylinder()\n",
"step-5": "class Area :\n def circle(self):\n rad = int(input(\"Enter the radius:\"))\n area = (22/7)*(rad**2)\n print(\"Area is :\" , area , \"cm square\")\n \n \n def square(self):\n side = int(input(\"Enter the length of a side:\"))\n area = side**2\n print(\"Area is :\" , area , \"cm square\")\n \n def rect(self):\n print(\"Enter length and breadth of rectangle:\")\n le = int(input())\n br = int(input())\n area = le * br\n print(\"Area is :\" , area , \"cm square\")\n \n def cube(self):\n side = int(input(\"Enter length of a side:\"))\n area = 6 * (side**2)\n print(\"Area is :\" , area , \"cm square\")\n \n def cuboid(self):\n print(\"Enter length , breadth and height :\")\n le = int(input())\n br= int(input())\n he= int(input())\n area = 2*(le*br + br*he + he*le)\n print(\"Area is :\" , area , \"cm square\")\n \n def cylinder(self):\n rad = int(input(\"Enter the radius:\"))\n he = int(input(\"Enter the height:\"))\n area = (22/7)*(rad**2)*(he)\n print(\"Area is :\" , area , \"cm square\")\n \n \nshape = Area()\n\nshape.circle()\nshape.square()\nshape.rect()\nshape.cube()\nshape.cuboid()\nshape.cylinder()",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
"""Seed file to make sample data for pets db."""
from models import db, User, Feedback
from app import app
# Create all tables
db.drop_all()
db.create_all()
# If table isn't empty, empty it
User.query.delete()
Feedback.query.delete()
# Add users and posts
john = User(username="John",password="123",email="24",first_name="12a",last_name="123")
# Add new objects to session, so they'll persist
db.session.add(john)
#have to add users first to not violate foreign key constraints
db.session.commit()
feed = Feedback(title="test",content="alsdkjf",username="John")
db.session.add(feed)
# Commit--otherwise, this never gets saved!
db.session.commit()
|
normal
|
{
"blob_id": "d520f9d681125937fbd9dff316bdc5f922f25ff3",
"index": 8050,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.drop_all()\ndb.create_all()\nUser.query.delete()\nFeedback.query.delete()\n<mask token>\ndb.session.add(john)\ndb.session.commit()\n<mask token>\ndb.session.add(feed)\ndb.session.commit()\n",
"step-3": "<mask token>\ndb.drop_all()\ndb.create_all()\nUser.query.delete()\nFeedback.query.delete()\njohn = User(username='John', password='123', email='24', first_name='12a',\n last_name='123')\ndb.session.add(john)\ndb.session.commit()\nfeed = Feedback(title='test', content='alsdkjf', username='John')\ndb.session.add(feed)\ndb.session.commit()\n",
"step-4": "<mask token>\nfrom models import db, User, Feedback\nfrom app import app\ndb.drop_all()\ndb.create_all()\nUser.query.delete()\nFeedback.query.delete()\njohn = User(username='John', password='123', email='24', first_name='12a',\n last_name='123')\ndb.session.add(john)\ndb.session.commit()\nfeed = Feedback(title='test', content='alsdkjf', username='John')\ndb.session.add(feed)\ndb.session.commit()\n",
"step-5": "\"\"\"Seed file to make sample data for pets db.\"\"\"\n\nfrom models import db, User, Feedback\nfrom app import app\n\n# Create all tables\ndb.drop_all()\ndb.create_all()\n\n# If table isn't empty, empty it\nUser.query.delete()\nFeedback.query.delete()\n\n\n# Add users and posts\njohn = User(username=\"John\",password=\"123\",email=\"24\",first_name=\"12a\",last_name=\"123\")\n\n# Add new objects to session, so they'll persist\ndb.session.add(john)\n\n\n#have to add users first to not violate foreign key constraints\ndb.session.commit()\n\nfeed = Feedback(title=\"test\",content=\"alsdkjf\",username=\"John\")\n\ndb.session.add(feed)\n\n\n# Commit--otherwise, this never gets saved!\ndb.session.commit()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_info():
init_file = 'PIKACHU/__init__.py'
with open(init_file, 'r') as f:
for line in f.readlines():
if '=' in line:
exec(compile(line, '', 'exec'))
return locals()['name'], locals()['author'], locals()['version']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('README.md', 'r') as fh:
long_description = fh.read()
def get_info():
init_file = 'PIKACHU/__init__.py'
with open(init_file, 'r') as f:
for line in f.readlines():
if '=' in line:
exec(compile(line, '', 'exec'))
return locals()['name'], locals()['author'], locals()['version']
<|reserved_special_token_0|>
setuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=
'[email protected]', description=
'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',
long_description=long_description, long_description_content_type=
'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files
=[('', ['LICENSE'])], packages=setuptools.find_packages(),
install_requires=['pika'], classifiers=(
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('README.md', 'r') as fh:
long_description = fh.read()
def get_info():
init_file = 'PIKACHU/__init__.py'
with open(init_file, 'r') as f:
for line in f.readlines():
if '=' in line:
exec(compile(line, '', 'exec'))
return locals()['name'], locals()['author'], locals()['version']
NAME, AUTHOR, VERSION = get_info()
sys.dont_write_bytecode = True
setuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=
'[email protected]', description=
'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',
long_description=long_description, long_description_content_type=
'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files
=[('', ['LICENSE'])], packages=setuptools.find_packages(),
install_requires=['pika'], classifiers=(
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent'))
<|reserved_special_token_1|>
import sys
import setuptools
from distutils.core import setup
with open('README.md', 'r') as fh:
long_description = fh.read()
def get_info():
init_file = 'PIKACHU/__init__.py'
with open(init_file, 'r') as f:
for line in f.readlines():
if '=' in line:
exec(compile(line, '', 'exec'))
return locals()['name'], locals()['author'], locals()['version']
NAME, AUTHOR, VERSION = get_info()
sys.dont_write_bytecode = True
setuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=
'[email protected]', description=
'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',
long_description=long_description, long_description_content_type=
'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files
=[('', ['LICENSE'])], packages=setuptools.find_packages(),
install_requires=['pika'], classifiers=(
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent'))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import sys
import setuptools
from distutils.core import setup
with open("README.md", "r") as fh:
long_description = fh.read()
def get_info():
init_file = 'PIKACHU/__init__.py'
with open(init_file, 'r') as f:
for line in f.readlines():
if "=" in line:
exec(compile(line, "", 'exec'))
return locals()['name'], locals()['author'], locals()['version']
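# Editorial comment (not in the original): get_info() re-executes every
# assignment line it finds in PIKACHU/__init__.py and then reads name, author
# and version back out of locals(), so the package metadata only has to be
# maintained in one place.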
NAME, AUTHOR, VERSION = get_info()
sys.dont_write_bytecode = True
setuptools.setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email="[email protected]",
description="a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/smilefufu/PIKACHU",
data_files = [("", ["LICENSE"])],
packages=setuptools.find_packages(),
install_requires=[
"pika",
],
classifiers=(
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent'
),
)
|
flexible
|
{
"blob_id": "f14ff29a1a76c2916cb211c476a56aaa5061bf71",
"index": 8837,
"step-1": "<mask token>\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\n<mask token>\nsetuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=\n '[email protected]', description=\n 'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files\n =[('', ['LICENSE'])], packages=setuptools.find_packages(),\n install_requires=['pika'], classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'))\n",
"step-3": "<mask token>\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\nNAME, AUTHOR, VERSION = get_info()\nsys.dont_write_bytecode = True\nsetuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=\n '[email protected]', description=\n 'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files\n =[('', ['LICENSE'])], packages=setuptools.find_packages(),\n install_requires=['pika'], classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'))\n",
"step-4": "import sys\nimport setuptools\nfrom distutils.core import setup\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\nNAME, AUTHOR, VERSION = get_info()\nsys.dont_write_bytecode = True\nsetuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=\n '[email protected]', description=\n 'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files\n =[('', ['LICENSE'])], packages=setuptools.find_packages(),\n install_requires=['pika'], classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport sys\nimport setuptools\nfrom distutils.core import setup\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if \"=\" in line:\n exec(compile(line, \"\", 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\nNAME, AUTHOR, VERSION = get_info()\n\nsys.dont_write_bytecode = True\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n author=AUTHOR,\n author_email=\"[email protected]\",\n description=\"a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/smilefufu/PIKACHU\",\n data_files = [(\"\", [\"LICENSE\"])],\n packages=setuptools.find_packages(),\n install_requires=[\n \"pika\",\n ],\n classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'\n ),\n)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class AccountNotificationView(BaseView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AccountNotificationView(BaseView):
<|reserved_special_token_0|>
@method_decorator(never_cache)
@method_decorator(login_required)
@method_decorator(sudo_required)
@method_decorator(transaction.atomic)
def handle(self, request):
settings_form = self.notification_settings_form(request.user,
request.POST or None)
reports_form = NotificationReportSettingsForm(request.user, request
.POST or None, prefix='reports')
org_list = list(Organization.objects.filter(status=
OrganizationStatus.VISIBLE, member_set__user=request.user).
distinct())
org_forms = [(org, NotificationDeploySettingsForm(request.user, org,
request.POST or None, prefix='deploys-org-%s' % (org.id,))) for
org in sorted(org_list, key=lambda o: o.name)]
project_list = list(Project.objects.filter(
team__organizationmemberteam__organizationmember__user=request.
user, team__organizationmemberteam__is_active=True, status=
ProjectStatus.VISIBLE).distinct())
project_forms = [(project, ProjectEmailOptionsForm(project, request
.user, request.POST or None, prefix='project-%s' % (project.id,
))) for project in sorted(project_list, key=lambda x: (x.
organization.name, x.name))]
ext_forms = []
for plugin in plugins.all():
for form in (safe_execute(plugin.get_notification_forms,
_with_transaction=False) or ()):
form = safe_execute(form, plugin, request.user, request.
POST or None, prefix=plugin.slug, _with_transaction=False)
if not form:
continue
ext_forms.append(form)
if request.POST:
all_forms = list(itertools.chain([settings_form, reports_form],
ext_forms, (f for _, f in project_forms), (f for _, f in
org_forms)))
if all(f.is_valid() for f in all_forms):
for form in all_forms:
form.save()
messages.add_message(request, messages.SUCCESS,
'Your settings were saved.')
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update({'settings_form': settings_form, 'project_forms':
project_forms, 'org_forms': org_forms, 'reports_form':
reports_form, 'ext_forms': ext_forms, 'page': 'notifications',
'AUTH_PROVIDERS': get_auth_providers()})
return render_to_response('sentry/account/notifications.html',
context, request)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AccountNotificationView(BaseView):
notification_settings_form = NotificationSettingsForm
@method_decorator(never_cache)
@method_decorator(login_required)
@method_decorator(sudo_required)
@method_decorator(transaction.atomic)
def handle(self, request):
settings_form = self.notification_settings_form(request.user,
request.POST or None)
reports_form = NotificationReportSettingsForm(request.user, request
.POST or None, prefix='reports')
org_list = list(Organization.objects.filter(status=
OrganizationStatus.VISIBLE, member_set__user=request.user).
distinct())
org_forms = [(org, NotificationDeploySettingsForm(request.user, org,
request.POST or None, prefix='deploys-org-%s' % (org.id,))) for
org in sorted(org_list, key=lambda o: o.name)]
project_list = list(Project.objects.filter(
team__organizationmemberteam__organizationmember__user=request.
user, team__organizationmemberteam__is_active=True, status=
ProjectStatus.VISIBLE).distinct())
project_forms = [(project, ProjectEmailOptionsForm(project, request
.user, request.POST or None, prefix='project-%s' % (project.id,
))) for project in sorted(project_list, key=lambda x: (x.
organization.name, x.name))]
ext_forms = []
for plugin in plugins.all():
for form in (safe_execute(plugin.get_notification_forms,
_with_transaction=False) or ()):
form = safe_execute(form, plugin, request.user, request.
POST or None, prefix=plugin.slug, _with_transaction=False)
if not form:
continue
ext_forms.append(form)
if request.POST:
all_forms = list(itertools.chain([settings_form, reports_form],
ext_forms, (f for _, f in project_forms), (f for _, f in
org_forms)))
if all(f.is_valid() for f in all_forms):
for form in all_forms:
form.save()
messages.add_message(request, messages.SUCCESS,
'Your settings were saved.')
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update({'settings_form': settings_form, 'project_forms':
project_forms, 'org_forms': org_forms, 'reports_form':
reports_form, 'ext_forms': ext_forms, 'page': 'notifications',
'AUTH_PROVIDERS': get_auth_providers()})
return render_to_response('sentry/account/notifications.html',
context, request)
<|reserved_special_token_1|>
from __future__ import absolute_import
import itertools
from django.contrib import messages
from django.core.context_processors import csrf
from django.db import transaction
from django.http import HttpResponseRedirect
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from sudo.decorators import sudo_required
from sentry.models import Project, ProjectStatus, Organization, OrganizationStatus
from sentry.plugins import plugins
from sentry.web.forms.accounts import ProjectEmailOptionsForm, NotificationSettingsForm, NotificationReportSettingsForm, NotificationDeploySettingsForm
from sentry.web.decorators import login_required
from sentry.web.frontend.base import BaseView
from sentry.web.helpers import render_to_response
from sentry.utils.auth import get_auth_providers
from sentry.utils.safe import safe_execute
class AccountNotificationView(BaseView):
notification_settings_form = NotificationSettingsForm
@method_decorator(never_cache)
@method_decorator(login_required)
@method_decorator(sudo_required)
@method_decorator(transaction.atomic)
def handle(self, request):
settings_form = self.notification_settings_form(request.user,
request.POST or None)
reports_form = NotificationReportSettingsForm(request.user, request
.POST or None, prefix='reports')
org_list = list(Organization.objects.filter(status=
OrganizationStatus.VISIBLE, member_set__user=request.user).
distinct())
org_forms = [(org, NotificationDeploySettingsForm(request.user, org,
request.POST or None, prefix='deploys-org-%s' % (org.id,))) for
org in sorted(org_list, key=lambda o: o.name)]
project_list = list(Project.objects.filter(
team__organizationmemberteam__organizationmember__user=request.
user, team__organizationmemberteam__is_active=True, status=
ProjectStatus.VISIBLE).distinct())
project_forms = [(project, ProjectEmailOptionsForm(project, request
.user, request.POST or None, prefix='project-%s' % (project.id,
))) for project in sorted(project_list, key=lambda x: (x.
organization.name, x.name))]
ext_forms = []
for plugin in plugins.all():
for form in (safe_execute(plugin.get_notification_forms,
_with_transaction=False) or ()):
form = safe_execute(form, plugin, request.user, request.
POST or None, prefix=plugin.slug, _with_transaction=False)
if not form:
continue
ext_forms.append(form)
if request.POST:
all_forms = list(itertools.chain([settings_form, reports_form],
ext_forms, (f for _, f in project_forms), (f for _, f in
org_forms)))
if all(f.is_valid() for f in all_forms):
for form in all_forms:
form.save()
messages.add_message(request, messages.SUCCESS,
'Your settings were saved.')
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update({'settings_form': settings_form, 'project_forms':
project_forms, 'org_forms': org_forms, 'reports_form':
reports_form, 'ext_forms': ext_forms, 'page': 'notifications',
'AUTH_PROVIDERS': get_auth_providers()})
return render_to_response('sentry/account/notifications.html',
context, request)
<|reserved_special_token_1|>
from __future__ import absolute_import
import itertools
from django.contrib import messages
from django.core.context_processors import csrf
from django.db import transaction
from django.http import HttpResponseRedirect
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from sudo.decorators import sudo_required
from sentry.models import (Project, ProjectStatus, Organization, OrganizationStatus)
from sentry.plugins import plugins
from sentry.web.forms.accounts import (
ProjectEmailOptionsForm, NotificationSettingsForm, NotificationReportSettingsForm,
NotificationDeploySettingsForm
)
from sentry.web.decorators import login_required
from sentry.web.frontend.base import BaseView
from sentry.web.helpers import render_to_response
from sentry.utils.auth import get_auth_providers
from sentry.utils.safe import safe_execute
class AccountNotificationView(BaseView):
notification_settings_form = NotificationSettingsForm
@method_decorator(never_cache)
@method_decorator(login_required)
@method_decorator(sudo_required)
@method_decorator(transaction.atomic)
def handle(self, request):
settings_form = self.notification_settings_form(request.user, request.POST or None)
reports_form = NotificationReportSettingsForm(
request.user, request.POST or None, prefix='reports'
)
org_list = list(
Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
member_set__user=request.user,
).distinct()
)
org_forms = [
(
org, NotificationDeploySettingsForm(
request.user, org, request.POST or None, prefix='deploys-org-%s' % (org.id, )
)
) for org in sorted(org_list, key=lambda o: o.name)
]
project_list = list(
Project.objects.filter(
team__organizationmemberteam__organizationmember__user=request.user,
team__organizationmemberteam__is_active=True,
status=ProjectStatus.VISIBLE,
).distinct()
)
project_forms = [
(
project, ProjectEmailOptionsForm(
project,
request.user,
request.POST or None,
prefix='project-%s' % (project.id, )
)
) for project in sorted(project_list, key=lambda x: (x.organization.name, x.name))
]
ext_forms = []
for plugin in plugins.all():
for form in safe_execute(plugin.get_notification_forms, _with_transaction=False) or ():
form = safe_execute(
form,
plugin,
request.user,
request.POST or None,
prefix=plugin.slug,
_with_transaction=False
)
if not form:
continue
ext_forms.append(form)
if request.POST:
all_forms = list(
itertools.chain(
[settings_form, reports_form], ext_forms, (f for _, f in project_forms),
(f for _, f in org_forms)
)
)
if all(f.is_valid() for f in all_forms):
for form in all_forms:
form.save()
messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update(
{
'settings_form': settings_form,
'project_forms': project_forms,
'org_forms': org_forms,
'reports_form': reports_form,
'ext_forms': ext_forms,
'page': 'notifications',
'AUTH_PROVIDERS': get_auth_providers(),
}
)
return render_to_response('sentry/account/notifications.html', context, request)
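

# --- Illustrative sketch (not part of Sentry) --------------------------------
# The view above builds one Django form per organization/project with a unique
# ``prefix`` and only saves when *all* forms validate. A minimal stand-alone
# version of that pattern; the form class, its field and its save() body are
# hypothetical and only exist for this example.
from django import forms


class _SketchEmailOptionsForm(forms.Form):
    alerts = forms.BooleanField(required=False)

    def save(self):
        # hypothetical: persist self.cleaned_data for the related project
        pass


def _sketch_handle_post(post_data, projects):
    project_forms = [
        (project, _SketchEmailOptionsForm(post_data or None, prefix='project-%s' % project.id))
        for project in projects
    ]
    all_forms = [form for _, form in project_forms]
    if post_data and all(form.is_valid() for form in all_forms):
        for form in all_forms:
            form.save()
        return True
    return False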
|
flexible
|
{
"blob_id": "46f218829e1bf324d4c50ea0ff7003bc48b64e2a",
"index": 4258,
"step-1": "<mask token>\n\n\nclass AccountNotificationView(BaseView):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AccountNotificationView(BaseView):\n <mask token>\n\n @method_decorator(never_cache)\n @method_decorator(login_required)\n @method_decorator(sudo_required)\n @method_decorator(transaction.atomic)\n def handle(self, request):\n settings_form = self.notification_settings_form(request.user, \n request.POST or None)\n reports_form = NotificationReportSettingsForm(request.user, request\n .POST or None, prefix='reports')\n org_list = list(Organization.objects.filter(status=\n OrganizationStatus.VISIBLE, member_set__user=request.user).\n distinct())\n org_forms = [(org, NotificationDeploySettingsForm(request.user, org,\n request.POST or None, prefix='deploys-org-%s' % (org.id,))) for\n org in sorted(org_list, key=lambda o: o.name)]\n project_list = list(Project.objects.filter(\n team__organizationmemberteam__organizationmember__user=request.\n user, team__organizationmemberteam__is_active=True, status=\n ProjectStatus.VISIBLE).distinct())\n project_forms = [(project, ProjectEmailOptionsForm(project, request\n .user, request.POST or None, prefix='project-%s' % (project.id,\n ))) for project in sorted(project_list, key=lambda x: (x.\n organization.name, x.name))]\n ext_forms = []\n for plugin in plugins.all():\n for form in (safe_execute(plugin.get_notification_forms,\n _with_transaction=False) or ()):\n form = safe_execute(form, plugin, request.user, request.\n POST or None, prefix=plugin.slug, _with_transaction=False)\n if not form:\n continue\n ext_forms.append(form)\n if request.POST:\n all_forms = list(itertools.chain([settings_form, reports_form],\n ext_forms, (f for _, f in project_forms), (f for _, f in\n org_forms)))\n if all(f.is_valid() for f in all_forms):\n for form in all_forms:\n form.save()\n messages.add_message(request, messages.SUCCESS,\n 'Your settings were saved.')\n return HttpResponseRedirect(request.path)\n context = csrf(request)\n context.update({'settings_form': settings_form, 'project_forms':\n project_forms, 'org_forms': org_forms, 'reports_form':\n reports_form, 'ext_forms': ext_forms, 'page': 'notifications',\n 'AUTH_PROVIDERS': get_auth_providers()})\n return render_to_response('sentry/account/notifications.html',\n context, request)\n",
"step-3": "<mask token>\n\n\nclass AccountNotificationView(BaseView):\n notification_settings_form = NotificationSettingsForm\n\n @method_decorator(never_cache)\n @method_decorator(login_required)\n @method_decorator(sudo_required)\n @method_decorator(transaction.atomic)\n def handle(self, request):\n settings_form = self.notification_settings_form(request.user, \n request.POST or None)\n reports_form = NotificationReportSettingsForm(request.user, request\n .POST or None, prefix='reports')\n org_list = list(Organization.objects.filter(status=\n OrganizationStatus.VISIBLE, member_set__user=request.user).\n distinct())\n org_forms = [(org, NotificationDeploySettingsForm(request.user, org,\n request.POST or None, prefix='deploys-org-%s' % (org.id,))) for\n org in sorted(org_list, key=lambda o: o.name)]\n project_list = list(Project.objects.filter(\n team__organizationmemberteam__organizationmember__user=request.\n user, team__organizationmemberteam__is_active=True, status=\n ProjectStatus.VISIBLE).distinct())\n project_forms = [(project, ProjectEmailOptionsForm(project, request\n .user, request.POST or None, prefix='project-%s' % (project.id,\n ))) for project in sorted(project_list, key=lambda x: (x.\n organization.name, x.name))]\n ext_forms = []\n for plugin in plugins.all():\n for form in (safe_execute(plugin.get_notification_forms,\n _with_transaction=False) or ()):\n form = safe_execute(form, plugin, request.user, request.\n POST or None, prefix=plugin.slug, _with_transaction=False)\n if not form:\n continue\n ext_forms.append(form)\n if request.POST:\n all_forms = list(itertools.chain([settings_form, reports_form],\n ext_forms, (f for _, f in project_forms), (f for _, f in\n org_forms)))\n if all(f.is_valid() for f in all_forms):\n for form in all_forms:\n form.save()\n messages.add_message(request, messages.SUCCESS,\n 'Your settings were saved.')\n return HttpResponseRedirect(request.path)\n context = csrf(request)\n context.update({'settings_form': settings_form, 'project_forms':\n project_forms, 'org_forms': org_forms, 'reports_form':\n reports_form, 'ext_forms': ext_forms, 'page': 'notifications',\n 'AUTH_PROVIDERS': get_auth_providers()})\n return render_to_response('sentry/account/notifications.html',\n context, request)\n",
"step-4": "from __future__ import absolute_import\nimport itertools\nfrom django.contrib import messages\nfrom django.core.context_processors import csrf\nfrom django.db import transaction\nfrom django.http import HttpResponseRedirect\nfrom django.views.decorators.cache import never_cache\nfrom django.utils.decorators import method_decorator\nfrom sudo.decorators import sudo_required\nfrom sentry.models import Project, ProjectStatus, Organization, OrganizationStatus\nfrom sentry.plugins import plugins\nfrom sentry.web.forms.accounts import ProjectEmailOptionsForm, NotificationSettingsForm, NotificationReportSettingsForm, NotificationDeploySettingsForm\nfrom sentry.web.decorators import login_required\nfrom sentry.web.frontend.base import BaseView\nfrom sentry.web.helpers import render_to_response\nfrom sentry.utils.auth import get_auth_providers\nfrom sentry.utils.safe import safe_execute\n\n\nclass AccountNotificationView(BaseView):\n notification_settings_form = NotificationSettingsForm\n\n @method_decorator(never_cache)\n @method_decorator(login_required)\n @method_decorator(sudo_required)\n @method_decorator(transaction.atomic)\n def handle(self, request):\n settings_form = self.notification_settings_form(request.user, \n request.POST or None)\n reports_form = NotificationReportSettingsForm(request.user, request\n .POST or None, prefix='reports')\n org_list = list(Organization.objects.filter(status=\n OrganizationStatus.VISIBLE, member_set__user=request.user).\n distinct())\n org_forms = [(org, NotificationDeploySettingsForm(request.user, org,\n request.POST or None, prefix='deploys-org-%s' % (org.id,))) for\n org in sorted(org_list, key=lambda o: o.name)]\n project_list = list(Project.objects.filter(\n team__organizationmemberteam__organizationmember__user=request.\n user, team__organizationmemberteam__is_active=True, status=\n ProjectStatus.VISIBLE).distinct())\n project_forms = [(project, ProjectEmailOptionsForm(project, request\n .user, request.POST or None, prefix='project-%s' % (project.id,\n ))) for project in sorted(project_list, key=lambda x: (x.\n organization.name, x.name))]\n ext_forms = []\n for plugin in plugins.all():\n for form in (safe_execute(plugin.get_notification_forms,\n _with_transaction=False) or ()):\n form = safe_execute(form, plugin, request.user, request.\n POST or None, prefix=plugin.slug, _with_transaction=False)\n if not form:\n continue\n ext_forms.append(form)\n if request.POST:\n all_forms = list(itertools.chain([settings_form, reports_form],\n ext_forms, (f for _, f in project_forms), (f for _, f in\n org_forms)))\n if all(f.is_valid() for f in all_forms):\n for form in all_forms:\n form.save()\n messages.add_message(request, messages.SUCCESS,\n 'Your settings were saved.')\n return HttpResponseRedirect(request.path)\n context = csrf(request)\n context.update({'settings_form': settings_form, 'project_forms':\n project_forms, 'org_forms': org_forms, 'reports_form':\n reports_form, 'ext_forms': ext_forms, 'page': 'notifications',\n 'AUTH_PROVIDERS': get_auth_providers()})\n return render_to_response('sentry/account/notifications.html',\n context, request)\n",
"step-5": "from __future__ import absolute_import\n\nimport itertools\n\nfrom django.contrib import messages\nfrom django.core.context_processors import csrf\nfrom django.db import transaction\nfrom django.http import HttpResponseRedirect\nfrom django.views.decorators.cache import never_cache\nfrom django.utils.decorators import method_decorator\n\nfrom sudo.decorators import sudo_required\n\nfrom sentry.models import (Project, ProjectStatus, Organization, OrganizationStatus)\nfrom sentry.plugins import plugins\nfrom sentry.web.forms.accounts import (\n ProjectEmailOptionsForm, NotificationSettingsForm, NotificationReportSettingsForm,\n NotificationDeploySettingsForm\n)\nfrom sentry.web.decorators import login_required\nfrom sentry.web.frontend.base import BaseView\nfrom sentry.web.helpers import render_to_response\nfrom sentry.utils.auth import get_auth_providers\nfrom sentry.utils.safe import safe_execute\n\n\nclass AccountNotificationView(BaseView):\n notification_settings_form = NotificationSettingsForm\n\n @method_decorator(never_cache)\n @method_decorator(login_required)\n @method_decorator(sudo_required)\n @method_decorator(transaction.atomic)\n def handle(self, request):\n settings_form = self.notification_settings_form(request.user, request.POST or None)\n reports_form = NotificationReportSettingsForm(\n request.user, request.POST or None, prefix='reports'\n )\n\n org_list = list(\n Organization.objects.filter(\n status=OrganizationStatus.VISIBLE,\n member_set__user=request.user,\n ).distinct()\n )\n\n org_forms = [\n (\n org, NotificationDeploySettingsForm(\n request.user, org, request.POST or None, prefix='deploys-org-%s' % (org.id, )\n )\n ) for org in sorted(org_list, key=lambda o: o.name)\n ]\n\n project_list = list(\n Project.objects.filter(\n team__organizationmemberteam__organizationmember__user=request.user,\n team__organizationmemberteam__is_active=True,\n status=ProjectStatus.VISIBLE,\n ).distinct()\n )\n\n project_forms = [\n (\n project, ProjectEmailOptionsForm(\n project,\n request.user,\n request.POST or None,\n prefix='project-%s' % (project.id, )\n )\n ) for project in sorted(project_list, key=lambda x: (x.organization.name, x.name))\n ]\n\n ext_forms = []\n for plugin in plugins.all():\n for form in safe_execute(plugin.get_notification_forms, _with_transaction=False) or ():\n form = safe_execute(\n form,\n plugin,\n request.user,\n request.POST or None,\n prefix=plugin.slug,\n _with_transaction=False\n )\n if not form:\n continue\n ext_forms.append(form)\n\n if request.POST:\n all_forms = list(\n itertools.chain(\n [settings_form, reports_form], ext_forms, (f for _, f in project_forms),\n (f for _, f in org_forms)\n )\n )\n if all(f.is_valid() for f in all_forms):\n for form in all_forms:\n form.save()\n messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')\n return HttpResponseRedirect(request.path)\n\n context = csrf(request)\n context.update(\n {\n 'settings_form': settings_form,\n 'project_forms': project_forms,\n 'org_forms': org_forms,\n 'reports_form': reports_form,\n 'ext_forms': ext_forms,\n 'page': 'notifications',\n 'AUTH_PROVIDERS': get_auth_providers(),\n }\n )\n return render_to_response('sentry/account/notifications.html', context, request)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
version https://git-lfs.github.com/spec/v1
oid sha256:839b1a9cc0c676f388ebfe8d8f2e89ad7c39a6f0aa50fa76b2236703bf1a8264
size 62
|
normal
|
{
"blob_id": "23150f359db97e1e0ce3f12a173cd7015ad22cd4",
"index": 2220,
"step-1": "version https://git-lfs.github.com/spec/v1\noid sha256:839b1a9cc0c676f388ebfe8d8f2e89ad7c39a6f0aa50fa76b2236703bf1a8264\nsize 62\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
# __file__: the current file
# os.path.dirname(): the directory containing a path
# os.path.abspath(): the absolute path of a file/directory
# os.path.join(): join path components
# project root path
BASEDIR = os.path.abspath(
os.path.dirname(
os.path.dirname(
__file__)))
# data file directory
DATA_DIR = os.path.join(BASEDIR, "data")
DATA_FILE = os.path.join(DATA_DIR, 'data.yaml')
|
normal
|
{
"blob_id": "7a793c2081032745ae58f92a4572954333742dfd",
"index": 3943,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nBASEDIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\nDATA_DIR = os.path.join(BASEDIR, 'data')\nDATA_FILE = os.path.join(DATA_DIR, 'data.yaml')\n",
"step-3": "import os\nBASEDIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\nDATA_DIR = os.path.join(BASEDIR, 'data')\nDATA_FILE = os.path.join(DATA_DIR, 'data.yaml')\n",
"step-4": "import os\n\n# __file__: 当前文件\n# os.path.dirname(): 所在目录\n# os.path.abspath(): 当前文件/目录的绝对路径\n# os.path.join(): 路径连接\n\n# 项目路径\nBASEDIR = os.path.abspath(\n os.path.dirname(\n os.path.dirname(\n __file__)))\n\n# 数据文件目录\nDATA_DIR = os.path.join(BASEDIR, \"data\")\nDATA_FILE = os.path.join(DATA_DIR, 'data.yaml')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
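# Illustrative usage of the path-configuration snippet above: load the YAML
# file that DATA_FILE points to. The module name in the import and the use of
# PyYAML are assumptions for this example, not taken from the original code.
import yaml  # requires PyYAML

from config import DATA_FILE  # hypothetical module name for the snippet above


def load_data():
    with open(DATA_FILE, encoding='utf-8') as fh:
        return yaml.safe_load(fh)


if __name__ == '__main__':
    print(load_data())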
<|reserved_special_token_0|>
def detect_lines_hough(img):
    lines = cv2.HoughLinesP(cv2.bitwise_not(img), rho=1, theta=np.pi /
2, threshold=50, minLineLength=120, maxLineGap=10)
return [line[0] for line in lines]
<|reserved_special_token_0|>
def detect_lines(img, min_line_length):
"""
Custom line detection algorithm
"""
height, width = img.shape
horizontal = []
vertical = []
current_line = False
current_line_start = 0
white = img == 255
for y in range(height):
for x in range(width):
is_white = white.item(y, x)
if is_white:
if not current_line:
current_line = True
current_line_start = x
elif current_line:
current_line = False
if x - current_line_start > min_line_length:
horizontal.append((current_line_start, y, x - 1, y))
if current_line:
current_line = False
if x - current_line_start > min_line_length:
horizontal.append((current_line_start, y, x - 1, y))
current_line = False
current_line_start = 0
for x in range(width):
for y in range(height):
is_white = white.item(y, x)
if is_white:
if not current_line:
current_line = True
current_line_start = y
elif current_line:
current_line = False
if y - current_line_start > min_line_length:
vertical.append((x, y - 1, x, current_line_start))
if current_line:
current_line = False
if y - current_line_start > min_line_length:
vertical.append((x, y - 1, x, current_line_start))
return horizontal, vertical
<|reserved_special_token_0|>
def split_by_orientation(lines):
horizontal = []
vertical = []
for x1, y1, x2, y2 in lines:
if abs(y1 - y2) > abs(x1 - x2):
vertical.append((x1, y1, x2, y2))
else:
horizontal.append((x1, y1, x2, y2))
return horizontal, vertical
def reduce_lines(input_horizontal, input_vertical, min_distance):
"""
Takes a list of vertical and horizontal lines,
tries to reduce them to essential lines eliminating lines close to each
other.
"""
seen_vertical = set()
seen_horizontal = set()
output_vertical = []
output_horizontal = []
for index, (x1, y1, x2, y2) in enumerate(input_vertical):
if index in seen_vertical:
continue
x_values = [x1]
for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):
if other_index in seen_vertical:
continue
if abs(x1 - x1_b) < min_distance:
if y2_b < y2:
y2 = y2_b
if y1_b > y1:
y1 = y1_b
x_values.append(x1_b)
seen_vertical.add(other_index)
x = int(np.mean(x_values))
output_vertical.append((x, y1, x, y2))
for index, (x1, y1, x2, y2) in enumerate(input_horizontal):
if index in seen_horizontal:
continue
y_values = [y1, y2]
for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal
):
if other_index in seen_horizontal:
continue
if abs(y1 - y1_b) < min_distance:
if x1_b < x1:
x1 = x1_b
if x2_b > x2:
x2 = x2_b
y_values += [y1_b, y2_b]
seen_horizontal.add(other_index)
y = int(np.mean(y_values))
output_horizontal.append((x1, y, x2, y))
return output_vertical, output_horizontal
def connect_lines(horizontal_lines, vertical_lines):
"""
Makes sure the ends of every line are touching another line
Possible improvements:
- Prefer crossing lines in the direction of the end
- e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right
- Make sure the "crossing line" is actually long enough to cross this line
Idea:
- Test and improve this algorithm by
- 1. create lines a la mondrian
    - 2. randomly shorten these lines
    - 3. run the algorithm over the shortened version
- 4. check whether the result is the original
"""
horizontal = []
vertical = []
for x1, y1, x2, y2 in horizontal_lines:
closest_vertical_left = 20000
closest_vertical_right = 20000
for v_x1, v_y1, v_x2, v_y2 in vertical_lines:
if abs(x1 - v_x1) < abs(closest_vertical_left):
closest_vertical_left = x1 - v_x1
if abs(x2 - v_x1) < abs(closest_vertical_right):
closest_vertical_right = x2 - v_x1
x1 = x1 - closest_vertical_left
x2 = x2 - closest_vertical_right
horizontal.append((x1, y1, x2, y2))
for x1, y1, x2, y2 in vertical_lines:
closest_horizontal_up = 20000
closest_horizontal_down = 20000
for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:
if abs(y1 - h_y1) < abs(closest_horizontal_up):
closest_horizontal_up = y1 - h_y1
if abs(y2 - h_y1) < abs(closest_horizontal_down):
closest_horizontal_down = y2 - h_y1
y1 = y1 - closest_horizontal_up
y2 = y2 - closest_horizontal_down
vertical.append((x1, y1, x2, y2))
return horizontal, vertical
<|reserved_special_token_0|>
def find_corners(horizontal, vertical):
top_left = []
top_right = []
bottom_left = []
bottom_right = []
for x_1, y_h, x_2, _ in horizontal:
for x_v, y_1, _, y_2 in vertical:
crossing = x_v, y_h
if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:
if x_1 == x_v:
if y_1 != y_h:
bottom_left.append(crossing)
if y_2 != y_h:
top_left.append(crossing)
elif x_2 == x_v:
if y_1 != y_h:
bottom_right.append(crossing)
if y_2 != y_h:
top_right.append(crossing)
else:
if y_1 != y_h:
top_left.append(crossing)
top_right.append(crossing)
if y_2 != y_h:
bottom_left.append(crossing)
bottom_right.append(crossing)
return top_left, bottom_left, bottom_right, top_right
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def detect_lines_hough(img):
    lines = cv2.HoughLinesP(cv2.bitwise_not(img), rho=1, theta=np.pi /
2, threshold=50, minLineLength=120, maxLineGap=10)
return [line[0] for line in lines]
<|reserved_special_token_0|>
def detect_lines(img, min_line_length):
"""
Custom line detection algorithm
"""
height, width = img.shape
horizontal = []
vertical = []
current_line = False
current_line_start = 0
white = img == 255
for y in range(height):
for x in range(width):
is_white = white.item(y, x)
if is_white:
if not current_line:
current_line = True
current_line_start = x
elif current_line:
current_line = False
if x - current_line_start > min_line_length:
horizontal.append((current_line_start, y, x - 1, y))
if current_line:
current_line = False
if x - current_line_start > min_line_length:
horizontal.append((current_line_start, y, x - 1, y))
current_line = False
current_line_start = 0
for x in range(width):
for y in range(height):
is_white = white.item(y, x)
if is_white:
if not current_line:
current_line = True
current_line_start = y
elif current_line:
current_line = False
if y - current_line_start > min_line_length:
vertical.append((x, y - 1, x, current_line_start))
if current_line:
current_line = False
if y - current_line_start > min_line_length:
vertical.append((x, y - 1, x, current_line_start))
return horizontal, vertical
<|reserved_special_token_0|>
def split_by_orientation(lines):
horizontal = []
vertical = []
for x1, y1, x2, y2 in lines:
if abs(y1 - y2) > abs(x1 - x2):
vertical.append((x1, y1, x2, y2))
else:
horizontal.append((x1, y1, x2, y2))
return horizontal, vertical
def reduce_lines(input_horizontal, input_vertical, min_distance):
"""
Takes a list of vertical and horizontal lines,
tries to reduce them to essential lines eliminating lines close to each
other.
"""
seen_vertical = set()
seen_horizontal = set()
output_vertical = []
output_horizontal = []
for index, (x1, y1, x2, y2) in enumerate(input_vertical):
if index in seen_vertical:
continue
x_values = [x1]
for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):
if other_index in seen_vertical:
continue
if abs(x1 - x1_b) < min_distance:
if y2_b < y2:
y2 = y2_b
if y1_b > y1:
y1 = y1_b
x_values.append(x1_b)
seen_vertical.add(other_index)
x = int(np.mean(x_values))
output_vertical.append((x, y1, x, y2))
for index, (x1, y1, x2, y2) in enumerate(input_horizontal):
if index in seen_horizontal:
continue
y_values = [y1, y2]
for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal
):
if other_index in seen_horizontal:
continue
if abs(y1 - y1_b) < min_distance:
if x1_b < x1:
x1 = x1_b
if x2_b > x2:
x2 = x2_b
y_values += [y1_b, y2_b]
seen_horizontal.add(other_index)
y = int(np.mean(y_values))
output_horizontal.append((x1, y, x2, y))
return output_vertical, output_horizontal
def connect_lines(horizontal_lines, vertical_lines):
"""
Makes sure the ends of every line are touching another line
Possible improvements:
- Prefer crossing lines in the direction of the end
- e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right
- Make sure the "crossing line" is actually long enough to cross this line
Idea:
- Test and improve this algorithm by
- 1. create lines a la mondrian
    - 2. randomly shorten these lines
    - 3. run the algorithm over the shortened version
- 4. check whether the result is the original
"""
horizontal = []
vertical = []
for x1, y1, x2, y2 in horizontal_lines:
closest_vertical_left = 20000
closest_vertical_right = 20000
for v_x1, v_y1, v_x2, v_y2 in vertical_lines:
if abs(x1 - v_x1) < abs(closest_vertical_left):
closest_vertical_left = x1 - v_x1
if abs(x2 - v_x1) < abs(closest_vertical_right):
closest_vertical_right = x2 - v_x1
x1 = x1 - closest_vertical_left
x2 = x2 - closest_vertical_right
horizontal.append((x1, y1, x2, y2))
for x1, y1, x2, y2 in vertical_lines:
closest_horizontal_up = 20000
closest_horizontal_down = 20000
for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:
if abs(y1 - h_y1) < abs(closest_horizontal_up):
closest_horizontal_up = y1 - h_y1
if abs(y2 - h_y1) < abs(closest_horizontal_down):
closest_horizontal_down = y2 - h_y1
y1 = y1 - closest_horizontal_up
y2 = y2 - closest_horizontal_down
vertical.append((x1, y1, x2, y2))
return horizontal, vertical
def find_rectangles(top_left, bottom_left, bottom_right, top_right):
top_right.sort(key=lambda pos: pos[0])
bottom_left.sort(key=lambda pos: pos[1])
rectangles = []
for x, y in top_left:
a = [tr for tr in top_right if tr[1] == y and tr[0] > x]
b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]
if len(a) == 0 or len(b) == 0:
continue
x2, _a = a[0]
_, y2 = b[0]
w = x2 - x
h = y2 - y
rectangles.append((x, y, w, h))
return rectangles
def find_corners(horizontal, vertical):
top_left = []
top_right = []
bottom_left = []
bottom_right = []
for x_1, y_h, x_2, _ in horizontal:
for x_v, y_1, _, y_2 in vertical:
crossing = x_v, y_h
if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:
if x_1 == x_v:
if y_1 != y_h:
bottom_left.append(crossing)
if y_2 != y_h:
top_left.append(crossing)
elif x_2 == x_v:
if y_1 != y_h:
bottom_right.append(crossing)
if y_2 != y_h:
top_right.append(crossing)
else:
if y_1 != y_h:
top_left.append(crossing)
top_right.append(crossing)
if y_2 != y_h:
bottom_left.append(crossing)
bottom_right.append(crossing)
return top_left, bottom_left, bottom_right, top_right
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def detect_lines_hough(img):
    lines = cv2.HoughLinesP(cv2.bitwise_not(img), rho=1, theta=np.pi /
2, threshold=50, minLineLength=120, maxLineGap=10)
return [line[0] for line in lines]
<|reserved_special_token_0|>
def detect_lines(img, min_line_length):
"""
Custom line detection algorithm
"""
height, width = img.shape
horizontal = []
vertical = []
current_line = False
current_line_start = 0
white = img == 255
for y in range(height):
for x in range(width):
is_white = white.item(y, x)
if is_white:
if not current_line:
current_line = True
current_line_start = x
elif current_line:
current_line = False
if x - current_line_start > min_line_length:
horizontal.append((current_line_start, y, x - 1, y))
if current_line:
current_line = False
if x - current_line_start > min_line_length:
horizontal.append((current_line_start, y, x - 1, y))
current_line = False
current_line_start = 0
for x in range(width):
for y in range(height):
is_white = white.item(y, x)
if is_white:
if not current_line:
current_line = True
current_line_start = y
elif current_line:
current_line = False
if y - current_line_start > min_line_length:
vertical.append((x, y - 1, x, current_line_start))
if current_line:
current_line = False
if y - current_line_start > min_line_length:
vertical.append((x, y - 1, x, current_line_start))
return horizontal, vertical
def remove_lines_close_to_border(horizontal, vertical, width, height,
min_distance):
horizontal_result = []
vertical_result = []
for h in horizontal:
y = h[1]
if y > min_distance and height - y > min_distance:
horizontal_result.append(h)
for v in vertical:
x = v[0]
if x > min_distance and width - x > min_distance:
vertical_result.append(v)
return horizontal_result, vertical_result
def split_by_orientation(lines):
horizontal = []
vertical = []
for x1, y1, x2, y2 in lines:
if abs(y1 - y2) > abs(x1 - x2):
vertical.append((x1, y1, x2, y2))
else:
horizontal.append((x1, y1, x2, y2))
return horizontal, vertical
def reduce_lines(input_horizontal, input_vertical, min_distance):
"""
Takes a list of vertical and horizontal lines,
tries to reduce them to essential lines eliminating lines close to each
other.
"""
seen_vertical = set()
seen_horizontal = set()
output_vertical = []
output_horizontal = []
for index, (x1, y1, x2, y2) in enumerate(input_vertical):
if index in seen_vertical:
continue
x_values = [x1]
for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):
if other_index in seen_vertical:
continue
if abs(x1 - x1_b) < min_distance:
if y2_b < y2:
y2 = y2_b
if y1_b > y1:
y1 = y1_b
x_values.append(x1_b)
seen_vertical.add(other_index)
x = int(np.mean(x_values))
output_vertical.append((x, y1, x, y2))
for index, (x1, y1, x2, y2) in enumerate(input_horizontal):
if index in seen_horizontal:
continue
y_values = [y1, y2]
for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal
):
if other_index in seen_horizontal:
continue
if abs(y1 - y1_b) < min_distance:
if x1_b < x1:
x1 = x1_b
if x2_b > x2:
x2 = x2_b
y_values += [y1_b, y2_b]
seen_horizontal.add(other_index)
y = int(np.mean(y_values))
output_horizontal.append((x1, y, x2, y))
return output_vertical, output_horizontal
def connect_lines(horizontal_lines, vertical_lines):
"""
Makes sure the ends of every line are touching another line
Possible improvements:
- Prefer crossing lines in the direction of the end
- e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right
- Make sure the "crossing line" is actually long enough to cross this line
Idea:
- Test and improve this algorithm by
- 1. create lines a la mondrian
    - 2. randomly shorten these lines
    - 3. run the algorithm over the shortened version
- 4. check whether the result is the original
"""
horizontal = []
vertical = []
for x1, y1, x2, y2 in horizontal_lines:
closest_vertical_left = 20000
closest_vertical_right = 20000
for v_x1, v_y1, v_x2, v_y2 in vertical_lines:
if abs(x1 - v_x1) < abs(closest_vertical_left):
closest_vertical_left = x1 - v_x1
if abs(x2 - v_x1) < abs(closest_vertical_right):
closest_vertical_right = x2 - v_x1
x1 = x1 - closest_vertical_left
x2 = x2 - closest_vertical_right
horizontal.append((x1, y1, x2, y2))
for x1, y1, x2, y2 in vertical_lines:
closest_horizontal_up = 20000
closest_horizontal_down = 20000
for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:
if abs(y1 - h_y1) < abs(closest_horizontal_up):
closest_horizontal_up = y1 - h_y1
if abs(y2 - h_y1) < abs(closest_horizontal_down):
closest_horizontal_down = y2 - h_y1
y1 = y1 - closest_horizontal_up
y2 = y2 - closest_horizontal_down
vertical.append((x1, y1, x2, y2))
return horizontal, vertical
def find_rectangles(top_left, bottom_left, bottom_right, top_right):
top_right.sort(key=lambda pos: pos[0])
bottom_left.sort(key=lambda pos: pos[1])
rectangles = []
for x, y in top_left:
a = [tr for tr in top_right if tr[1] == y and tr[0] > x]
b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]
if len(a) == 0 or len(b) == 0:
continue
x2, _a = a[0]
_, y2 = b[0]
w = x2 - x
h = y2 - y
rectangles.append((x, y, w, h))
return rectangles
def find_corners(horizontal, vertical):
top_left = []
top_right = []
bottom_left = []
bottom_right = []
for x_1, y_h, x_2, _ in horizontal:
for x_v, y_1, _, y_2 in vertical:
crossing = x_v, y_h
if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:
if x_1 == x_v:
if y_1 != y_h:
bottom_left.append(crossing)
if y_2 != y_h:
top_left.append(crossing)
elif x_2 == x_v:
if y_1 != y_h:
bottom_right.append(crossing)
if y_2 != y_h:
top_right.append(crossing)
else:
if y_1 != y_h:
top_left.append(crossing)
top_right.append(crossing)
if y_2 != y_h:
bottom_left.append(crossing)
bottom_right.append(crossing)
return top_left, bottom_left, bottom_right, top_right
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def detect_lines_hough(img):
    lines = cv2.HoughLinesP(cv2.bitwise_not(img), rho=1, theta=np.pi /
2, threshold=50, minLineLength=120, maxLineGap=10)
return [line[0] for line in lines]
def detect_lines_rust(img, min_line_length):
height, width = img.shape
white = (img == 255).flatten().tolist()
detected = myrustlib.detect_lines(white, width, height, min_line_length)
return split_by_orientation(detected)
def detect_lines(img, min_line_length):
"""
Custom line detection algorithm
"""
height, width = img.shape
horizontal = []
vertical = []
current_line = False
current_line_start = 0
white = img == 255
for y in range(height):
for x in range(width):
is_white = white.item(y, x)
if is_white:
if not current_line:
current_line = True
current_line_start = x
elif current_line:
current_line = False
if x - current_line_start > min_line_length:
horizontal.append((current_line_start, y, x - 1, y))
if current_line:
current_line = False
if x - current_line_start > min_line_length:
horizontal.append((current_line_start, y, x - 1, y))
current_line = False
current_line_start = 0
for x in range(width):
for y in range(height):
is_white = white.item(y, x)
if is_white:
if not current_line:
current_line = True
current_line_start = y
elif current_line:
current_line = False
if y - current_line_start > min_line_length:
vertical.append((x, y - 1, x, current_line_start))
if current_line:
current_line = False
if y - current_line_start > min_line_length:
vertical.append((x, y - 1, x, current_line_start))
return horizontal, vertical
def remove_lines_close_to_border(horizontal, vertical, width, height,
min_distance):
horizontal_result = []
vertical_result = []
for h in horizontal:
y = h[1]
if y > min_distance and height - y > min_distance:
horizontal_result.append(h)
for v in vertical:
x = v[0]
if x > min_distance and width - x > min_distance:
vertical_result.append(v)
return horizontal_result, vertical_result
def split_by_orientation(lines):
horizontal = []
vertical = []
for x1, y1, x2, y2 in lines:
if abs(y1 - y2) > abs(x1 - x2):
vertical.append((x1, y1, x2, y2))
else:
horizontal.append((x1, y1, x2, y2))
return horizontal, vertical
def reduce_lines(input_horizontal, input_vertical, min_distance):
"""
Takes a list of vertical and horizontal lines,
tries to reduce them to essential lines eliminating lines close to each
other.
"""
seen_vertical = set()
seen_horizontal = set()
output_vertical = []
output_horizontal = []
for index, (x1, y1, x2, y2) in enumerate(input_vertical):
if index in seen_vertical:
continue
x_values = [x1]
for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):
if other_index in seen_vertical:
continue
if abs(x1 - x1_b) < min_distance:
if y2_b < y2:
y2 = y2_b
if y1_b > y1:
y1 = y1_b
x_values.append(x1_b)
seen_vertical.add(other_index)
x = int(np.mean(x_values))
output_vertical.append((x, y1, x, y2))
for index, (x1, y1, x2, y2) in enumerate(input_horizontal):
if index in seen_horizontal:
continue
y_values = [y1, y2]
for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal
):
if other_index in seen_horizontal:
continue
if abs(y1 - y1_b) < min_distance:
if x1_b < x1:
x1 = x1_b
if x2_b > x2:
x2 = x2_b
y_values += [y1_b, y2_b]
seen_horizontal.add(other_index)
y = int(np.mean(y_values))
output_horizontal.append((x1, y, x2, y))
return output_vertical, output_horizontal
def connect_lines(horizontal_lines, vertical_lines):
"""
Makes sure the ends of every line are touching another line
Possible improvements:
- Prefer crossing lines in the direction of the end
- e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right
- Make sure the "crossing line" is actually long enough to cross this line
Idea:
- Test and improve this algorithm by
- 1. create lines a la mondrian
    - 2. randomly shorten these lines
    - 3. run the algorithm over the shortened version
- 4. check whether the result is the original
"""
horizontal = []
vertical = []
for x1, y1, x2, y2 in horizontal_lines:
closest_vertical_left = 20000
closest_vertical_right = 20000
for v_x1, v_y1, v_x2, v_y2 in vertical_lines:
if abs(x1 - v_x1) < abs(closest_vertical_left):
closest_vertical_left = x1 - v_x1
if abs(x2 - v_x1) < abs(closest_vertical_right):
closest_vertical_right = x2 - v_x1
x1 = x1 - closest_vertical_left
x2 = x2 - closest_vertical_right
horizontal.append((x1, y1, x2, y2))
for x1, y1, x2, y2 in vertical_lines:
closest_horizontal_up = 20000
closest_horizontal_down = 20000
for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:
if abs(y1 - h_y1) < abs(closest_horizontal_up):
closest_horizontal_up = y1 - h_y1
if abs(y2 - h_y1) < abs(closest_horizontal_down):
closest_horizontal_down = y2 - h_y1
y1 = y1 - closest_horizontal_up
y2 = y2 - closest_horizontal_down
vertical.append((x1, y1, x2, y2))
return horizontal, vertical
def find_rectangles(top_left, bottom_left, bottom_right, top_right):
top_right.sort(key=lambda pos: pos[0])
bottom_left.sort(key=lambda pos: pos[1])
rectangles = []
for x, y in top_left:
a = [tr for tr in top_right if tr[1] == y and tr[0] > x]
b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]
if len(a) == 0 or len(b) == 0:
continue
x2, _a = a[0]
_, y2 = b[0]
w = x2 - x
h = y2 - y
rectangles.append((x, y, w, h))
return rectangles
def find_corners(horizontal, vertical):
top_left = []
top_right = []
bottom_left = []
bottom_right = []
for x_1, y_h, x_2, _ in horizontal:
for x_v, y_1, _, y_2 in vertical:
crossing = x_v, y_h
if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:
if x_1 == x_v:
if y_1 != y_h:
bottom_left.append(crossing)
if y_2 != y_h:
top_left.append(crossing)
elif x_2 == x_v:
if y_1 != y_h:
bottom_right.append(crossing)
if y_2 != y_h:
top_right.append(crossing)
else:
if y_1 != y_h:
top_left.append(crossing)
top_right.append(crossing)
if y_2 != y_h:
bottom_left.append(crossing)
bottom_right.append(crossing)
return top_left, bottom_left, bottom_right, top_right
<|reserved_special_token_1|>
import numpy as np
import cv2
import myrustlib
def detect_lines_hough(img):
lines = cv2.HoughLinesP(
        cv2.bitwise_not(img),
rho = 1,
theta = np.pi / 2,
threshold=50,
minLineLength=120,
maxLineGap=10
)
return [line[0] for line in lines] # weird HoughLinesP output
def detect_lines_rust(img, min_line_length):
height, width = img.shape
white = (img == 255).flatten().tolist()
detected = myrustlib.detect_lines(white, width, height, min_line_length)
return split_by_orientation(detected)
def detect_lines(img, min_line_length):
"""
Custom line detection algorithm
"""
height, width = img.shape
horizontal = []
vertical = []
current_line = False
current_line_start = 0
white = img == 255
for y in range(height):
for x in range(width):
is_white = white.item(y,x)
if(is_white):
if not current_line:
current_line = True
current_line_start = x
else:
if current_line:
current_line = False
if x - current_line_start > min_line_length:
horizontal.append((current_line_start, y, x - 1, y))
if current_line:
current_line = False
if x - current_line_start > min_line_length:
horizontal.append((current_line_start, y, x - 1, y))
current_line = False
current_line_start = 0
for x in range(width):
for y in range(height):
is_white = white.item(y,x)
if(is_white):
if not current_line:
current_line = True
current_line_start = y
else:
if current_line:
current_line = False
if y - current_line_start > min_line_length:
vertical.append((x, y - 1, x, current_line_start))
if current_line:
current_line = False
if y - current_line_start > min_line_length:
vertical.append((x, y - 1, x, current_line_start))
return (horizontal, vertical)
def remove_lines_close_to_border(horizontal, vertical, width, height, min_distance):
horizontal_result = []
vertical_result = []
for h in horizontal:
y = h[1]
if y > min_distance and height - y > min_distance:
horizontal_result.append(h)
for v in vertical:
x = v[0]
if x > min_distance and width - x > min_distance:
vertical_result.append(v)
return (horizontal_result, vertical_result)
def split_by_orientation(lines):
horizontal = []
vertical = []
for x1,y1,x2,y2 in lines:
if (abs(y1-y2) > abs(x1-x2)):
vertical.append((x1,y1,x2,y2))
else:
horizontal.append((x1,y1,x2,y2))
return (horizontal, vertical)
def reduce_lines(input_horizontal, input_vertical, min_distance):
"""
    Takes a list of vertical and horizontal lines and tries to reduce them to
    the essential lines, eliminating lines that are close to each other.
"""
seen_vertical = set()
seen_horizontal = set()
output_vertical = []
output_horizontal = []
# vertical
for index, (x1,y1,x2,y2) in enumerate(input_vertical):
if index in seen_vertical:
continue
x_values = [x1]
for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_vertical):
if other_index in seen_vertical:
continue
if (abs(x1 - x1_b) < min_distance):
# if the end is further to the top, choose this end
if (y2_b < y2):
y2 = y2_b
                # if the start is further to the bottom, choose it
if (y1_b > y1):
y1 = y1_b
x_values.append(x1_b)
seen_vertical.add(other_index)
# taking the average x value for all the lines to get the middle
x = int(np.mean(x_values))
output_vertical.append((x,y1,x,y2))
#horizontal
for index, (x1,y1,x2,y2) in enumerate(input_horizontal):
if index in seen_horizontal:
continue
y_values = [y1, y2]
for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_horizontal):
if other_index in seen_horizontal:
continue
if (abs(y1 - y1_b) < min_distance):
                # if the start is further to the left, choose this point
if (x1_b < x1):
x1 = x1_b
# if the end is further to the right, choose it
if (x2_b > x2):
x2 = x2_b
y_values += [y1_b, y2_b]
seen_horizontal.add(other_index)
# taking the average y value for all the lines to get the middle
y = int(np.mean(y_values))
output_horizontal.append((x1,y,x2,y))
return (output_vertical, output_horizontal)
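

# Illustrative example of reduce_lines() (values chosen for illustration only):
# two nearly coincident vertical lines are merged into one line whose x is the
# mean of the observed x values and whose ends cover the combined extent:
#   reduce_lines([], [(100, 300, 100, 50), (103, 280, 103, 40)], min_distance=5)
#   -> ([(101, 300, 101, 40)], [])
# Note that the function returns (vertical, horizontal), i.e. the reverse of
# the (horizontal, vertical) order used by the other helpers in this module.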
def connect_lines(horizontal_lines, vertical_lines):
"""
Makes sure the ends of every line are touching another line
Possible improvements:
- Prefer crossing lines in the direction of the end
- e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right
- Make sure the "crossing line" is actually long enough to cross this line
Idea:
- Test and improve this algorithm by
- 1. create lines a la mondrian
    - 2. randomly shorten these lines
    - 3. run the algorithm over the shortened version
- 4. check whether the result is the original
"""
horizontal = []
vertical = []
for x1,y1,x2,y2 in horizontal_lines:
closest_vertical_left = 20000
closest_vertical_right = 20000
for v_x1,v_y1,v_x2,v_y2 in vertical_lines:
if abs(x1 - v_x1) < abs(closest_vertical_left):
closest_vertical_left = x1 - v_x1
if abs(x2 - v_x1) < abs(closest_vertical_right):
closest_vertical_right = x2 - v_x1
x1 = x1 - closest_vertical_left
x2 = x2 - closest_vertical_right
horizontal.append((x1,y1,x2,y2))
for x1,y1,x2,y2 in vertical_lines:
closest_horizontal_up = 20000
closest_horizontal_down = 20000
for h_x1,h_y1,h_x2,h_y2 in horizontal_lines:
if abs(y1 - h_y1) < abs(closest_horizontal_up):
closest_horizontal_up = y1 - h_y1
if abs(y2 - h_y1) < abs(closest_horizontal_down):
closest_horizontal_down = y2 - h_y1
y1 = y1 - closest_horizontal_up
y2 = y2 - closest_horizontal_down
vertical.append((x1,y1,x2,y2))
return (horizontal, vertical)
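

def _sketch_test_connect_lines(seed=0):
    """
    Rough sketch of the test idea from the connect_lines() docstring (not part
    of the original module): generate a Mondrian-style grid of lines, randomly
    shorten their ends, run connect_lines() and check that the original grid
    is recovered. Grid size and the amount of shortening are arbitrary choices.
    """
    import random
    random.seed(seed)
    # 1. a small grid: full-width horizontals and full-height verticals
    horizontal = [(0, y, 300, y) for y in (0, 100, 200, 300)]
    vertical = [(x, 300, x, 0) for x in (0, 100, 200, 300)]
    # 2. randomly shorten both ends of every line by a few pixels
    shortened_h = [(x1 + random.randint(0, 5), y1, x2 - random.randint(0, 5), y2)
                   for x1, y1, x2, y2 in horizontal]
    shortened_v = [(x1, y1 - random.randint(0, 5), x2, y2 + random.randint(0, 5))
                   for x1, y1, x2, y2 in vertical]
    # 3. reconnect and 4. compare with the original
    reconnected_h, reconnected_v = connect_lines(shortened_h, shortened_v)
    return reconnected_h == horizontal and reconnected_v == vertical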
def find_rectangles(top_left, bottom_left, bottom_right, top_right):
top_right.sort(key=lambda pos: pos[0])
bottom_left.sort(key=lambda pos: pos[1])
rectangles = []
for x,y in top_left:
a = [tr for tr in top_right if tr[1] == y and tr[0] > x]
b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]
if (len(a) == 0 or len(b) == 0):
continue
x2,_a = a[0]
_,y2 = b[0]
w = x2 - x
h = y2 - y
rectangles.append((x,y,w,h))
return rectangles
def find_corners(horizontal, vertical):
top_left = []
top_right = []
bottom_left = []
bottom_right = []
for x_1,y_h,x_2,_ in horizontal:
for x_v,y_1,_,y_2 in vertical:
crossing = (x_v, y_h)
if (x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2):
if (x_1 == x_v):
# left
if (y_1 != y_h):
bottom_left.append(crossing)
if (y_2 != y_h):
top_left.append(crossing)
elif (x_2 == x_v):
# right
if (y_1 != y_h):
bottom_right.append(crossing)
if (y_2 != y_h):
top_right.append(crossing)
else:
if y_1 != y_h:
top_left.append(crossing)
top_right.append(crossing)
if y_2 != y_h:
bottom_left.append(crossing)
bottom_right.append(crossing)
return (top_left, bottom_left, bottom_right, top_right)
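

# Illustrative end-to-end usage of the module above. The file names, the
# threshold value and the length/distance parameters are assumptions made for
# this example, not values taken from the original code.
if __name__ == '__main__':
    img = cv2.imread('mondrian.png', cv2.IMREAD_GRAYSCALE)
    # binarise so that line pixels are white (255), as detect_lines() expects
    _, binary = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY_INV)
    height, width = binary.shape

    horizontal, vertical = detect_lines(binary, min_line_length=50)
    horizontal, vertical = remove_lines_close_to_border(
        horizontal, vertical, width, height, min_distance=10)
    # note the reversed return order of reduce_lines(): (vertical, horizontal)
    vertical, horizontal = reduce_lines(horizontal, vertical, min_distance=10)
    horizontal, vertical = connect_lines(horizontal, vertical)

    corners = find_corners(horizontal, vertical)
    rectangles = find_rectangles(*corners)
    for x, y, w, h in rectangles:
        cv2.rectangle(img, (x, y), (x + w, y + h), 128, 2)
    cv2.imwrite('rectangles.png', img)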
|
flexible
|
{
"blob_id": "bb5bea4ea100950b59fb2b168b75dec349938aac",
"index": 7195,
"step-1": "<mask token>\n\n\ndef detect_lines_hough(img):\n lines = cv2.HoughLinesP(cv2.bitwise_not(opening), rho=1, theta=np.pi / \n 2, threshold=50, minLineLength=120, maxLineGap=10)\n return [line[0] for line in lines]\n\n\n<mask token>\n\n\ndef detect_lines(img, min_line_length):\n \"\"\"\n Custom line detection algorithm\n \"\"\"\n height, width = img.shape\n horizontal = []\n vertical = []\n current_line = False\n current_line_start = 0\n white = img == 255\n for y in range(height):\n for x in range(width):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = x\n elif current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n current_line = False\n current_line_start = 0\n for x in range(width):\n for y in range(height):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = y\n elif current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n return horizontal, vertical\n\n\n<mask token>\n\n\ndef split_by_orientation(lines):\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in lines:\n if abs(y1 - y2) > abs(x1 - x2):\n vertical.append((x1, y1, x2, y2))\n else:\n horizontal.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef reduce_lines(input_horizontal, input_vertical, min_distance):\n \"\"\"\n Takes a list of vertical and horizontal lines,\n tries to reduce them to essential lines eliminating lines close to each\n other.\n \"\"\"\n seen_vertical = set()\n seen_horizontal = set()\n output_vertical = []\n output_horizontal = []\n for index, (x1, y1, x2, y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if abs(x1 - x1_b) < min_distance:\n if y2_b < y2:\n y2 = y2_b\n if y1_b > y1:\n y1 = y1_b\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n x = int(np.mean(x_values))\n output_vertical.append((x, y1, x, y2))\n for index, (x1, y1, x2, y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal\n ):\n if other_index in seen_horizontal:\n continue\n if abs(y1 - y1_b) < min_distance:\n if x1_b < x1:\n x1 = x1_b\n if x2_b > x2:\n x2 = x2_b\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n y = int(np.mean(y_values))\n output_horizontal.append((x1, y, x2, y))\n return output_vertical, output_horizontal\n\n\ndef connect_lines(horizontal_lines, vertical_lines):\n \"\"\"\n Makes sure the ends of every line are touching another line\n\n Possible improvements:\n - Prefer crossing lines in the direction of the end\n - e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right\n - Make sure the \"crossing line\" is actually long enough to cross this line\n\n Idea:\n - Test and improve this algorithm by\n - 1. create lines a la mondrian\n - 2. randomly shorten this lines\n - 3. 
run the algorithm over the sortened version\n - 4. check whether the result is the original\n \"\"\"\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1, v_y1, v_x2, v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1, y1, x2, y2))\n for x1, y1, x2, y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\n<mask token>\n\n\ndef find_corners(horizontal, vertical):\n top_left = []\n top_right = []\n bottom_left = []\n bottom_right = []\n for x_1, y_h, x_2, _ in horizontal:\n for x_v, y_1, _, y_2 in vertical:\n crossing = x_v, y_h\n if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:\n if x_1 == x_v:\n if y_1 != y_h:\n bottom_left.append(crossing)\n if y_2 != y_h:\n top_left.append(crossing)\n elif x_2 == x_v:\n if y_1 != y_h:\n bottom_right.append(crossing)\n if y_2 != y_h:\n top_right.append(crossing)\n else:\n if y_1 != y_h:\n top_left.append(crossing)\n top_right.append(crossing)\n if y_2 != y_h:\n bottom_left.append(crossing)\n bottom_right.append(crossing)\n return top_left, bottom_left, bottom_right, top_right\n",
"step-2": "<mask token>\n\n\ndef detect_lines_hough(img):\n lines = cv2.HoughLinesP(cv2.bitwise_not(opening), rho=1, theta=np.pi / \n 2, threshold=50, minLineLength=120, maxLineGap=10)\n return [line[0] for line in lines]\n\n\n<mask token>\n\n\ndef detect_lines(img, min_line_length):\n \"\"\"\n Custom line detection algorithm\n \"\"\"\n height, width = img.shape\n horizontal = []\n vertical = []\n current_line = False\n current_line_start = 0\n white = img == 255\n for y in range(height):\n for x in range(width):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = x\n elif current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n current_line = False\n current_line_start = 0\n for x in range(width):\n for y in range(height):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = y\n elif current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n return horizontal, vertical\n\n\n<mask token>\n\n\ndef split_by_orientation(lines):\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in lines:\n if abs(y1 - y2) > abs(x1 - x2):\n vertical.append((x1, y1, x2, y2))\n else:\n horizontal.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef reduce_lines(input_horizontal, input_vertical, min_distance):\n \"\"\"\n Takes a list of vertical and horizontal lines,\n tries to reduce them to essential lines eliminating lines close to each\n other.\n \"\"\"\n seen_vertical = set()\n seen_horizontal = set()\n output_vertical = []\n output_horizontal = []\n for index, (x1, y1, x2, y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if abs(x1 - x1_b) < min_distance:\n if y2_b < y2:\n y2 = y2_b\n if y1_b > y1:\n y1 = y1_b\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n x = int(np.mean(x_values))\n output_vertical.append((x, y1, x, y2))\n for index, (x1, y1, x2, y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal\n ):\n if other_index in seen_horizontal:\n continue\n if abs(y1 - y1_b) < min_distance:\n if x1_b < x1:\n x1 = x1_b\n if x2_b > x2:\n x2 = x2_b\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n y = int(np.mean(y_values))\n output_horizontal.append((x1, y, x2, y))\n return output_vertical, output_horizontal\n\n\ndef connect_lines(horizontal_lines, vertical_lines):\n \"\"\"\n Makes sure the ends of every line are touching another line\n\n Possible improvements:\n - Prefer crossing lines in the direction of the end\n - e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right\n - Make sure the \"crossing line\" is actually long enough to cross this line\n\n Idea:\n - Test and improve this algorithm by\n - 1. create lines a la mondrian\n - 2. randomly shorten this lines\n - 3. 
run the algorithm over the sortened version\n - 4. check whether the result is the original\n \"\"\"\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1, v_y1, v_x2, v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1, y1, x2, y2))\n for x1, y1, x2, y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef find_rectangles(top_left, bottom_left, bottom_right, top_right):\n top_right.sort(key=lambda pos: pos[0])\n bottom_left.sort(key=lambda pos: pos[1])\n rectangles = []\n for x, y in top_left:\n a = [tr for tr in top_right if tr[1] == y and tr[0] > x]\n b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]\n if len(a) == 0 or len(b) == 0:\n continue\n x2, _a = a[0]\n _, y2 = b[0]\n w = x2 - x\n h = y2 - y\n rectangles.append((x, y, w, h))\n return rectangles\n\n\ndef find_corners(horizontal, vertical):\n top_left = []\n top_right = []\n bottom_left = []\n bottom_right = []\n for x_1, y_h, x_2, _ in horizontal:\n for x_v, y_1, _, y_2 in vertical:\n crossing = x_v, y_h\n if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:\n if x_1 == x_v:\n if y_1 != y_h:\n bottom_left.append(crossing)\n if y_2 != y_h:\n top_left.append(crossing)\n elif x_2 == x_v:\n if y_1 != y_h:\n bottom_right.append(crossing)\n if y_2 != y_h:\n top_right.append(crossing)\n else:\n if y_1 != y_h:\n top_left.append(crossing)\n top_right.append(crossing)\n if y_2 != y_h:\n bottom_left.append(crossing)\n bottom_right.append(crossing)\n return top_left, bottom_left, bottom_right, top_right\n",
"step-3": "<mask token>\n\n\ndef detect_lines_hough(img):\n lines = cv2.HoughLinesP(cv2.bitwise_not(opening), rho=1, theta=np.pi / \n 2, threshold=50, minLineLength=120, maxLineGap=10)\n return [line[0] for line in lines]\n\n\n<mask token>\n\n\ndef detect_lines(img, min_line_length):\n \"\"\"\n Custom line detection algorithm\n \"\"\"\n height, width = img.shape\n horizontal = []\n vertical = []\n current_line = False\n current_line_start = 0\n white = img == 255\n for y in range(height):\n for x in range(width):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = x\n elif current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n current_line = False\n current_line_start = 0\n for x in range(width):\n for y in range(height):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = y\n elif current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n return horizontal, vertical\n\n\ndef remove_lines_close_to_border(horizontal, vertical, width, height,\n min_distance):\n horizontal_result = []\n vertical_result = []\n for h in horizontal:\n y = h[1]\n if y > min_distance and height - y > min_distance:\n horizontal_result.append(h)\n for v in vertical:\n x = v[0]\n if x > min_distance and width - x > min_distance:\n vertical_result.append(v)\n return horizontal_result, vertical_result\n\n\ndef split_by_orientation(lines):\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in lines:\n if abs(y1 - y2) > abs(x1 - x2):\n vertical.append((x1, y1, x2, y2))\n else:\n horizontal.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef reduce_lines(input_horizontal, input_vertical, min_distance):\n \"\"\"\n Takes a list of vertical and horizontal lines,\n tries to reduce them to essential lines eliminating lines close to each\n other.\n \"\"\"\n seen_vertical = set()\n seen_horizontal = set()\n output_vertical = []\n output_horizontal = []\n for index, (x1, y1, x2, y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if abs(x1 - x1_b) < min_distance:\n if y2_b < y2:\n y2 = y2_b\n if y1_b > y1:\n y1 = y1_b\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n x = int(np.mean(x_values))\n output_vertical.append((x, y1, x, y2))\n for index, (x1, y1, x2, y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal\n ):\n if other_index in seen_horizontal:\n continue\n if abs(y1 - y1_b) < min_distance:\n if x1_b < x1:\n x1 = x1_b\n if x2_b > x2:\n x2 = x2_b\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n y = int(np.mean(y_values))\n output_horizontal.append((x1, y, x2, y))\n return output_vertical, output_horizontal\n\n\ndef connect_lines(horizontal_lines, vertical_lines):\n \"\"\"\n Makes sure the ends of every line are touching another line\n\n 
Possible improvements:\n - Prefer crossing lines in the direction of the end\n - e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right\n - Make sure the \"crossing line\" is actually long enough to cross this line\n\n Idea:\n - Test and improve this algorithm by\n - 1. create lines a la mondrian\n - 2. randomly shorten this lines\n - 3. run the algorithm over the sortened version\n - 4. check whether the result is the original\n \"\"\"\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1, v_y1, v_x2, v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1, y1, x2, y2))\n for x1, y1, x2, y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef find_rectangles(top_left, bottom_left, bottom_right, top_right):\n top_right.sort(key=lambda pos: pos[0])\n bottom_left.sort(key=lambda pos: pos[1])\n rectangles = []\n for x, y in top_left:\n a = [tr for tr in top_right if tr[1] == y and tr[0] > x]\n b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]\n if len(a) == 0 or len(b) == 0:\n continue\n x2, _a = a[0]\n _, y2 = b[0]\n w = x2 - x\n h = y2 - y\n rectangles.append((x, y, w, h))\n return rectangles\n\n\ndef find_corners(horizontal, vertical):\n top_left = []\n top_right = []\n bottom_left = []\n bottom_right = []\n for x_1, y_h, x_2, _ in horizontal:\n for x_v, y_1, _, y_2 in vertical:\n crossing = x_v, y_h\n if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:\n if x_1 == x_v:\n if y_1 != y_h:\n bottom_left.append(crossing)\n if y_2 != y_h:\n top_left.append(crossing)\n elif x_2 == x_v:\n if y_1 != y_h:\n bottom_right.append(crossing)\n if y_2 != y_h:\n top_right.append(crossing)\n else:\n if y_1 != y_h:\n top_left.append(crossing)\n top_right.append(crossing)\n if y_2 != y_h:\n bottom_left.append(crossing)\n bottom_right.append(crossing)\n return top_left, bottom_left, bottom_right, top_right\n",
"step-4": "<mask token>\n\n\ndef detect_lines_hough(img):\n lines = cv2.HoughLinesP(cv2.bitwise_not(opening), rho=1, theta=np.pi / \n 2, threshold=50, minLineLength=120, maxLineGap=10)\n return [line[0] for line in lines]\n\n\ndef detect_lines_rust(img, min_line_length):\n height, width = img.shape\n white = (img == 255).flatten().tolist()\n detected = myrustlib.detect_lines(white, width, height, min_line_length)\n return split_by_orientation(detected)\n\n\ndef detect_lines(img, min_line_length):\n \"\"\"\n Custom line detection algorithm\n \"\"\"\n height, width = img.shape\n horizontal = []\n vertical = []\n current_line = False\n current_line_start = 0\n white = img == 255\n for y in range(height):\n for x in range(width):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = x\n elif current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n current_line = False\n current_line_start = 0\n for x in range(width):\n for y in range(height):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = y\n elif current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n return horizontal, vertical\n\n\ndef remove_lines_close_to_border(horizontal, vertical, width, height,\n min_distance):\n horizontal_result = []\n vertical_result = []\n for h in horizontal:\n y = h[1]\n if y > min_distance and height - y > min_distance:\n horizontal_result.append(h)\n for v in vertical:\n x = v[0]\n if x > min_distance and width - x > min_distance:\n vertical_result.append(v)\n return horizontal_result, vertical_result\n\n\ndef split_by_orientation(lines):\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in lines:\n if abs(y1 - y2) > abs(x1 - x2):\n vertical.append((x1, y1, x2, y2))\n else:\n horizontal.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef reduce_lines(input_horizontal, input_vertical, min_distance):\n \"\"\"\n Takes a list of vertical and horizontal lines,\n tries to reduce them to essential lines eliminating lines close to each\n other.\n \"\"\"\n seen_vertical = set()\n seen_horizontal = set()\n output_vertical = []\n output_horizontal = []\n for index, (x1, y1, x2, y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if abs(x1 - x1_b) < min_distance:\n if y2_b < y2:\n y2 = y2_b\n if y1_b > y1:\n y1 = y1_b\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n x = int(np.mean(x_values))\n output_vertical.append((x, y1, x, y2))\n for index, (x1, y1, x2, y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal\n ):\n if other_index in seen_horizontal:\n continue\n if abs(y1 - y1_b) < min_distance:\n if x1_b < x1:\n x1 = x1_b\n if x2_b > x2:\n x2 = x2_b\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n y = int(np.mean(y_values))\n 
output_horizontal.append((x1, y, x2, y))\n return output_vertical, output_horizontal\n\n\ndef connect_lines(horizontal_lines, vertical_lines):\n \"\"\"\n Makes sure the ends of every line are touching another line\n\n Possible improvements:\n - Prefer crossing lines in the direction of the end\n - e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right\n - Make sure the \"crossing line\" is actually long enough to cross this line\n\n Idea:\n - Test and improve this algorithm by\n - 1. create lines a la mondrian\n - 2. randomly shorten this lines\n - 3. run the algorithm over the sortened version\n - 4. check whether the result is the original\n \"\"\"\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1, v_y1, v_x2, v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1, y1, x2, y2))\n for x1, y1, x2, y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef find_rectangles(top_left, bottom_left, bottom_right, top_right):\n top_right.sort(key=lambda pos: pos[0])\n bottom_left.sort(key=lambda pos: pos[1])\n rectangles = []\n for x, y in top_left:\n a = [tr for tr in top_right if tr[1] == y and tr[0] > x]\n b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]\n if len(a) == 0 or len(b) == 0:\n continue\n x2, _a = a[0]\n _, y2 = b[0]\n w = x2 - x\n h = y2 - y\n rectangles.append((x, y, w, h))\n return rectangles\n\n\ndef find_corners(horizontal, vertical):\n top_left = []\n top_right = []\n bottom_left = []\n bottom_right = []\n for x_1, y_h, x_2, _ in horizontal:\n for x_v, y_1, _, y_2 in vertical:\n crossing = x_v, y_h\n if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:\n if x_1 == x_v:\n if y_1 != y_h:\n bottom_left.append(crossing)\n if y_2 != y_h:\n top_left.append(crossing)\n elif x_2 == x_v:\n if y_1 != y_h:\n bottom_right.append(crossing)\n if y_2 != y_h:\n top_right.append(crossing)\n else:\n if y_1 != y_h:\n top_left.append(crossing)\n top_right.append(crossing)\n if y_2 != y_h:\n bottom_left.append(crossing)\n bottom_right.append(crossing)\n return top_left, bottom_left, bottom_right, top_right\n",
"step-5": "import numpy as np\nimport cv2\nimport myrustlib\n\ndef detect_lines_hough(img):\n lines = cv2.HoughLinesP(\n cv2.bitwise_not(opening),\n rho = 1,\n theta = np.pi / 2,\n threshold=50,\n minLineLength=120,\n maxLineGap=10\n )\n return [line[0] for line in lines] # weird HoughLinesP output\n\ndef detect_lines_rust(img, min_line_length):\n height, width = img.shape\n white = (img == 255).flatten().tolist()\n detected = myrustlib.detect_lines(white, width, height, min_line_length)\n return split_by_orientation(detected)\n\ndef detect_lines(img, min_line_length):\n \"\"\"\n Custom line detection algorithm\n \"\"\"\n height, width = img.shape\n horizontal = []\n vertical = []\n current_line = False\n current_line_start = 0\n\n white = img == 255\n\n for y in range(height):\n for x in range(width):\n is_white = white.item(y,x)\n if(is_white):\n if not current_line:\n current_line = True\n current_line_start = x\n else:\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n\n current_line = False\n current_line_start = 0\n for x in range(width):\n for y in range(height):\n is_white = white.item(y,x)\n if(is_white):\n if not current_line:\n current_line = True\n current_line_start = y\n else:\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n return (horizontal, vertical)\n\ndef remove_lines_close_to_border(horizontal, vertical, width, height, min_distance):\n horizontal_result = []\n vertical_result = []\n for h in horizontal:\n y = h[1]\n if y > min_distance and height - y > min_distance:\n horizontal_result.append(h)\n for v in vertical:\n x = v[0]\n if x > min_distance and width - x > min_distance:\n vertical_result.append(v)\n return (horizontal_result, vertical_result)\n\n\ndef split_by_orientation(lines):\n horizontal = []\n vertical = []\n for x1,y1,x2,y2 in lines:\n if (abs(y1-y2) > abs(x1-x2)):\n vertical.append((x1,y1,x2,y2))\n else:\n horizontal.append((x1,y1,x2,y2))\n return (horizontal, vertical)\n\ndef reduce_lines(input_horizontal, input_vertical, min_distance):\n \"\"\"\n Takes a list of vertical and horizontal lines,\n tries to reduce them to essential lines eliminating lines close to each\n other.\n \"\"\"\n\n seen_vertical = set()\n seen_horizontal = set()\n output_vertical = []\n output_horizontal = []\n\n # vertical\n for index, (x1,y1,x2,y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if (abs(x1 - x1_b) < min_distance):\n # if the end is further to the top, choose this end\n if (y2_b < y2):\n y2 = y2_b\n # if the start if further to the bottom, choose it\n if (y1_b > y1):\n y1 = y1_b\n\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n\n # taking the average x value for all the lines to get the middle\n x = int(np.mean(x_values))\n output_vertical.append((x,y1,x,y2))\n\n #horizontal\n for index, (x1,y1,x2,y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, 
(x1_b,y1_b,x2_b,y2_b) in enumerate(input_horizontal):\n if other_index in seen_horizontal:\n continue\n if (abs(y1 - y1_b) < min_distance):\n # if the start if further to the left, choose this point\n if (x1_b < x1):\n x1 = x1_b\n # if the end is further to the right, choose it\n if (x2_b > x2):\n x2 = x2_b\n\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n\n # taking the average y value for all the lines to get the middle\n y = int(np.mean(y_values))\n output_horizontal.append((x1,y,x2,y))\n\n return (output_vertical, output_horizontal)\n\n\n\ndef connect_lines(horizontal_lines, vertical_lines):\n \"\"\"\n Makes sure the ends of every line are touching another line\n\n Possible improvements:\n - Prefer crossing lines in the direction of the end\n - e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right\n - Make sure the \"crossing line\" is actually long enough to cross this line\n\n Idea:\n - Test and improve this algorithm by\n - 1. create lines a la mondrian\n - 2. randomly shorten this lines\n - 3. run the algorithm over the sortened version\n - 4. check whether the result is the original\n \"\"\"\n horizontal = []\n vertical = []\n\n for x1,y1,x2,y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1,v_y1,v_x2,v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1,y1,x2,y2))\n\n for x1,y1,x2,y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1,h_y1,h_x2,h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1,y1,x2,y2))\n\n return (horizontal, vertical)\n\n\ndef find_rectangles(top_left, bottom_left, bottom_right, top_right):\n top_right.sort(key=lambda pos: pos[0])\n bottom_left.sort(key=lambda pos: pos[1])\n rectangles = []\n for x,y in top_left:\n a = [tr for tr in top_right if tr[1] == y and tr[0] > x]\n b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]\n if (len(a) == 0 or len(b) == 0):\n continue\n x2,_a = a[0]\n _,y2 = b[0]\n w = x2 - x\n h = y2 - y\n rectangles.append((x,y,w,h))\n return rectangles\n\n\n\ndef find_corners(horizontal, vertical):\n top_left = []\n top_right = []\n bottom_left = []\n bottom_right = []\n\n for x_1,y_h,x_2,_ in horizontal:\n for x_v,y_1,_,y_2 in vertical:\n crossing = (x_v, y_h)\n if (x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2):\n if (x_1 == x_v):\n # left\n if (y_1 != y_h):\n bottom_left.append(crossing)\n if (y_2 != y_h):\n top_left.append(crossing)\n elif (x_2 == x_v):\n # right\n if (y_1 != y_h):\n bottom_right.append(crossing)\n if (y_2 != y_h):\n top_right.append(crossing)\n else:\n if y_1 != y_h:\n top_left.append(crossing)\n top_right.append(crossing)\n if y_2 != y_h:\n bottom_left.append(crossing)\n bottom_right.append(crossing)\n\n return (top_left, bottom_left, bottom_right, top_right)\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def grab_a_ticker(symbol='MSFT', apiKey=None):
if apiKey is None:
apiKey = os.environ.get('API_KEY')
if not check_ticker_exists(symbol) and not check_blacklisted(symbol):
requestUrl = (
'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'
)
metaDataUrl = (
'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'
)
data = get_data(requestUrl.format(symbol, apiKey))
metaData = get_data(metaDataUrl.format(symbol, apiKey))
df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).
transpose()['4. close']).reset_index()
df.columns = ['Date', 'Price']
df['Symbol'] = data['Meta Data']['2. Symbol']
if len(metaData['bestMatches']) > 0:
met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[
'1. symbol', '2. name', '3. type', '4. region']].reset_index(
).drop(['index'], axis=1)
met_df.columns = ['Symbol', 'Name', 'Type', 'Region']
else:
print(metaData.keys())
met_df = pd.DataFrame()
try:
assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol
df.to_sql('time_series', con=get_db(), if_exists='append',
index=False)
met_df.to_sql('stock_meta_data', con=get_db(), if_exists=
'append', index=False)
except AssertionError as e:
print("'Couldn't get it right with assertion error: {}".format(
str(e)))
update_blacklisted(symbol)
except Exception as e:
print(str(e))
update_blacklisted(symbol)
else:
print('Symbol {} already exists.'.format(symbol))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_data(url, delay=20):
while True:
df = json.loads(urllib.request.urlopen(url).read())
if df.get('Note', 0) == 0:
break
        time.sleep(delay)
return df
def grab_a_ticker(symbol='MSFT', apiKey=None):
if apiKey is None:
apiKey = os.environ.get('API_KEY')
if not check_ticker_exists(symbol) and not check_blacklisted(symbol):
requestUrl = (
'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'
)
metaDataUrl = (
'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'
)
data = get_data(requestUrl.format(symbol, apiKey))
metaData = get_data(metaDataUrl.format(symbol, apiKey))
df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).
transpose()['4. close']).reset_index()
df.columns = ['Date', 'Price']
df['Symbol'] = data['Meta Data']['2. Symbol']
if len(metaData['bestMatches']) > 0:
met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[
'1. symbol', '2. name', '3. type', '4. region']].reset_index(
).drop(['index'], axis=1)
met_df.columns = ['Symbol', 'Name', 'Type', 'Region']
else:
print(metaData.keys())
met_df = pd.DataFrame()
try:
assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol
df.to_sql('time_series', con=get_db(), if_exists='append',
index=False)
met_df.to_sql('stock_meta_data', con=get_db(), if_exists=
'append', index=False)
except AssertionError as e:
print("'Couldn't get it right with assertion error: {}".format(
str(e)))
update_blacklisted(symbol)
except Exception as e:
print(str(e))
update_blacklisted(symbol)
else:
print('Symbol {} already exists.'.format(symbol))
<|reserved_special_token_1|>
import json
import os
import time
import urllib.request
import pandas as pd
from lib.db.dbutils import check_blacklisted, check_ticker_exists, get_db, update_blacklisted
def get_data(url, delay=20):
while True:
df = json.loads(urllib.request.urlopen(url).read())
if df.get('Note', 0) == 0:
break
        time.sleep(delay)
return df
def grab_a_ticker(symbol='MSFT', apiKey=None):
if apiKey is None:
apiKey = os.environ.get('API_KEY')
if not check_ticker_exists(symbol) and not check_blacklisted(symbol):
requestUrl = (
'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'
)
metaDataUrl = (
'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'
)
data = get_data(requestUrl.format(symbol, apiKey))
metaData = get_data(metaDataUrl.format(symbol, apiKey))
df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).
transpose()['4. close']).reset_index()
df.columns = ['Date', 'Price']
df['Symbol'] = data['Meta Data']['2. Symbol']
if len(metaData['bestMatches']) > 0:
met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[
'1. symbol', '2. name', '3. type', '4. region']].reset_index(
).drop(['index'], axis=1)
met_df.columns = ['Symbol', 'Name', 'Type', 'Region']
else:
print(metaData.keys())
met_df = pd.DataFrame()
try:
assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol
df.to_sql('time_series', con=get_db(), if_exists='append',
index=False)
met_df.to_sql('stock_meta_data', con=get_db(), if_exists=
'append', index=False)
except AssertionError as e:
print("'Couldn't get it right with assertion error: {}".format(
str(e)))
update_blacklisted(symbol)
except Exception as e:
print(str(e))
update_blacklisted(symbol)
else:
print('Symbol {} already exists.'.format(symbol))
<|reserved_special_token_1|>
import json
import os
import time
import urllib.request
import pandas as pd
from lib.db.dbutils import (
check_blacklisted,
check_ticker_exists,
get_db,
update_blacklisted,
)
def get_data(url, delay=20):
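    # Poll the URL until the JSON response no longer carries a "Note" key, then return the parsed payload.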
while True:
df = json.loads(urllib.request.urlopen(url).read())
if df.get("Note", 0) == 0:
break
        time.sleep(delay)
return df
def grab_a_ticker(symbol="MSFT", apiKey=None):
if apiKey is None:
apiKey = os.environ.get("API_KEY")
# Check if ticker already exists in the database
if not check_ticker_exists(symbol) and not check_blacklisted(symbol):
requestUrl = r"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}"
metaDataUrl = r"https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}"
data = get_data(requestUrl.format(symbol, apiKey))
metaData = get_data(metaDataUrl.format(symbol, apiKey))
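        # Flatten the "Time Series (Daily)" payload into a two-column Date/Price frame and tag every row with the symbol.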
df = pd.DataFrame(
pd.DataFrame(data.get("Time Series (Daily)")).transpose()[
"4. close"
]
).reset_index()
df.columns = ["Date", "Price"]
df["Symbol"] = data["Meta Data"]["2. Symbol"]
if len(metaData["bestMatches"]) > 0:
met_df = (
pd.DataFrame(metaData["bestMatches"][0], index=[0])[
["1. symbol", "2. name", "3. type", "4. region"]
]
.reset_index()
.drop(["index"], axis=1)
)
met_df.columns = ["Symbol", "Name", "Type", "Region"]
else:
print(metaData.keys())
met_df = pd.DataFrame()
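        # Persist prices and metadata only when both frames agree on the symbol; any failure blacklists the ticker instead.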
try:
assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol
df.to_sql(
"time_series", con=get_db(), if_exists="append", index=False
)
met_df.to_sql(
"stock_meta_data",
con=get_db(),
if_exists="append",
index=False,
)
except AssertionError as e:
print(
"'Couldn't get it right with assertion error: {}".format(
str(e)
)
)
update_blacklisted(symbol)
except Exception as e:
print(str(e))
update_blacklisted(symbol)
else:
print("Symbol {} already exists.".format(symbol))
|
flexible
|
{
"blob_id": "3c8e6a93c4d5616b9199cf473d298bfa2dc191af",
"index": 9971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef grab_a_ticker(symbol='MSFT', apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get('API_KEY')\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = (\n 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'\n )\n metaDataUrl = (\n 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'\n )\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).\n transpose()['4. close']).reset_index()\n df.columns = ['Date', 'Price']\n df['Symbol'] = data['Meta Data']['2. Symbol']\n if len(metaData['bestMatches']) > 0:\n met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[\n '1. symbol', '2. name', '3. type', '4. region']].reset_index(\n ).drop(['index'], axis=1)\n met_df.columns = ['Symbol', 'Name', 'Type', 'Region']\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql('time_series', con=get_db(), if_exists='append',\n index=False)\n met_df.to_sql('stock_meta_data', con=get_db(), if_exists=\n 'append', index=False)\n except AssertionError as e:\n print(\"'Couldn't get it right with assertion error: {}\".format(\n str(e)))\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print('Symbol {} already exists.'.format(symbol))\n",
"step-3": "<mask token>\n\n\ndef get_data(url, delay=20):\n while True:\n df = json.loads(urllib.request.urlopen(url).read())\n if df.get('Note', 0) == 0:\n break\n time.sleep(20)\n return df\n\n\ndef grab_a_ticker(symbol='MSFT', apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get('API_KEY')\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = (\n 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'\n )\n metaDataUrl = (\n 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'\n )\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).\n transpose()['4. close']).reset_index()\n df.columns = ['Date', 'Price']\n df['Symbol'] = data['Meta Data']['2. Symbol']\n if len(metaData['bestMatches']) > 0:\n met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[\n '1. symbol', '2. name', '3. type', '4. region']].reset_index(\n ).drop(['index'], axis=1)\n met_df.columns = ['Symbol', 'Name', 'Type', 'Region']\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql('time_series', con=get_db(), if_exists='append',\n index=False)\n met_df.to_sql('stock_meta_data', con=get_db(), if_exists=\n 'append', index=False)\n except AssertionError as e:\n print(\"'Couldn't get it right with assertion error: {}\".format(\n str(e)))\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print('Symbol {} already exists.'.format(symbol))\n",
"step-4": "import json\nimport os\nimport time\nimport urllib.request\nimport pandas as pd\nfrom lib.db.dbutils import check_blacklisted, check_ticker_exists, get_db, update_blacklisted\n\n\ndef get_data(url, delay=20):\n while True:\n df = json.loads(urllib.request.urlopen(url).read())\n if df.get('Note', 0) == 0:\n break\n time.sleep(20)\n return df\n\n\ndef grab_a_ticker(symbol='MSFT', apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get('API_KEY')\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = (\n 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'\n )\n metaDataUrl = (\n 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'\n )\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).\n transpose()['4. close']).reset_index()\n df.columns = ['Date', 'Price']\n df['Symbol'] = data['Meta Data']['2. Symbol']\n if len(metaData['bestMatches']) > 0:\n met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[\n '1. symbol', '2. name', '3. type', '4. region']].reset_index(\n ).drop(['index'], axis=1)\n met_df.columns = ['Symbol', 'Name', 'Type', 'Region']\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql('time_series', con=get_db(), if_exists='append',\n index=False)\n met_df.to_sql('stock_meta_data', con=get_db(), if_exists=\n 'append', index=False)\n except AssertionError as e:\n print(\"'Couldn't get it right with assertion error: {}\".format(\n str(e)))\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print('Symbol {} already exists.'.format(symbol))\n",
"step-5": "import json\nimport os\nimport time\nimport urllib.request\n\nimport pandas as pd\n\nfrom lib.db.dbutils import (\n check_blacklisted,\n check_ticker_exists,\n get_db,\n update_blacklisted,\n)\n\n\ndef get_data(url, delay=20):\n while True:\n df = json.loads(urllib.request.urlopen(url).read())\n if df.get(\"Note\", 0) == 0:\n break\n time.sleep(20)\n return df\n\n\ndef grab_a_ticker(symbol=\"MSFT\", apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get(\"API_KEY\")\n # Check if ticker already exists in the database\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = r\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}\"\n metaDataUrl = r\"https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}\"\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(\n pd.DataFrame(data.get(\"Time Series (Daily)\")).transpose()[\n \"4. close\"\n ]\n ).reset_index()\n\n df.columns = [\"Date\", \"Price\"]\n df[\"Symbol\"] = data[\"Meta Data\"][\"2. Symbol\"]\n if len(metaData[\"bestMatches\"]) > 0:\n met_df = (\n pd.DataFrame(metaData[\"bestMatches\"][0], index=[0])[\n [\"1. symbol\", \"2. name\", \"3. type\", \"4. region\"]\n ]\n .reset_index()\n .drop([\"index\"], axis=1)\n )\n met_df.columns = [\"Symbol\", \"Name\", \"Type\", \"Region\"]\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql(\n \"time_series\", con=get_db(), if_exists=\"append\", index=False\n )\n met_df.to_sql(\n \"stock_meta_data\",\n con=get_db(),\n if_exists=\"append\",\n index=False,\n )\n except AssertionError as e:\n print(\n \"'Couldn't get it right with assertion error: {}\".format(\n str(e)\n )\n )\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print(\"Symbol {} already exists.\".format(symbol))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from svjesus.ffz import genContent
from svjesus.elements.Base import Element
class Descriptive(Element):
def __init__(self):
self.allowedChildren = () # TODO: Check what's allowed
# Descriptive elements
class Desc(Descriptive):
name = "desc"
attrs = ()
class Metadata(Descriptive):
name = "metadata"
attrs = ()
class Title(Descriptive):
name = "title"
attrs = ()
|
normal
|
{
"blob_id": "178570047458eb3eeda00f9153ef2159eb4cbef3",
"index": 9188,
"step-1": "<mask token>\n\n\nclass Desc(Descriptive):\n name = 'desc'\n attrs = ()\n\n\nclass Metadata(Descriptive):\n name = 'metadata'\n attrs = ()\n\n\nclass Title(Descriptive):\n name = 'title'\n attrs = ()\n",
"step-2": "<mask token>\n\n\nclass Descriptive(Element):\n <mask token>\n\n\nclass Desc(Descriptive):\n name = 'desc'\n attrs = ()\n\n\nclass Metadata(Descriptive):\n name = 'metadata'\n attrs = ()\n\n\nclass Title(Descriptive):\n name = 'title'\n attrs = ()\n",
"step-3": "<mask token>\n\n\nclass Descriptive(Element):\n\n def __init__(self):\n self.allowedChildren = ()\n\n\nclass Desc(Descriptive):\n name = 'desc'\n attrs = ()\n\n\nclass Metadata(Descriptive):\n name = 'metadata'\n attrs = ()\n\n\nclass Title(Descriptive):\n name = 'title'\n attrs = ()\n",
"step-4": "from svjesus.ffz import genContent\nfrom svjesus.elements.Base import Element\n\n\nclass Descriptive(Element):\n\n def __init__(self):\n self.allowedChildren = ()\n\n\nclass Desc(Descriptive):\n name = 'desc'\n attrs = ()\n\n\nclass Metadata(Descriptive):\n name = 'metadata'\n attrs = ()\n\n\nclass Title(Descriptive):\n name = 'title'\n attrs = ()\n",
"step-5": "from svjesus.ffz import genContent\nfrom svjesus.elements.Base import Element\n\nclass Descriptive(Element):\n\tdef __init__(self):\n\t\tself.allowedChildren = () # TODO: Check what's allowed\n\n# Descriptive elements\nclass Desc(Descriptive):\n\tname = \"desc\"\n\tattrs = ()\n\nclass Metadata(Descriptive):\n\tname = \"metadata\"\n\tattrs = ()\n\nclass Title(Descriptive):\n\tname = \"title\"\n\tattrs = ()",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
def submitAction():
for i in userDict:
print(f'{userDict.get(i).get()}')
exit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
win.title('Loop')
<|reserved_special_token_0|>
for i in range(len(labels)):
currentLabel = ttk.Label(win, text=labels[i])
currentLabel.grid(row=i, column=0, sticky=tk.W)
<|reserved_special_token_0|>
for i in userDict:
currentEntryBox = ttk.Entry(win, width=16, textvariable=userDict[i])
currentEntryBox.grid(row=index, column=1)
index += 1
def submitAction():
for i in userDict:
print(f'{userDict.get(i).get()}')
exit()
<|reserved_special_token_0|>
submitButton.grid(row=index, column=0)
submitButton.configure(foreground='#ffffff', background='#000000')
win.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
win = tk.Tk()
win.title('Loop')
labels = [f'name : ', 'age : ', 'mail ID : ', 'city : ', 'country : ',
'phone number : ']
for i in range(len(labels)):
currentLabel = ttk.Label(win, text=labels[i])
currentLabel.grid(row=i, column=0, sticky=tk.W)
userDict = {'name': tk.StringVar(), 'age': tk.StringVar(), 'mail': tk.
StringVar(), 'city': tk.StringVar(), 'country': tk.StringVar(), 'phone':
tk.StringVar()}
index = 0
for i in userDict:
currentEntryBox = ttk.Entry(win, width=16, textvariable=userDict[i])
currentEntryBox.grid(row=index, column=1)
index += 1
def submitAction():
for i in userDict:
print(f'{userDict.get(i).get()}')
exit()
submitButton = tk.Button(win, text='Submit', command=submitAction)
submitButton.grid(row=index, column=0)
submitButton.configure(foreground='#ffffff', background='#000000')
win.mainloop()
<|reserved_special_token_1|>
import tkinter as tk
from tkinter import ttk
win = tk.Tk()
win.title('Loop')
labels = [f'name : ', 'age : ', 'mail ID : ', 'city : ', 'country : ',
'phone number : ']
for i in range(len(labels)):
currentLabel = ttk.Label(win, text=labels[i])
currentLabel.grid(row=i, column=0, sticky=tk.W)
userDict = {'name': tk.StringVar(), 'age': tk.StringVar(), 'mail': tk.
StringVar(), 'city': tk.StringVar(), 'country': tk.StringVar(), 'phone':
tk.StringVar()}
index = 0
for i in userDict:
currentEntryBox = ttk.Entry(win, width=16, textvariable=userDict[i])
currentEntryBox.grid(row=index, column=1)
index += 1
def submitAction():
for i in userDict:
print(f'{userDict.get(i).get()}')
exit()
submitButton = tk.Button(win, text='Submit', command=submitAction)
submitButton.grid(row=index, column=0)
submitButton.configure(foreground='#ffffff', background='#000000')
win.mainloop()
<|reserved_special_token_1|>
import tkinter as tk
from tkinter import ttk
win = tk.Tk()
win.title('Loop')
############ Time consuming :
# nameLable1 = ttk.Label(win,text="Enter your name : ")
# nameLable1.grid(row=0,column=0,sticky=tk.W)
# ageLable1 = ttk.Label(win,text="Enter your age: ")
# ageLable1.grid(row=1,column=0,sticky=tk.W)
# countryLable1 = ttk.Label(win,text="Enter your country: ")
# countryLable1.grid(row=2,column=0,sticky=tk.W)
# mailLable1 = ttk.Label(win,text="Enter your mail ID : ")
# mailLable1.grid(row=3,column=0,sticky=tk.W)
############ Loop :
labels = [f"name : ","age : ","mail ID : ","city : " ,"country : ","phone number : "]
for i in range(len(labels)):
# currentLabel = f"label{i}" # without declaring also it will work fine
currentLabel = ttk.Label(win,text=labels[i])
currentLabel.grid(row=i,column=0,sticky=tk.W)
userDict = {
'name':tk.StringVar(),
'age':tk.StringVar(),
'mail':tk.StringVar(),
'city':tk.StringVar(),
'country':tk.StringVar(),
'phone':tk.StringVar(),
}
index = 0
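# Build one Entry box per field, placing it beside the matching label row created above.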
for i in userDict:
# currentEntryBox = f"entry{i}"
currentEntryBox = ttk.Entry(win,width = 16,textvariable = userDict[i])
currentEntryBox.grid(row = index,column = 1)
index+=1
def submitAction():
for i in userDict:
# print(f"{userDict[i].get()}")
print(f"{userDict.get(i).get()}") # their is get() method in dictionary too
exit()
submitButton = tk.Button(win,text="Submit",command = submitAction)
submitButton.grid(row = index,column = 0)
submitButton.configure(foreground = "#ffffff",background = "#000000")
################################################
win.mainloop()
###########################################################################################################
##########################################################################################################
|
flexible
|
{
"blob_id": "2eb49d08136c3540e1305310f03255e2ecbf0c40",
"index": 3175,
"step-1": "<mask token>\n\n\ndef submitAction():\n for i in userDict:\n print(f'{userDict.get(i).get()}')\n exit()\n\n\n<mask token>\n",
"step-2": "<mask token>\nwin.title('Loop')\n<mask token>\nfor i in range(len(labels)):\n currentLabel = ttk.Label(win, text=labels[i])\n currentLabel.grid(row=i, column=0, sticky=tk.W)\n<mask token>\nfor i in userDict:\n currentEntryBox = ttk.Entry(win, width=16, textvariable=userDict[i])\n currentEntryBox.grid(row=index, column=1)\n index += 1\n\n\ndef submitAction():\n for i in userDict:\n print(f'{userDict.get(i).get()}')\n exit()\n\n\n<mask token>\nsubmitButton.grid(row=index, column=0)\nsubmitButton.configure(foreground='#ffffff', background='#000000')\nwin.mainloop()\n",
"step-3": "<mask token>\nwin = tk.Tk()\nwin.title('Loop')\nlabels = [f'name : ', 'age : ', 'mail ID : ', 'city : ', 'country : ',\n 'phone number : ']\nfor i in range(len(labels)):\n currentLabel = ttk.Label(win, text=labels[i])\n currentLabel.grid(row=i, column=0, sticky=tk.W)\nuserDict = {'name': tk.StringVar(), 'age': tk.StringVar(), 'mail': tk.\n StringVar(), 'city': tk.StringVar(), 'country': tk.StringVar(), 'phone':\n tk.StringVar()}\nindex = 0\nfor i in userDict:\n currentEntryBox = ttk.Entry(win, width=16, textvariable=userDict[i])\n currentEntryBox.grid(row=index, column=1)\n index += 1\n\n\ndef submitAction():\n for i in userDict:\n print(f'{userDict.get(i).get()}')\n exit()\n\n\nsubmitButton = tk.Button(win, text='Submit', command=submitAction)\nsubmitButton.grid(row=index, column=0)\nsubmitButton.configure(foreground='#ffffff', background='#000000')\nwin.mainloop()\n",
"step-4": "import tkinter as tk\nfrom tkinter import ttk\nwin = tk.Tk()\nwin.title('Loop')\nlabels = [f'name : ', 'age : ', 'mail ID : ', 'city : ', 'country : ',\n 'phone number : ']\nfor i in range(len(labels)):\n currentLabel = ttk.Label(win, text=labels[i])\n currentLabel.grid(row=i, column=0, sticky=tk.W)\nuserDict = {'name': tk.StringVar(), 'age': tk.StringVar(), 'mail': tk.\n StringVar(), 'city': tk.StringVar(), 'country': tk.StringVar(), 'phone':\n tk.StringVar()}\nindex = 0\nfor i in userDict:\n currentEntryBox = ttk.Entry(win, width=16, textvariable=userDict[i])\n currentEntryBox.grid(row=index, column=1)\n index += 1\n\n\ndef submitAction():\n for i in userDict:\n print(f'{userDict.get(i).get()}')\n exit()\n\n\nsubmitButton = tk.Button(win, text='Submit', command=submitAction)\nsubmitButton.grid(row=index, column=0)\nsubmitButton.configure(foreground='#ffffff', background='#000000')\nwin.mainloop()\n",
"step-5": "import tkinter as tk\nfrom tkinter import ttk\n\n\nwin = tk.Tk()\n\nwin.title('Loop')\n\n\n############ Time consuming :\n\n# nameLable1 = ttk.Label(win,text=\"Enter your name : \") \n# nameLable1.grid(row=0,column=0,sticky=tk.W) \n\n# ageLable1 = ttk.Label(win,text=\"Enter your age: \") \n# ageLable1.grid(row=1,column=0,sticky=tk.W) \n\n# countryLable1 = ttk.Label(win,text=\"Enter your country: \") \n# countryLable1.grid(row=2,column=0,sticky=tk.W)\n\n# mailLable1 = ttk.Label(win,text=\"Enter your mail ID : \") \n# mailLable1.grid(row=3,column=0,sticky=tk.W) \n\n\n############ Loop :\n\n\nlabels = [f\"name : \",\"age : \",\"mail ID : \",\"city : \" ,\"country : \",\"phone number : \"]\n\nfor i in range(len(labels)):\n # currentLabel = f\"label{i}\" # without declaring also it will work fine\n currentLabel = ttk.Label(win,text=labels[i]) \n currentLabel.grid(row=i,column=0,sticky=tk.W) \n\nuserDict = {\n 'name':tk.StringVar(),\n 'age':tk.StringVar(),\n 'mail':tk.StringVar(),\n 'city':tk.StringVar(),\n 'country':tk.StringVar(),\n 'phone':tk.StringVar(),\n}\n\nindex = 0\n\nfor i in userDict:\n # currentEntryBox = f\"entry{i}\"\n currentEntryBox = ttk.Entry(win,width = 16,textvariable = userDict[i])\n currentEntryBox.grid(row = index,column = 1)\n index+=1\n\ndef submitAction():\n for i in userDict:\n # print(f\"{userDict[i].get()}\")\n print(f\"{userDict.get(i).get()}\") # their is get() method in dictionary too\n exit()\n\n\nsubmitButton = tk.Button(win,text=\"Submit\",command = submitAction)\nsubmitButton.grid(row = index,column = 0)\nsubmitButton.configure(foreground = \"#ffffff\",background = \"#000000\")\n\n################################################\n\nwin.mainloop()\n\n\n###########################################################################################################\n##########################################################################################################",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
#Autor: Jesus Fabian Cubas <[email protected]>
#if
sesion = 2
if sesion == 1 :
print 'estamos en la sesion 01'
elif sesion == 2 :
print 'estamos en la sesion 02'
else :
print 'no estamos en la sesion 01'
#while
edad = 0
while edad < 18 :
edad = edad + 1
print edad
#for
lista = ["a", "b", "c", "d"]
for elemento in lista :
print elemento
|
normal
|
{
"blob_id": "64c4b64b6fb0cfa25c17f66243c60a5dc0166017",
"index": 7698,
"step-1": "#!/usr/bin/python\n#Autor: Jesus Fabian Cubas <[email protected]>\n\n#if\nsesion = 2\nif sesion == 1 :\n\tprint 'estamos en la sesion 01'\nelif sesion == 2 :\n\tprint 'estamos en la sesion 02'\nelse :\n\tprint 'no estamos en la sesion 01'\n\n#while\nedad = 0\nwhile edad < 18 :\n\tedad = edad + 1\nprint edad\n\n#for\nlista = [\"a\", \"b\", \"c\", \"d\"]\nfor elemento in lista :\n\tprint elemento\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""After seeing how great the lmfit package, I was inspired to create my own
object using it. This acts as a fitting template.
"""
##-------------------------------PREAMBLE-----------------------------------##
import numpy as np
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, fit_report
import logging
##-------------------------------CLASS DEFINITION-----------------------------------##
class FitTemplate():
def __init__(self, fit_function, log_dir = None):
self.fit_function = fit_function
self.parameters = Parameters()
self.fit_result = None
#setup logging. warning level is standard and is sent to stdout. info is requested by log_dir argument,
#and is printed to log file
if log_dir is not None:
logging.basicConfig(filename=log_dir +'log.log', level=logging.INFO)
else:
logging.basicConfig(level=logging.CRITICAL)
def residuals_wrapper(self, parameters, x, data,weights,**kwargs):
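        # Evaluate the model at the current parameter values and return the weighted, squared residuals for lmfit to minimise.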
model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
return ((model_values - data)*weights)**2
def do_minimisation(self, x, data, weights = 1, **kwargs):
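        # Run lmfit's minimize() over the stored Parameters and cache the result for later reporting and plotting.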
self.fit_result = minimize(self.residuals_wrapper, self.parameters, args = (x, data, weights), kws = kwargs)
logging.info('Fit Result')
logging.info('==========')
return self.fit_result
def get_opt_parameters(self):
if self.fit_result is None:
raise ValueError("No fit result! Do a fit before asking for")
return self.fit_result.params.valuesdict()
def print_parameters(self):
self.parameters.pretty_print()
def print_fit_result(self):
logging.info((fit_report(self.fit_result)))
print(fit_report(self.fit_result))
def plot_fit(self, x, y, xlabel = None, ylabel = None, title = None, errorbars = None, label = None, ax = None, c = None, colour_index = None, **kwargs):
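        # Scatter the data (with optional error bars) and overlay the fitted model evaluated on a dense grid between the first and last x values.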
if ax is None:
_, ax = plt.subplots(1 ,1, constrained_layout=True, figsize=(18, 9))
plt.rcParams.update({'font.size': 16})
colours = ['b','m','c','r','tab:orange', 'tab:pink']
#decide colour
if c is not None:
color = c
elif colour_index is not None:
color = colours[colour_index]
else:
color = colours[0]
#scatter plot
ax.scatter(x, y, color = color)
#plot errors
if errorbars is not None:
ax.errorbar(x, y, errorbars, ls = 'none', c = color, capsize = 3)
#plot model
fitdomain = np.linspace(x[0], x[-1], 1000)
ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.params.valuesdict(), **kwargs), c = color, label = label)
plt.legend()
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
plt.grid()
return ax
|
normal
|
{
"blob_id": "9e16921d83a5f62aad694b26a92b57b97ccda461",
"index": 1651,
"step-1": "<mask token>\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n <mask token>\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n <mask token>\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-2": "<mask token>\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n <mask token>\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print()\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-3": "<mask token>\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n\n def do_minimisation(self, x, data, weights=1, **kwargs):\n self.fit_result = minimize(self.residuals_wrapper, self.parameters,\n args=(x, data, weights), kws=kwargs)\n logging.info('Fit Result')\n logging.info('==========')\n return self.fit_result\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print()\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom lmfit import minimize, Parameters, fit_report\nimport logging\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n\n def do_minimisation(self, x, data, weights=1, **kwargs):\n self.fit_result = minimize(self.residuals_wrapper, self.parameters,\n args=(x, data, weights), kws=kwargs)\n logging.info('Fit Result')\n logging.info('==========')\n return self.fit_result\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print()\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-5": "\"\"\"After seeing how great the lmfit package, I was inspired to create my own\nobject using it. This acts as a fitting template. \n\"\"\"\n##-------------------------------PREAMBLE-----------------------------------##\nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom lmfit import minimize, Parameters, fit_report \nimport logging \n\n##-------------------------------CLASS DEFINITION-----------------------------------##\n\nclass FitTemplate(): \n def __init__(self, fit_function, log_dir = None):\n self.fit_function = fit_function \n self.parameters = Parameters()\n self.fit_result = None\n\n #setup logging. warning level is standard and is sent to stdout. info is requested by log_dir argument,\n #and is printed to log file\n if log_dir is not None: \n logging.basicConfig(filename=log_dir +'log.log', level=logging.INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n \n\n def residuals_wrapper(self, parameters, x, data,weights,**kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data)*weights)**2\n \n def do_minimisation(self, x, data, weights = 1, **kwargs):\n self.fit_result = minimize(self.residuals_wrapper, self.parameters, args = (x, data, weights), kws = kwargs)\n logging.info('Fit Result')\n logging.info('==========')\n return self.fit_result\n\n def get_opt_parameters(self):\n if self.fit_result is None: \n raise ValueError(\"No fit result! Do a fit before asking for\")\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print() \n \n def print_fit_result(self):\n logging.info((fit_report(self.fit_result)))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel = None, ylabel = None, title = None, errorbars = None, label = None, ax = None, c = None, colour_index = None, **kwargs): \n\n if ax is None:\n _, ax = plt.subplots(1\t,1, constrained_layout=True, figsize=(18, 9))\n plt.rcParams.update({'font.size': 16}) \n colours = ['b','m','c','r','tab:orange', 'tab:pink']\n\n #decide colour \n if c is not None: \n color = c \n elif colour_index is not None: \n color = colours[colour_index]\n else: \n color = colours[0]\n\n #scatter plot\n ax.scatter(x, y, color = color)\n #plot errors\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls = 'none', c = color, capsize = 3)\n #plot model\n fitdomain = np.linspace(x[0], x[-1], 1000)\t\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.params.valuesdict(), **kwargs), c = color, label = label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax \n \n \t\t\n \n \n \n\n\n\n\n\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
class Client(OpenApiClient):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:
str, network: str, suffix: str, endpoint_map: Dict[str, str],
endpoint: str) ->str:
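        # Prefer an explicitly supplied endpoint, then a per-region entry from endpoint_map, falling back to the generated endpoint rules.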
if not UtilClient.empty(endpoint):
return endpoint
if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(
endpoint_map.get(region_id)):
return endpoint_map.get(region_id)
return EndpointUtilClient.get_endpoint_rules(product_id, region_id,
endpoint_rule, network, suffix)
def get_cdt_service_status_with_options(self, request:
cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models
.RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:
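        # Serialize the request into the RPC body and call the GetCdtServiceStatus API synchronously.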
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtServiceStatusResponse(), self.do_rpcrequest(
'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
async def get_cdt_service_status_with_options_async(self, request:
cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models
.RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(
'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
<|reserved_special_token_0|>
async def get_cdt_service_status_async(self, request:
cdt20210813_models.GetCdtServiceStatusRequest
) ->cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_service_status_with_options_async(request,
runtime)
<|reserved_special_token_0|>
async def open_cdt_service_with_options_async(self, request:
cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.
RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),
await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',
'HTTPS', 'POST', 'AK', 'json', req, runtime))
def open_cdt_service(self, request: cdt20210813_models.
OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_service_with_options(request, runtime)
async def open_cdt_service_async(self, request: cdt20210813_models.
OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_service_with_options_async(request, runtime)
def get_cdt_cb_service_status_with_options(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:
util_models.RuntimeOptions
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtCbServiceStatusResponse(), self.do_rpcrequest(
'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
async def get_cdt_cb_service_status_with_options_async(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:
util_models.RuntimeOptions
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async
('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
def get_cdt_cb_service_status(self, request: cdt20210813_models.
GetCdtCbServiceStatusRequest
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_cb_service_status_with_options(request, runtime)
async def get_cdt_cb_service_status_async(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_cb_service_status_with_options_async(request,
runtime)
def open_cdt_cb_service_with_options(self, request: cdt20210813_models.
OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions
) ->cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse
(), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',
'HTTPS', 'POST', 'AK', 'json', req, runtime))
async def open_cdt_cb_service_with_options_async(self, request:
cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.
RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse
(), await self.do_rpcrequest_async('OpenCdtCbService',
'2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))
def open_cdt_cb_service(self, request: cdt20210813_models.
OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_cb_service_with_options(request, runtime)
async def open_cdt_cb_service_async(self, request: cdt20210813_models.
OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_cb_service_with_options_async(request,
runtime)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Client(OpenApiClient):
<|reserved_special_token_0|>
def __init__(self, config: open_api_models.Config):
super().__init__(config)
self._endpoint_rule = ''
self.check_config(config)
self._endpoint = self.get_endpoint('cdt', self._region_id, self.
_endpoint_rule, self._network, self._suffix, self._endpoint_map,
self._endpoint)
def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:
str, network: str, suffix: str, endpoint_map: Dict[str, str],
endpoint: str) ->str:
if not UtilClient.empty(endpoint):
return endpoint
if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(
endpoint_map.get(region_id)):
return endpoint_map.get(region_id)
return EndpointUtilClient.get_endpoint_rules(product_id, region_id,
endpoint_rule, network, suffix)
def get_cdt_service_status_with_options(self, request:
cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models
.RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtServiceStatusResponse(), self.do_rpcrequest(
'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
async def get_cdt_service_status_with_options_async(self, request:
cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models
.RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(
'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
def get_cdt_service_status(self, request: cdt20210813_models.
GetCdtServiceStatusRequest
) ->cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_service_status_with_options(request, runtime)
async def get_cdt_service_status_async(self, request:
cdt20210813_models.GetCdtServiceStatusRequest
) ->cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_service_status_with_options_async(request,
runtime)
<|reserved_special_token_0|>
async def open_cdt_service_with_options_async(self, request:
cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.
RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),
await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',
'HTTPS', 'POST', 'AK', 'json', req, runtime))
def open_cdt_service(self, request: cdt20210813_models.
OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_service_with_options(request, runtime)
async def open_cdt_service_async(self, request: cdt20210813_models.
OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_service_with_options_async(request, runtime)
def get_cdt_cb_service_status_with_options(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:
util_models.RuntimeOptions
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtCbServiceStatusResponse(), self.do_rpcrequest(
'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
async def get_cdt_cb_service_status_with_options_async(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:
util_models.RuntimeOptions
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async
('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
def get_cdt_cb_service_status(self, request: cdt20210813_models.
GetCdtCbServiceStatusRequest
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_cb_service_status_with_options(request, runtime)
async def get_cdt_cb_service_status_async(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_cb_service_status_with_options_async(request,
runtime)
def open_cdt_cb_service_with_options(self, request: cdt20210813_models.
OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions
) ->cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse
(), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',
'HTTPS', 'POST', 'AK', 'json', req, runtime))
async def open_cdt_cb_service_with_options_async(self, request:
cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.
RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse
(), await self.do_rpcrequest_async('OpenCdtCbService',
'2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))
def open_cdt_cb_service(self, request: cdt20210813_models.
OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_cb_service_with_options(request, runtime)
async def open_cdt_cb_service_async(self, request: cdt20210813_models.
OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_cb_service_with_options_async(request,
runtime)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Client(OpenApiClient):
<|reserved_special_token_0|>
def __init__(self, config: open_api_models.Config):
super().__init__(config)
self._endpoint_rule = ''
self.check_config(config)
self._endpoint = self.get_endpoint('cdt', self._region_id, self.
_endpoint_rule, self._network, self._suffix, self._endpoint_map,
self._endpoint)
def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:
str, network: str, suffix: str, endpoint_map: Dict[str, str],
endpoint: str) ->str:
if not UtilClient.empty(endpoint):
return endpoint
if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(
endpoint_map.get(region_id)):
return endpoint_map.get(region_id)
return EndpointUtilClient.get_endpoint_rules(product_id, region_id,
endpoint_rule, network, suffix)
def get_cdt_service_status_with_options(self, request:
cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models
.RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtServiceStatusResponse(), self.do_rpcrequest(
'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
async def get_cdt_service_status_with_options_async(self, request:
cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models
.RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(
'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
def get_cdt_service_status(self, request: cdt20210813_models.
GetCdtServiceStatusRequest
) ->cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_service_status_with_options(request, runtime)
async def get_cdt_service_status_async(self, request:
cdt20210813_models.GetCdtServiceStatusRequest
) ->cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_service_status_with_options_async(request,
runtime)
def open_cdt_service_with_options(self, request: cdt20210813_models.
OpenCdtServiceRequest, runtime: util_models.RuntimeOptions
) ->cdt20210813_models.OpenCdtServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),
self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS',
'POST', 'AK', 'json', req, runtime))
async def open_cdt_service_with_options_async(self, request:
cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.
RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),
await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',
'HTTPS', 'POST', 'AK', 'json', req, runtime))
def open_cdt_service(self, request: cdt20210813_models.
OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_service_with_options(request, runtime)
async def open_cdt_service_async(self, request: cdt20210813_models.
OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_service_with_options_async(request, runtime)
def get_cdt_cb_service_status_with_options(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:
util_models.RuntimeOptions
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtCbServiceStatusResponse(), self.do_rpcrequest(
'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
async def get_cdt_cb_service_status_with_options_async(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:
util_models.RuntimeOptions
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async
('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
def get_cdt_cb_service_status(self, request: cdt20210813_models.
GetCdtCbServiceStatusRequest
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_cb_service_status_with_options(request, runtime)
async def get_cdt_cb_service_status_async(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_cb_service_status_with_options_async(request,
runtime)
def open_cdt_cb_service_with_options(self, request: cdt20210813_models.
OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions
) ->cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse
(), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',
'HTTPS', 'POST', 'AK', 'json', req, runtime))
async def open_cdt_cb_service_with_options_async(self, request:
cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.
RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse
(), await self.do_rpcrequest_async('OpenCdtCbService',
'2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))
def open_cdt_cb_service(self, request: cdt20210813_models.
OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_cb_service_with_options(request, runtime)
async def open_cdt_cb_service_async(self, request: cdt20210813_models.
OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_cb_service_with_options_async(request,
runtime)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Client(OpenApiClient):
"""
* """
def __init__(self, config: open_api_models.Config):
super().__init__(config)
self._endpoint_rule = ''
self.check_config(config)
self._endpoint = self.get_endpoint('cdt', self._region_id, self.
_endpoint_rule, self._network, self._suffix, self._endpoint_map,
self._endpoint)
def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:
str, network: str, suffix: str, endpoint_map: Dict[str, str],
endpoint: str) ->str:
if not UtilClient.empty(endpoint):
return endpoint
if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(
endpoint_map.get(region_id)):
return endpoint_map.get(region_id)
return EndpointUtilClient.get_endpoint_rules(product_id, region_id,
endpoint_rule, network, suffix)
def get_cdt_service_status_with_options(self, request:
cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models
.RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtServiceStatusResponse(), self.do_rpcrequest(
'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
async def get_cdt_service_status_with_options_async(self, request:
cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models
.RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(
'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
def get_cdt_service_status(self, request: cdt20210813_models.
GetCdtServiceStatusRequest
) ->cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_service_status_with_options(request, runtime)
async def get_cdt_service_status_async(self, request:
cdt20210813_models.GetCdtServiceStatusRequest
) ->cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_service_status_with_options_async(request,
runtime)
def open_cdt_service_with_options(self, request: cdt20210813_models.
OpenCdtServiceRequest, runtime: util_models.RuntimeOptions
) ->cdt20210813_models.OpenCdtServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),
self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS',
'POST', 'AK', 'json', req, runtime))
async def open_cdt_service_with_options_async(self, request:
cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.
RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),
await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',
'HTTPS', 'POST', 'AK', 'json', req, runtime))
def open_cdt_service(self, request: cdt20210813_models.
OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_service_with_options(request, runtime)
async def open_cdt_service_async(self, request: cdt20210813_models.
OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_service_with_options_async(request, runtime)
def get_cdt_cb_service_status_with_options(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:
util_models.RuntimeOptions
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtCbServiceStatusResponse(), self.do_rpcrequest(
'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
async def get_cdt_cb_service_status_with_options_async(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:
util_models.RuntimeOptions
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.
GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async
('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',
'json', req, runtime))
def get_cdt_cb_service_status(self, request: cdt20210813_models.
GetCdtCbServiceStatusRequest
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_cb_service_status_with_options(request, runtime)
async def get_cdt_cb_service_status_async(self, request:
cdt20210813_models.GetCdtCbServiceStatusRequest
) ->cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_cb_service_status_with_options_async(request,
runtime)
def open_cdt_cb_service_with_options(self, request: cdt20210813_models.
OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions
) ->cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse
(), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',
'HTTPS', 'POST', 'AK', 'json', req, runtime))
async def open_cdt_cb_service_with_options_async(self, request:
cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.
RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))
return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse
(), await self.do_rpcrequest_async('OpenCdtCbService',
'2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))
def open_cdt_cb_service(self, request: cdt20210813_models.
OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_cb_service_with_options(request, runtime)
async def open_cdt_cb_service_async(self, request: cdt20210813_models.
OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_cb_service_with_options_async(request,
runtime)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from typing import Dict
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_endpoint_util.client import Client as EndpointUtilClient
from alibabacloud_cdt20210813 import models as cdt20210813_models
from alibabacloud_tea_util import models as util_models
class Client(OpenApiClient):
"""
*\
"""
def __init__(
self,
config: open_api_models.Config,
):
super().__init__(config)
self._endpoint_rule = ''
self.check_config(config)
self._endpoint = self.get_endpoint('cdt', self._region_id, self._endpoint_rule, self._network, self._suffix, self._endpoint_map, self._endpoint)
def get_endpoint(
self,
product_id: str,
region_id: str,
endpoint_rule: str,
network: str,
suffix: str,
endpoint_map: Dict[str, str],
endpoint: str,
) -> str:
if not UtilClient.empty(endpoint):
return endpoint
if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(endpoint_map.get(region_id)):
return endpoint_map.get(region_id)
return EndpointUtilClient.get_endpoint_rules(product_id, region_id, endpoint_rule, network, suffix)
def get_cdt_service_status_with_options(
self,
request: cdt20210813_models.GetCdtServiceStatusRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.GetCdtServiceStatusResponse(),
self.do_rpcrequest('GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
async def get_cdt_service_status_with_options_async(
self,
request: cdt20210813_models.GetCdtServiceStatusRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.GetCdtServiceStatusResponse(),
await self.do_rpcrequest_async('GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
def get_cdt_service_status(
self,
request: cdt20210813_models.GetCdtServiceStatusRequest,
) -> cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_service_status_with_options(request, runtime)
async def get_cdt_service_status_async(
self,
request: cdt20210813_models.GetCdtServiceStatusRequest,
) -> cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_service_status_with_options_async(request, runtime)
def open_cdt_service_with_options(
self,
request: cdt20210813_models.OpenCdtServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.OpenCdtServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.OpenCdtServiceResponse(),
self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
async def open_cdt_service_with_options_async(
self,
request: cdt20210813_models.OpenCdtServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.OpenCdtServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.OpenCdtServiceResponse(),
await self.do_rpcrequest_async('OpenCdtService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
def open_cdt_service(
self,
request: cdt20210813_models.OpenCdtServiceRequest,
) -> cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_service_with_options(request, runtime)
async def open_cdt_service_async(
self,
request: cdt20210813_models.OpenCdtServiceRequest,
) -> cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_service_with_options_async(request, runtime)
def get_cdt_cb_service_status_with_options(
self,
request: cdt20210813_models.GetCdtCbServiceStatusRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.GetCdtCbServiceStatusResponse(),
self.do_rpcrequest('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
async def get_cdt_cb_service_status_with_options_async(
self,
request: cdt20210813_models.GetCdtCbServiceStatusRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.GetCdtCbServiceStatusResponse(),
await self.do_rpcrequest_async('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
def get_cdt_cb_service_status(
self,
request: cdt20210813_models.GetCdtCbServiceStatusRequest,
) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_cb_service_status_with_options(request, runtime)
async def get_cdt_cb_service_status_async(
self,
request: cdt20210813_models.GetCdtCbServiceStatusRequest,
) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_cb_service_status_with_options_async(request, runtime)
def open_cdt_cb_service_with_options(
self,
request: cdt20210813_models.OpenCdtCbServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.OpenCdtCbServiceResponse(),
self.do_rpcrequest('OpenCdtCbService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
async def open_cdt_cb_service_with_options_async(
self,
request: cdt20210813_models.OpenCdtCbServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.OpenCdtCbServiceResponse(),
await self.do_rpcrequest_async('OpenCdtCbService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
def open_cdt_cb_service(
self,
request: cdt20210813_models.OpenCdtCbServiceRequest,
) -> cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_cb_service_with_options(request, runtime)
async def open_cdt_cb_service_async(
self,
request: cdt20210813_models.OpenCdtCbServiceRequest,
) -> cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_cb_service_with_options_async(request, runtime)
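A minimal usage sketch for the generated client above. The Config fields are standard alibabacloud_tea_openapi options; the credential and region values are placeholders, and constructing GetCdtServiceStatusRequest with defaults (and reading response.body) is an assumption about this SDK's request/response models rather than something shown in the file.

# Illustrative sketch only; credentials, region and request fields are placeholders.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_cdt20210813 import models as cdt20210813_models

config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
    region_id='cn-hangzhou',
)
client = Client(config)

# Synchronous call; each operation also has an *_async variant on the client.
request = cdt20210813_models.GetCdtServiceStatusRequest()
response = client.get_cdt_service_status(request)
print(response.body)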
|
flexible
|
{
"blob_id": "2e5d66033c2a049ba2423d01792a629bf4b8176d",
"index": 8728,
"step-1": "<mask token>\n\n\nclass Client(OpenApiClient):\n <mask token>\n <mask token>\n\n def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:\n str, network: str, suffix: str, endpoint_map: Dict[str, str],\n endpoint: str) ->str:\n if not UtilClient.empty(endpoint):\n return endpoint\n if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(\n endpoint_map.get(region_id)):\n return endpoint_map.get(region_id)\n return EndpointUtilClient.get_endpoint_rules(product_id, region_id,\n endpoint_rule, network, suffix)\n\n def get_cdt_service_status_with_options(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n <mask token>\n\n async def get_cdt_service_status_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_service_status_with_options_async(request,\n runtime)\n <mask token>\n\n async def open_cdt_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_service(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_service_with_options(request, runtime)\n\n async def open_cdt_service_async(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_service_with_options_async(request, runtime)\n\n def get_cdt_cb_service_status_with_options(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_cb_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) 
->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async\n ('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_cb_service_status(self, request: cdt20210813_models.\n GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_cb_service_status_with_options(request, runtime)\n\n async def get_cdt_cb_service_status_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_cb_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_cb_service_with_options(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_cb_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), await self.do_rpcrequest_async('OpenCdtCbService',\n '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_cb_service(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_cb_service_with_options(request, runtime)\n\n async def open_cdt_cb_service_async(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_cb_service_with_options_async(request,\n runtime)\n",
"step-2": "<mask token>\n\n\nclass Client(OpenApiClient):\n <mask token>\n\n def __init__(self, config: open_api_models.Config):\n super().__init__(config)\n self._endpoint_rule = ''\n self.check_config(config)\n self._endpoint = self.get_endpoint('cdt', self._region_id, self.\n _endpoint_rule, self._network, self._suffix, self._endpoint_map,\n self._endpoint)\n\n def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:\n str, network: str, suffix: str, endpoint_map: Dict[str, str],\n endpoint: str) ->str:\n if not UtilClient.empty(endpoint):\n return endpoint\n if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(\n endpoint_map.get(region_id)):\n return endpoint_map.get(region_id)\n return EndpointUtilClient.get_endpoint_rules(product_id, region_id,\n endpoint_rule, network, suffix)\n\n def get_cdt_service_status_with_options(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_service_status(self, request: cdt20210813_models.\n GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_service_status_with_options(request, runtime)\n\n async def get_cdt_service_status_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_service_status_with_options_async(request,\n runtime)\n <mask token>\n\n async def open_cdt_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_service(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_service_with_options(request, runtime)\n\n async def open_cdt_service_async(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_service_with_options_async(request, runtime)\n\n def get_cdt_cb_service_status_with_options(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) 
->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_cb_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async\n ('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_cb_service_status(self, request: cdt20210813_models.\n GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_cb_service_status_with_options(request, runtime)\n\n async def get_cdt_cb_service_status_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_cb_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_cb_service_with_options(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_cb_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), await self.do_rpcrequest_async('OpenCdtCbService',\n '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_cb_service(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_cb_service_with_options(request, runtime)\n\n async def open_cdt_cb_service_async(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_cb_service_with_options_async(request,\n runtime)\n",
"step-3": "<mask token>\n\n\nclass Client(OpenApiClient):\n <mask token>\n\n def __init__(self, config: open_api_models.Config):\n super().__init__(config)\n self._endpoint_rule = ''\n self.check_config(config)\n self._endpoint = self.get_endpoint('cdt', self._region_id, self.\n _endpoint_rule, self._network, self._suffix, self._endpoint_map,\n self._endpoint)\n\n def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:\n str, network: str, suffix: str, endpoint_map: Dict[str, str],\n endpoint: str) ->str:\n if not UtilClient.empty(endpoint):\n return endpoint\n if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(\n endpoint_map.get(region_id)):\n return endpoint_map.get(region_id)\n return EndpointUtilClient.get_endpoint_rules(product_id, region_id,\n endpoint_rule, network, suffix)\n\n def get_cdt_service_status_with_options(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_service_status(self, request: cdt20210813_models.\n GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_service_status_with_options(request, runtime)\n\n async def get_cdt_service_status_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_service_with_options(self, request: cdt20210813_models.\n OpenCdtServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS',\n 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_service(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_service_with_options(request, 
runtime)\n\n async def open_cdt_service_async(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_service_with_options_async(request, runtime)\n\n def get_cdt_cb_service_status_with_options(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_cb_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async\n ('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_cb_service_status(self, request: cdt20210813_models.\n GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_cb_service_status_with_options(request, runtime)\n\n async def get_cdt_cb_service_status_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_cb_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_cb_service_with_options(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_cb_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), await self.do_rpcrequest_async('OpenCdtCbService',\n '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_cb_service(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_cb_service_with_options(request, runtime)\n\n async def open_cdt_cb_service_async(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_cb_service_with_options_async(request,\n runtime)\n",
"step-4": "<mask token>\n\n\nclass Client(OpenApiClient):\n \"\"\"\n * \"\"\"\n\n def __init__(self, config: open_api_models.Config):\n super().__init__(config)\n self._endpoint_rule = ''\n self.check_config(config)\n self._endpoint = self.get_endpoint('cdt', self._region_id, self.\n _endpoint_rule, self._network, self._suffix, self._endpoint_map,\n self._endpoint)\n\n def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:\n str, network: str, suffix: str, endpoint_map: Dict[str, str],\n endpoint: str) ->str:\n if not UtilClient.empty(endpoint):\n return endpoint\n if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(\n endpoint_map.get(region_id)):\n return endpoint_map.get(region_id)\n return EndpointUtilClient.get_endpoint_rules(product_id, region_id,\n endpoint_rule, network, suffix)\n\n def get_cdt_service_status_with_options(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_service_status(self, request: cdt20210813_models.\n GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_service_status_with_options(request, runtime)\n\n async def get_cdt_service_status_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_service_with_options(self, request: cdt20210813_models.\n OpenCdtServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS',\n 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_service(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_service_with_options(request, 
runtime)\n\n async def open_cdt_service_async(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_service_with_options_async(request, runtime)\n\n def get_cdt_cb_service_status_with_options(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_cb_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async\n ('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_cb_service_status(self, request: cdt20210813_models.\n GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_cb_service_status_with_options(request, runtime)\n\n async def get_cdt_cb_service_status_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_cb_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_cb_service_with_options(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_cb_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), await self.do_rpcrequest_async('OpenCdtCbService',\n '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_cb_service(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_cb_service_with_options(request, runtime)\n\n async def open_cdt_cb_service_async(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_cb_service_with_options_async(request,\n runtime)\n",
"step-5": "# -*- coding: utf-8 -*-\n# This file is auto-generated, don't edit it. Thanks.\nfrom typing import Dict\nfrom Tea.core import TeaCore\n\nfrom alibabacloud_tea_openapi.client import Client as OpenApiClient\nfrom alibabacloud_tea_openapi import models as open_api_models\nfrom alibabacloud_tea_util.client import Client as UtilClient\nfrom alibabacloud_endpoint_util.client import Client as EndpointUtilClient\nfrom alibabacloud_cdt20210813 import models as cdt20210813_models\nfrom alibabacloud_tea_util import models as util_models\n\n\nclass Client(OpenApiClient):\n \"\"\"\n *\\\n \"\"\"\n def __init__(\n self, \n config: open_api_models.Config,\n ):\n super().__init__(config)\n self._endpoint_rule = ''\n self.check_config(config)\n self._endpoint = self.get_endpoint('cdt', self._region_id, self._endpoint_rule, self._network, self._suffix, self._endpoint_map, self._endpoint)\n\n def get_endpoint(\n self,\n product_id: str,\n region_id: str,\n endpoint_rule: str,\n network: str,\n suffix: str,\n endpoint_map: Dict[str, str],\n endpoint: str,\n ) -> str:\n if not UtilClient.empty(endpoint):\n return endpoint\n if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(endpoint_map.get(region_id)):\n return endpoint_map.get(region_id)\n return EndpointUtilClient.get_endpoint_rules(product_id, region_id, endpoint_rule, network, suffix)\n\n def get_cdt_service_status_with_options(\n self,\n request: cdt20210813_models.GetCdtServiceStatusRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.GetCdtServiceStatusResponse(),\n self.do_rpcrequest('GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n async def get_cdt_service_status_with_options_async(\n self,\n request: cdt20210813_models.GetCdtServiceStatusRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.GetCdtServiceStatusResponse(),\n await self.do_rpcrequest_async('GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n def get_cdt_service_status(\n self,\n request: cdt20210813_models.GetCdtServiceStatusRequest,\n ) -> cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_service_status_with_options(request, runtime)\n\n async def get_cdt_service_status_async(\n self,\n request: cdt20210813_models.GetCdtServiceStatusRequest,\n ) -> cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_service_status_with_options_async(request, runtime)\n\n def open_cdt_service_with_options(\n self,\n request: cdt20210813_models.OpenCdtServiceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.OpenCdtServiceResponse(),\n self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n async def open_cdt_service_with_options_async(\n self,\n request: cdt20210813_models.OpenCdtServiceRequest,\n runtime: 
util_models.RuntimeOptions,\n ) -> cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.OpenCdtServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n def open_cdt_service(\n self,\n request: cdt20210813_models.OpenCdtServiceRequest,\n ) -> cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_service_with_options(request, runtime)\n\n async def open_cdt_service_async(\n self,\n request: cdt20210813_models.OpenCdtServiceRequest,\n ) -> cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_service_with_options_async(request, runtime)\n\n def get_cdt_cb_service_status_with_options(\n self,\n request: cdt20210813_models.GetCdtCbServiceStatusRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.GetCdtCbServiceStatusResponse(),\n self.do_rpcrequest('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n async def get_cdt_cb_service_status_with_options_async(\n self,\n request: cdt20210813_models.GetCdtCbServiceStatusRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.GetCdtCbServiceStatusResponse(),\n await self.do_rpcrequest_async('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n def get_cdt_cb_service_status(\n self,\n request: cdt20210813_models.GetCdtCbServiceStatusRequest,\n ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_cb_service_status_with_options(request, runtime)\n\n async def get_cdt_cb_service_status_async(\n self,\n request: cdt20210813_models.GetCdtCbServiceStatusRequest,\n ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_cb_service_status_with_options_async(request, runtime)\n\n def open_cdt_cb_service_with_options(\n self,\n request: cdt20210813_models.OpenCdtCbServiceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.OpenCdtCbServiceResponse(),\n self.do_rpcrequest('OpenCdtCbService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n async def open_cdt_cb_service_with_options_async(\n self,\n request: cdt20210813_models.OpenCdtCbServiceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.OpenCdtCbServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtCbService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n def open_cdt_cb_service(\n 
self,\n request: cdt20210813_models.OpenCdtCbServiceRequest,\n ) -> cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_cb_service_with_options(request, runtime)\n\n async def open_cdt_cb_service_async(\n self,\n request: cdt20210813_models.OpenCdtCbServiceRequest,\n ) -> cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_cb_service_with_options_async(request, runtime)\n",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
from sys import stdin
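# Inferred input format: 5-field rows are employee records
# (dep_id, emp_id, surname, name, position); other rows are department records
# (dep_id, num_of_emp, head, dep_name). Each employee appears to be printed as
# one combined CSV line together with the department fields of its group.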
last_emp = emp_id = ''
for line in stdin:
data = line.strip().split(',')
if last_emp != '' and last_emp != emp_id:
print(f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}')
if len(data) == 5:
last_emp = emp_id
emp_id = data[1]
dep_id = data[0]
emp_surname = data[2]
emp_name = data[3]
position = data[4]
else:
dep_name = data[3]
num_of_emp = data[1]
head = data[2]
if last_emp == emp_id == '':
# last_emp = ''
emp_id = 'new'
else:
last_emp = ''
emp_id = 'new'
print(f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}')
|
normal
|
{
"blob_id": "3a2b1ddab422d450ad3b5684cbed1847d31fb8e6",
"index": 2839,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in stdin:\n data = line.strip().split(',')\n if last_emp != '' and last_emp != emp_id:\n print(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n if len(data) == 5:\n last_emp = emp_id\n emp_id = data[1]\n dep_id = data[0]\n emp_surname = data[2]\n emp_name = data[3]\n position = data[4]\n else:\n dep_name = data[3]\n num_of_emp = data[1]\n head = data[2]\n if last_emp == emp_id == '':\n emp_id = 'new'\n else:\n last_emp = ''\n emp_id = 'new'\nprint(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n",
"step-3": "<mask token>\nlast_emp = emp_id = ''\nfor line in stdin:\n data = line.strip().split(',')\n if last_emp != '' and last_emp != emp_id:\n print(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n if len(data) == 5:\n last_emp = emp_id\n emp_id = data[1]\n dep_id = data[0]\n emp_surname = data[2]\n emp_name = data[3]\n position = data[4]\n else:\n dep_name = data[3]\n num_of_emp = data[1]\n head = data[2]\n if last_emp == emp_id == '':\n emp_id = 'new'\n else:\n last_emp = ''\n emp_id = 'new'\nprint(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n",
"step-4": "from sys import stdin\nlast_emp = emp_id = ''\nfor line in stdin:\n data = line.strip().split(',')\n if last_emp != '' and last_emp != emp_id:\n print(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n if len(data) == 5:\n last_emp = emp_id\n emp_id = data[1]\n dep_id = data[0]\n emp_surname = data[2]\n emp_name = data[3]\n position = data[4]\n else:\n dep_name = data[3]\n num_of_emp = data[1]\n head = data[2]\n if last_emp == emp_id == '':\n emp_id = 'new'\n else:\n last_emp = ''\n emp_id = 'new'\nprint(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n",
"step-5": "from sys import stdin\n\nlast_emp = emp_id = ''\n\nfor line in stdin:\n data = line.strip().split(',')\n\n if last_emp != '' and last_emp != emp_id:\n print(f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}')\n\n if len(data) == 5:\n last_emp = emp_id\n emp_id = data[1]\n dep_id = data[0]\n emp_surname = data[2]\n emp_name = data[3]\n position = data[4]\n else:\n dep_name = data[3]\n num_of_emp = data[1]\n head = data[2]\n if last_emp == emp_id == '':\n # last_emp = ''\n emp_id = 'new'\n else:\n last_emp = ''\n emp_id = 'new'\n\nprint(f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('# Print whole tree')
print(chunks.pprint())
print("""
# Print noun phrases only""")
for subtree in chunks.subtrees():
if subtree.label() == 'NP':
print(' '.join(e[0] for e in list(subtree)))
print(subtree.pprint())
chunks.draw()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sentence = u'만 6세 이하의 초등학교 취학 전 자녀를 양육하기 위해서는'
words = konlpy.tag.Twitter().pos(sentence)
grammar = """
NP: {<N.*>*<Suffix>?} # Noun phrase
VP: {<V.*>*} # Verb phrase
AP: {<A.*>*} # Adjective phrase
"""
parser = nltk.RegexpParser(grammar)
chunks = parser.parse(words)
print('# Print whole tree')
print(chunks.pprint())
print("""
# Print noun phrases only""")
for subtree in chunks.subtrees():
if subtree.label() == 'NP':
print(' '.join(e[0] for e in list(subtree)))
print(subtree.pprint())
chunks.draw()
<|reserved_special_token_1|>
import konlpy
import nltk
sentence = u'만 6세 이하의 초등학교 취학 전 자녀를 양육하기 위해서는'
words = konlpy.tag.Twitter().pos(sentence)
grammar = """
NP: {<N.*>*<Suffix>?} # Noun phrase
VP: {<V.*>*} # Verb phrase
AP: {<A.*>*} # Adjective phrase
"""
parser = nltk.RegexpParser(grammar)
chunks = parser.parse(words)
print('# Print whole tree')
print(chunks.pprint())
print("""
# Print noun phrases only""")
for subtree in chunks.subtrees():
if subtree.label() == 'NP':
print(' '.join(e[0] for e in list(subtree)))
print(subtree.pprint())
chunks.draw()
<|reserved_special_token_1|>
import konlpy
import nltk
# POS tag a sentence
sentence = u'만 6세 이하의 초등학교 취학 전 자녀를 양육하기 위해서는'
words = konlpy.tag.Twitter().pos(sentence)
# Define a chunk grammar, or chunking rules, then chunk
grammar = """
NP: {<N.*>*<Suffix>?} # Noun phrase
VP: {<V.*>*} # Verb phrase
AP: {<A.*>*} # Adjective phrase
"""
parser = nltk.RegexpParser(grammar)
chunks = parser.parse(words)
print("# Print whole tree")
print(chunks.pprint())
print("\n# Print noun phrases only")
for subtree in chunks.subtrees():
if subtree.label()=='NP':
print(' '.join((e[0] for e in list(subtree))))
print(subtree.pprint())
# Display the chunk tree
chunks.draw()
|
flexible
|
{
"blob_id": "6b647dc2775f54706a6c18ee91145ba60d70be21",
"index": 4453,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('# Print whole tree')\nprint(chunks.pprint())\nprint(\"\"\"\n# Print noun phrases only\"\"\")\nfor subtree in chunks.subtrees():\n if subtree.label() == 'NP':\n print(' '.join(e[0] for e in list(subtree)))\n print(subtree.pprint())\nchunks.draw()\n",
"step-3": "<mask token>\nsentence = u'만 6세 이하의 초등학교 취학 전 자녀를 양육하기 위해서는'\nwords = konlpy.tag.Twitter().pos(sentence)\ngrammar = \"\"\"\nNP: {<N.*>*<Suffix>?} # Noun phrase\nVP: {<V.*>*} # Verb phrase\nAP: {<A.*>*} # Adjective phrase\n\"\"\"\nparser = nltk.RegexpParser(grammar)\nchunks = parser.parse(words)\nprint('# Print whole tree')\nprint(chunks.pprint())\nprint(\"\"\"\n# Print noun phrases only\"\"\")\nfor subtree in chunks.subtrees():\n if subtree.label() == 'NP':\n print(' '.join(e[0] for e in list(subtree)))\n print(subtree.pprint())\nchunks.draw()\n",
"step-4": "import konlpy\nimport nltk\nsentence = u'만 6세 이하의 초등학교 취학 전 자녀를 양육하기 위해서는'\nwords = konlpy.tag.Twitter().pos(sentence)\ngrammar = \"\"\"\nNP: {<N.*>*<Suffix>?} # Noun phrase\nVP: {<V.*>*} # Verb phrase\nAP: {<A.*>*} # Adjective phrase\n\"\"\"\nparser = nltk.RegexpParser(grammar)\nchunks = parser.parse(words)\nprint('# Print whole tree')\nprint(chunks.pprint())\nprint(\"\"\"\n# Print noun phrases only\"\"\")\nfor subtree in chunks.subtrees():\n if subtree.label() == 'NP':\n print(' '.join(e[0] for e in list(subtree)))\n print(subtree.pprint())\nchunks.draw()\n",
"step-5": "import konlpy\nimport nltk\n\n# POS tag a sentence\nsentence = u'만 6세 이하의 초등학교 취학 전 자녀를 양육하기 위해서는'\nwords = konlpy.tag.Twitter().pos(sentence)\n\n# Define a chunk grammar, or chunking rules, then chunk\ngrammar = \"\"\"\nNP: {<N.*>*<Suffix>?} # Noun phrase\nVP: {<V.*>*} # Verb phrase\nAP: {<A.*>*} # Adjective phrase\n\"\"\"\nparser = nltk.RegexpParser(grammar)\nchunks = parser.parse(words)\nprint(\"# Print whole tree\")\nprint(chunks.pprint())\n\nprint(\"\\n# Print noun phrases only\")\nfor subtree in chunks.subtrees():\n if subtree.label()=='NP':\n print(' '.join((e[0] for e in list(subtree))))\n print(subtree.pprint())\n\n# Display the chunk tree\nchunks.draw()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def test_memoize_insert_sort_key(con, snapshot):
table = con.table('airlines')
t = table['arrdelay', 'dest']
expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.
arrdelay - t.arrdelay.mean())
worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)
result = repr(worst)
assert result.count('airlines') == 1
snapshot.assert_match(result, 'repr.txt')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_format_sql_query_result(con, snapshot):
t = con.table('airlines')
query = """
SELECT carrier, mean(arrdelay) AS avg_arrdelay
FROM airlines
GROUP BY 1
ORDER BY 2 DESC
"""
schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})
with con.set_query_schema(query, schema):
expr = t.sql(query)
expr = expr.op().copy(name='foo').to_expr()
expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.
avg_arrdelay.round(1))
snapshot.assert_match(repr(expr), 'repr.txt')
<|reserved_special_token_0|>
def test_memoize_insert_sort_key(con, snapshot):
table = con.table('airlines')
t = table['arrdelay', 'dest']
expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.
arrdelay - t.arrdelay.mean())
worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)
result = repr(worst)
assert result.count('airlines') == 1
snapshot.assert_match(result, 'repr.txt')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_format_sql_query_result(con, snapshot):
t = con.table('airlines')
query = """
SELECT carrier, mean(arrdelay) AS avg_arrdelay
FROM airlines
GROUP BY 1
ORDER BY 2 DESC
"""
schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})
with con.set_query_schema(query, schema):
expr = t.sql(query)
expr = expr.op().copy(name='foo').to_expr()
expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.
avg_arrdelay.round(1))
snapshot.assert_match(repr(expr), 'repr.txt')
def test_memoize_database_table(con, snapshot):
table = con.table('test1')
table2 = con.table('test2')
filter_pred = table['f'] > 0
table3 = table[filter_pred]
join_pred = table3['g'] == table2['key']
joined = table2.inner_join(table3, [join_pred])
met1 = (table3['f'] - table2['value']).mean().name('foo')
expr = joined.aggregate([met1, table3['f'].sum().name('bar')], by=[
table3['g'], table2['key']])
result = repr(expr)
assert result.count('test1') == 1
assert result.count('test2') == 1
snapshot.assert_match(result, 'repr.txt')
def test_memoize_insert_sort_key(con, snapshot):
table = con.table('airlines')
t = table['arrdelay', 'dest']
expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.
arrdelay - t.arrdelay.mean())
worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)
result = repr(worst)
assert result.count('airlines') == 1
snapshot.assert_match(result, 'repr.txt')
<|reserved_special_token_1|>
from __future__ import annotations
import ibis
from ibis import _
def test_format_sql_query_result(con, snapshot):
t = con.table('airlines')
query = """
SELECT carrier, mean(arrdelay) AS avg_arrdelay
FROM airlines
GROUP BY 1
ORDER BY 2 DESC
"""
schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})
with con.set_query_schema(query, schema):
expr = t.sql(query)
expr = expr.op().copy(name='foo').to_expr()
expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.
avg_arrdelay.round(1))
snapshot.assert_match(repr(expr), 'repr.txt')
def test_memoize_database_table(con, snapshot):
table = con.table('test1')
table2 = con.table('test2')
filter_pred = table['f'] > 0
table3 = table[filter_pred]
join_pred = table3['g'] == table2['key']
joined = table2.inner_join(table3, [join_pred])
met1 = (table3['f'] - table2['value']).mean().name('foo')
expr = joined.aggregate([met1, table3['f'].sum().name('bar')], by=[
table3['g'], table2['key']])
result = repr(expr)
assert result.count('test1') == 1
assert result.count('test2') == 1
snapshot.assert_match(result, 'repr.txt')
def test_memoize_insert_sort_key(con, snapshot):
table = con.table('airlines')
t = table['arrdelay', 'dest']
expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.
arrdelay - t.arrdelay.mean())
worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)
result = repr(worst)
assert result.count('airlines') == 1
snapshot.assert_match(result, 'repr.txt')
<|reserved_special_token_1|>
from __future__ import annotations
import ibis
from ibis import _
def test_format_sql_query_result(con, snapshot):
t = con.table("airlines")
query = """
SELECT carrier, mean(arrdelay) AS avg_arrdelay
FROM airlines
GROUP BY 1
ORDER BY 2 DESC
"""
schema = ibis.schema({"carrier": "string", "avg_arrdelay": "double"})
with con.set_query_schema(query, schema):
expr = t.sql(query)
# name is autoincremented so we need to set it manually to make the
# snapshot stable
expr = expr.op().copy(name="foo").to_expr()
expr = expr.mutate(
island=_.carrier.lower(),
avg_arrdelay=_.avg_arrdelay.round(1),
)
snapshot.assert_match(repr(expr), "repr.txt")
def test_memoize_database_table(con, snapshot):
table = con.table("test1")
table2 = con.table("test2")
filter_pred = table["f"] > 0
table3 = table[filter_pred]
join_pred = table3["g"] == table2["key"]
joined = table2.inner_join(table3, [join_pred])
met1 = (table3["f"] - table2["value"]).mean().name("foo")
expr = joined.aggregate(
[met1, table3["f"].sum().name("bar")], by=[table3["g"], table2["key"]]
)
result = repr(expr)
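    # each source table should be printed only once in the repr, even though it
    # is referenced multiple times in the expression tree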
assert result.count("test1") == 1
assert result.count("test2") == 1
snapshot.assert_match(result, "repr.txt")
def test_memoize_insert_sort_key(con, snapshot):
table = con.table("airlines")
t = table["arrdelay", "dest"]
expr = t.group_by("dest").mutate(
dest_avg=t.arrdelay.mean(), dev=t.arrdelay - t.arrdelay.mean()
)
worst = expr[expr.dev.notnull()].order_by(ibis.desc("dev")).limit(10)
result = repr(worst)
assert result.count("airlines") == 1
snapshot.assert_match(result, "repr.txt")
|
flexible
|
{
"blob_id": "97ff8dae060475b0efbc8d39e9fc251be8ac091b",
"index": 6264,
"step-1": "<mask token>\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-2": "<mask token>\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table('airlines')\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n expr = expr.op().copy(name='foo').to_expr()\n expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.\n avg_arrdelay.round(1))\n snapshot.assert_match(repr(expr), 'repr.txt')\n\n\n<mask token>\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-3": "<mask token>\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table('airlines')\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n expr = expr.op().copy(name='foo').to_expr()\n expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.\n avg_arrdelay.round(1))\n snapshot.assert_match(repr(expr), 'repr.txt')\n\n\ndef test_memoize_database_table(con, snapshot):\n table = con.table('test1')\n table2 = con.table('test2')\n filter_pred = table['f'] > 0\n table3 = table[filter_pred]\n join_pred = table3['g'] == table2['key']\n joined = table2.inner_join(table3, [join_pred])\n met1 = (table3['f'] - table2['value']).mean().name('foo')\n expr = joined.aggregate([met1, table3['f'].sum().name('bar')], by=[\n table3['g'], table2['key']])\n result = repr(expr)\n assert result.count('test1') == 1\n assert result.count('test2') == 1\n snapshot.assert_match(result, 'repr.txt')\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-4": "from __future__ import annotations\nimport ibis\nfrom ibis import _\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table('airlines')\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n expr = expr.op().copy(name='foo').to_expr()\n expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.\n avg_arrdelay.round(1))\n snapshot.assert_match(repr(expr), 'repr.txt')\n\n\ndef test_memoize_database_table(con, snapshot):\n table = con.table('test1')\n table2 = con.table('test2')\n filter_pred = table['f'] > 0\n table3 = table[filter_pred]\n join_pred = table3['g'] == table2['key']\n joined = table2.inner_join(table3, [join_pred])\n met1 = (table3['f'] - table2['value']).mean().name('foo')\n expr = joined.aggregate([met1, table3['f'].sum().name('bar')], by=[\n table3['g'], table2['key']])\n result = repr(expr)\n assert result.count('test1') == 1\n assert result.count('test2') == 1\n snapshot.assert_match(result, 'repr.txt')\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-5": "from __future__ import annotations\n\nimport ibis\nfrom ibis import _\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table(\"airlines\")\n\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({\"carrier\": \"string\", \"avg_arrdelay\": \"double\"})\n\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n # name is autoincremented so we need to set it manually to make the\n # snapshot stable\n expr = expr.op().copy(name=\"foo\").to_expr()\n\n expr = expr.mutate(\n island=_.carrier.lower(),\n avg_arrdelay=_.avg_arrdelay.round(1),\n )\n\n snapshot.assert_match(repr(expr), \"repr.txt\")\n\n\ndef test_memoize_database_table(con, snapshot):\n table = con.table(\"test1\")\n table2 = con.table(\"test2\")\n\n filter_pred = table[\"f\"] > 0\n table3 = table[filter_pred]\n join_pred = table3[\"g\"] == table2[\"key\"]\n\n joined = table2.inner_join(table3, [join_pred])\n\n met1 = (table3[\"f\"] - table2[\"value\"]).mean().name(\"foo\")\n expr = joined.aggregate(\n [met1, table3[\"f\"].sum().name(\"bar\")], by=[table3[\"g\"], table2[\"key\"]]\n )\n\n result = repr(expr)\n assert result.count(\"test1\") == 1\n assert result.count(\"test2\") == 1\n\n snapshot.assert_match(result, \"repr.txt\")\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table(\"airlines\")\n\n t = table[\"arrdelay\", \"dest\"]\n expr = t.group_by(\"dest\").mutate(\n dest_avg=t.arrdelay.mean(), dev=t.arrdelay - t.arrdelay.mean()\n )\n\n worst = expr[expr.dev.notnull()].order_by(ibis.desc(\"dev\")).limit(10)\n\n result = repr(worst)\n assert result.count(\"airlines\") == 1\n\n snapshot.assert_match(result, \"repr.txt\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def get_emotion_label(emotion):
return LABELS['emotion'][emotion]
def _load_meta_from_csv(csv_meta, output_dict):
data = readcsv(csv_meta)
for row in data:
output_dict[row[0]]['gender'] = row[1]
output_dict[row[0]]['age_group'] = row[2]
output_dict[row[0]]['race'] = row[3]
output_dict[row[0]]['emotion'] = row[4]
output_dict[row[0]]['identity'] = row[0].split('_')[1]
<|reserved_special_token_0|>
def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):
data = list()
discarded_items = defaultdict(list)
for image_path, image_meta in tqdm(rafDBdata.items()):
path = os.path.join(imagesdir, image_path)
if ALIGNED:
path = os.path.splitext(path)
path = path[0] + '_aligned' + path[1]
identity = image_meta['identity']
image = cv2.imread(path)
if image is None:
print('WARNING! Unable to read {}'.format(image_path))
print(' - At {}'.format(path))
discarded_items['unavailable_image'].append(identity)
continue
if np.max(image) == np.min(image):
print('Blank image {}'.format(image_path))
discarded_items['blank_image'].append(identity)
continue
sample_partition = (PARTITION_TEST if partition_label ==
PARTITION_TEST else get_partition(identity))
gender = rafDBdata[image_path]['gender']
age = rafDBdata[image_path]['age_group']
ethnicity = rafDBdata[image_path]['ethnicity']
emotion = rafDBdata[image_path]['emotion']
labels = gender, age, ethnicity, emotion
roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'
] is None else image_meta['roi']
sample = {'img': path, 'label': labels, 'roi': roi, 'part':
sample_partition}
data.append(sample)
if debug_max_num_samples is not None and len(data
) >= debug_max_num_samples:
print('Stopped loading. Debug max samples: ', debug_max_num_samples
)
break
print('Data loaded. {} samples'.format(len(data)))
print('Discarded for unavailable image: ', len(discarded_items[
'unavailable_image']))
print('Discarded for blank image: ', len(discarded_items['blank_image']))
return data
<|reserved_special_token_0|>
class RAFDBMulti:
def __init__(self, partition='train', imagesdir=
'data/RAF-DB/basic/Image/{aligned}', csvmeta=
'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',
target_shape=(112, 112, 3), augment=True, custom_augmentation=None,
preprocessing='full_normalization', debug_max_num_samples=None,
include_gender=False, include_age_group=False, include_race=False,
**kwargs):
partition_label = partition_select(partition)
self.target_shape = target_shape
self.custom_augmentation = custom_augmentation
self.augment = augment
self.gen = None
self.preprocessing = preprocessing
print('Loading %s data...' % partition)
num_samples = '_' + str(debug_max_num_samples
) if debug_max_num_samples is not None else ''
cache_task = '{}{}{}_emotion'.format('_withgender' if
include_gender else '', '_withagegroup' if include_age_group else
'', '_withrace' if include_race else '')
cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(
task=cache_task, partition=partition, num_samples=num_samples)
cache_file_name = os.path.join('cache', cache_file_name)
cache_file_name = os.path.join(EXT_ROOT, cache_file_name)
print('cache file name %s' % cache_file_name)
try:
with open(cache_file_name, 'rb') as f:
self.data = pickle.load(f)[:debug_max_num_samples]
print('Data loaded. %d samples, from cache' % len(self.data))
except FileNotFoundError:
print('Loading %s data from scratch' % partition)
load_partition = ('train' if partition_label == PARTITION_TRAIN or
partition_label == PARTITION_VAL else 'test')
imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=
'aligned' if ALIGNED else 'original'))
csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=
load_partition))
_load_meta_from_csv(csvmeta, rafDBmeta)
_load_traits(rafDBmeta, include_gender, include_age_group,
include_race)
print('Loading {} dataset'.format(partition))
loaded_data = _load_dataset(imagesdir, partition_label,
debug_max_num_samples)
print_verbose_partition(dataset_partition=rafDBpartition,
verbosed_partition=partition_label)
if partition.startswith('test'):
self.data = loaded_data
else:
self.data = [x for x in loaded_data if x['part'] ==
partition_label]
with open(cache_file_name, 'wb') as f:
print('Pickle dumping')
pickle.dump(self.data, f)
def get_data(self):
return self.data
def get_num_samples(self):
return len(self.data)
def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):
if self.gen is None:
self.gen = DataGenerator(data=self.data, target_shape=self.
target_shape, with_augmentation=self.augment,
custom_augmentation=self.custom_augmentation, batch_size=
batch_size, num_classes=self.get_num_classes(),
preprocessing=self.preprocessing, fullinfo=fullinfo,
doublelabel=doublelabel)
return self.gen
def get_num_classes(self):
return CLASSES
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_gender_label(gender):
if gender == 'male':
return LABELS['gender']['male']
elif gender == 'female':
return LABELS['gender']['female']
return MASK_VALUE
def get_age_group_label(age_group_text):
return rafdb_labels['age_group'][age_group_text]
<|reserved_special_token_0|>
def get_emotion_label(emotion):
return LABELS['emotion'][emotion]
def _load_meta_from_csv(csv_meta, output_dict):
data = readcsv(csv_meta)
for row in data:
output_dict[row[0]]['gender'] = row[1]
output_dict[row[0]]['age_group'] = row[2]
output_dict[row[0]]['race'] = row[3]
output_dict[row[0]]['emotion'] = row[4]
output_dict[row[0]]['identity'] = row[0].split('_')[1]
<|reserved_special_token_0|>
def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):
data = list()
discarded_items = defaultdict(list)
for image_path, image_meta in tqdm(rafDBdata.items()):
path = os.path.join(imagesdir, image_path)
if ALIGNED:
path = os.path.splitext(path)
path = path[0] + '_aligned' + path[1]
identity = image_meta['identity']
image = cv2.imread(path)
if image is None:
print('WARNING! Unable to read {}'.format(image_path))
print(' - At {}'.format(path))
discarded_items['unavailable_image'].append(identity)
continue
if np.max(image) == np.min(image):
print('Blank image {}'.format(image_path))
discarded_items['blank_image'].append(identity)
continue
sample_partition = (PARTITION_TEST if partition_label ==
PARTITION_TEST else get_partition(identity))
gender = rafDBdata[image_path]['gender']
age = rafDBdata[image_path]['age_group']
ethnicity = rafDBdata[image_path]['ethnicity']
emotion = rafDBdata[image_path]['emotion']
labels = gender, age, ethnicity, emotion
roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'
] is None else image_meta['roi']
sample = {'img': path, 'label': labels, 'roi': roi, 'part':
sample_partition}
data.append(sample)
if debug_max_num_samples is not None and len(data
) >= debug_max_num_samples:
print('Stopped loading. Debug max samples: ', debug_max_num_samples
)
break
print('Data loaded. {} samples'.format(len(data)))
print('Discarded for unavailable image: ', len(discarded_items[
'unavailable_image']))
print('Discarded for blank image: ', len(discarded_items['blank_image']))
return data
<|reserved_special_token_0|>
class RAFDBMulti:
def __init__(self, partition='train', imagesdir=
'data/RAF-DB/basic/Image/{aligned}', csvmeta=
'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',
target_shape=(112, 112, 3), augment=True, custom_augmentation=None,
preprocessing='full_normalization', debug_max_num_samples=None,
include_gender=False, include_age_group=False, include_race=False,
**kwargs):
partition_label = partition_select(partition)
self.target_shape = target_shape
self.custom_augmentation = custom_augmentation
self.augment = augment
self.gen = None
self.preprocessing = preprocessing
print('Loading %s data...' % partition)
num_samples = '_' + str(debug_max_num_samples
) if debug_max_num_samples is not None else ''
cache_task = '{}{}{}_emotion'.format('_withgender' if
include_gender else '', '_withagegroup' if include_age_group else
'', '_withrace' if include_race else '')
cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(
task=cache_task, partition=partition, num_samples=num_samples)
cache_file_name = os.path.join('cache', cache_file_name)
cache_file_name = os.path.join(EXT_ROOT, cache_file_name)
print('cache file name %s' % cache_file_name)
try:
with open(cache_file_name, 'rb') as f:
self.data = pickle.load(f)[:debug_max_num_samples]
print('Data loaded. %d samples, from cache' % len(self.data))
except FileNotFoundError:
print('Loading %s data from scratch' % partition)
load_partition = ('train' if partition_label == PARTITION_TRAIN or
partition_label == PARTITION_VAL else 'test')
imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=
'aligned' if ALIGNED else 'original'))
csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=
load_partition))
_load_meta_from_csv(csvmeta, rafDBmeta)
_load_traits(rafDBmeta, include_gender, include_age_group,
include_race)
print('Loading {} dataset'.format(partition))
loaded_data = _load_dataset(imagesdir, partition_label,
debug_max_num_samples)
print_verbose_partition(dataset_partition=rafDBpartition,
verbosed_partition=partition_label)
if partition.startswith('test'):
self.data = loaded_data
else:
self.data = [x for x in loaded_data if x['part'] ==
partition_label]
with open(cache_file_name, 'wb') as f:
print('Pickle dumping')
pickle.dump(self.data, f)
def get_data(self):
return self.data
def get_num_samples(self):
return len(self.data)
def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):
if self.gen is None:
self.gen = DataGenerator(data=self.data, target_shape=self.
target_shape, with_augmentation=self.augment,
custom_augmentation=self.custom_augmentation, batch_size=
batch_size, num_classes=self.get_num_classes(),
preprocessing=self.preprocessing, fullinfo=fullinfo,
doublelabel=doublelabel)
return self.gen
def get_num_classes(self):
return CLASSES
def test_multi(dataset='test', debug_samples=None):
if dataset.startswith('train') or dataset.startswith('val'):
print(dataset, debug_samples if debug_samples is not None else '')
dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=
'vggface2', debug_max_num_samples=debug_samples)
gen = dt.get_generator()
else:
dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=
'vggface2', debug_max_num_samples=debug_samples)
gen = dv.get_generator()
i = 0
for batch in tqdm(gen):
for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0
], batch[1][1], batch[1][2], batch[1][3]):
facemax = np.max(im)
facemin = np.min(im)
print('Sample:', i)
print('Labels:', gender, age, ethnicity, emotion)
print('Gender:', verbose_gender(gender), '- Age:', verbose_age(
age), '- Ethnicity:', verbose_ethnicity(ethnicity),
'- Emotion:', verbose_emotion(emotion))
im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8
)
cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),
verbose_age(age), verbose_ethnicity(ethnicity),
verbose_emotion(emotion)), (0, im.shape[1]), cv2.
FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))
cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),
verbose_age(age), verbose_ethnicity(ethnicity),
verbose_emotion(emotion)), im)
i += 1
if cv2.waitKey(0) & 255 == ord('q'):
cv2.destroyAllWindows()
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _load_traits(input_meta, include_gender=False, include_age_group=False,
include_race=False):
global rafDBdata
if rafDBdata is None:
rafDBdata = dict()
i, errors = 0, defaultdict(set)
for image_path, image_meta in input_meta.items():
identity = image_meta['identity']
roi = None
rafDBdata[image_path] = {'roi': roi, 'identity': identity,
'gender': get_gender_label(image_meta['gender']) if
include_gender else MASK_VALUE, 'age_group':
get_age_group_label(image_meta['age_group']) if
include_age_group else MASK_VALUE, 'ethnicity':
get_ethnicity_label(image_meta['race']) if include_race else
MASK_VALUE, 'emotion': get_emotion_label(image_meta[
'emotion']), 'sample_num': i}
i += 1
print('Metadata:', len(rafDBdata))
if errors:
print('Gender errors', errors['gender'])
print('Age errors', errors['age'])
print('Ethnicity errors', errors['ethnicity'])
def get_gender_label(gender):
if gender == 'male':
return LABELS['gender']['male']
elif gender == 'female':
return LABELS['gender']['female']
return MASK_VALUE
def get_age_group_label(age_group_text):
return rafdb_labels['age_group'][age_group_text]
<|reserved_special_token_0|>
def get_emotion_label(emotion):
return LABELS['emotion'][emotion]
def _load_meta_from_csv(csv_meta, output_dict):
data = readcsv(csv_meta)
for row in data:
output_dict[row[0]]['gender'] = row[1]
output_dict[row[0]]['age_group'] = row[2]
output_dict[row[0]]['race'] = row[3]
output_dict[row[0]]['emotion'] = row[4]
output_dict[row[0]]['identity'] = row[0].split('_')[1]
<|reserved_special_token_0|>
def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):
data = list()
discarded_items = defaultdict(list)
for image_path, image_meta in tqdm(rafDBdata.items()):
path = os.path.join(imagesdir, image_path)
if ALIGNED:
path = os.path.splitext(path)
path = path[0] + '_aligned' + path[1]
identity = image_meta['identity']
image = cv2.imread(path)
if image is None:
print('WARNING! Unable to read {}'.format(image_path))
print(' - At {}'.format(path))
discarded_items['unavailable_image'].append(identity)
continue
if np.max(image) == np.min(image):
print('Blank image {}'.format(image_path))
discarded_items['blank_image'].append(identity)
continue
sample_partition = (PARTITION_TEST if partition_label ==
PARTITION_TEST else get_partition(identity))
gender = rafDBdata[image_path]['gender']
age = rafDBdata[image_path]['age_group']
ethnicity = rafDBdata[image_path]['ethnicity']
emotion = rafDBdata[image_path]['emotion']
labels = gender, age, ethnicity, emotion
roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'
] is None else image_meta['roi']
sample = {'img': path, 'label': labels, 'roi': roi, 'part':
sample_partition}
data.append(sample)
if debug_max_num_samples is not None and len(data
) >= debug_max_num_samples:
print('Stopped loading. Debug max samples: ', debug_max_num_samples
)
break
print('Data loaded. {} samples'.format(len(data)))
print('Discarded for unavailable image: ', len(discarded_items[
'unavailable_image']))
print('Discarded for blank image: ', len(discarded_items['blank_image']))
return data
<|reserved_special_token_0|>
class RAFDBMulti:
def __init__(self, partition='train', imagesdir=
'data/RAF-DB/basic/Image/{aligned}', csvmeta=
'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',
target_shape=(112, 112, 3), augment=True, custom_augmentation=None,
preprocessing='full_normalization', debug_max_num_samples=None,
include_gender=False, include_age_group=False, include_race=False,
**kwargs):
partition_label = partition_select(partition)
self.target_shape = target_shape
self.custom_augmentation = custom_augmentation
self.augment = augment
self.gen = None
self.preprocessing = preprocessing
print('Loading %s data...' % partition)
num_samples = '_' + str(debug_max_num_samples
) if debug_max_num_samples is not None else ''
cache_task = '{}{}{}_emotion'.format('_withgender' if
include_gender else '', '_withagegroup' if include_age_group else
'', '_withrace' if include_race else '')
cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(
task=cache_task, partition=partition, num_samples=num_samples)
cache_file_name = os.path.join('cache', cache_file_name)
cache_file_name = os.path.join(EXT_ROOT, cache_file_name)
print('cache file name %s' % cache_file_name)
try:
with open(cache_file_name, 'rb') as f:
self.data = pickle.load(f)[:debug_max_num_samples]
print('Data loaded. %d samples, from cache' % len(self.data))
except FileNotFoundError:
print('Loading %s data from scratch' % partition)
load_partition = ('train' if partition_label == PARTITION_TRAIN or
partition_label == PARTITION_VAL else 'test')
imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=
'aligned' if ALIGNED else 'original'))
csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=
load_partition))
_load_meta_from_csv(csvmeta, rafDBmeta)
_load_traits(rafDBmeta, include_gender, include_age_group,
include_race)
print('Loading {} dataset'.format(partition))
loaded_data = _load_dataset(imagesdir, partition_label,
debug_max_num_samples)
print_verbose_partition(dataset_partition=rafDBpartition,
verbosed_partition=partition_label)
if partition.startswith('test'):
self.data = loaded_data
else:
self.data = [x for x in loaded_data if x['part'] ==
partition_label]
with open(cache_file_name, 'wb') as f:
print('Pickle dumping')
pickle.dump(self.data, f)
def get_data(self):
return self.data
def get_num_samples(self):
return len(self.data)
def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):
if self.gen is None:
self.gen = DataGenerator(data=self.data, target_shape=self.
target_shape, with_augmentation=self.augment,
custom_augmentation=self.custom_augmentation, batch_size=
batch_size, num_classes=self.get_num_classes(),
preprocessing=self.preprocessing, fullinfo=fullinfo,
doublelabel=doublelabel)
return self.gen
def get_num_classes(self):
return CLASSES
def test_multi(dataset='test', debug_samples=None):
if dataset.startswith('train') or dataset.startswith('val'):
print(dataset, debug_samples if debug_samples is not None else '')
dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=
'vggface2', debug_max_num_samples=debug_samples)
gen = dt.get_generator()
else:
dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=
'vggface2', debug_max_num_samples=debug_samples)
gen = dv.get_generator()
i = 0
for batch in tqdm(gen):
for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0
], batch[1][1], batch[1][2], batch[1][3]):
facemax = np.max(im)
facemin = np.min(im)
print('Sample:', i)
print('Labels:', gender, age, ethnicity, emotion)
print('Gender:', verbose_gender(gender), '- Age:', verbose_age(
age), '- Ethnicity:', verbose_ethnicity(ethnicity),
'- Emotion:', verbose_emotion(emotion))
im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8
)
cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),
verbose_age(age), verbose_ethnicity(ethnicity),
verbose_emotion(emotion)), (0, im.shape[1]), cv2.
FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))
cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),
verbose_age(age), verbose_ethnicity(ethnicity),
verbose_emotion(emotion)), im)
i += 1
if cv2.waitKey(0) & 255 == ord('q'):
cv2.destroyAllWindows()
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
from cv2 import cv2
from tqdm import tqdm
import os
import pickle
import numpy as np
import csv
import sys
from collections import defaultdict
from dataset_utils import *
sys.path.append('../training')
from dataset_tools import enclosing_square, add_margin, DataGenerator
EXT_ROOT = os.path.dirname(os.path.abspath(__file__))
rafdb_labels = {'age_group': {'0-3': 0, '4-19': 1, '20-39': 2, '40-69': 3,
'70+': 4}, 'race': {'Caucasian': 0, 'African-American': 1, 'Asian': 2}}
rafDBmeta = defaultdict(dict)
rafDBpartition = dict()
rafDBdata = None
def _load_traits(input_meta, include_gender=False, include_age_group=False,
include_race=False):
global rafDBdata
if rafDBdata is None:
rafDBdata = dict()
i, errors = 0, defaultdict(set)
for image_path, image_meta in input_meta.items():
identity = image_meta['identity']
roi = None
rafDBdata[image_path] = {'roi': roi, 'identity': identity,
'gender': get_gender_label(image_meta['gender']) if
include_gender else MASK_VALUE, 'age_group':
get_age_group_label(image_meta['age_group']) if
include_age_group else MASK_VALUE, 'ethnicity':
get_ethnicity_label(image_meta['race']) if include_race else
MASK_VALUE, 'emotion': get_emotion_label(image_meta[
'emotion']), 'sample_num': i}
i += 1
print('Metadata:', len(rafDBdata))
if errors:
print('Gender errors', errors['gender'])
print('Age errors', errors['age'])
print('Ethnicity errors', errors['ethnicity'])
def get_gender_label(gender):
if gender == 'male':
return LABELS['gender']['male']
elif gender == 'female':
return LABELS['gender']['female']
return MASK_VALUE
def get_age_group_label(age_group_text):
return rafdb_labels['age_group'][age_group_text]
def get_ethnicity_label(ethnicity_text):
return rafdb_labels['race'][ethnicity_text]
def get_emotion_label(emotion):
return LABELS['emotion'][emotion]
def _load_meta_from_csv(csv_meta, output_dict):
data = readcsv(csv_meta)
for row in data:
output_dict[row[0]]['gender'] = row[1]
output_dict[row[0]]['age_group'] = row[2]
output_dict[row[0]]['race'] = row[3]
output_dict[row[0]]['emotion'] = row[4]
output_dict[row[0]]['identity'] = row[0].split('_')[1]
def get_partition(identity_label):
global rafDBpartition
try:
faces, partition = rafDBpartition[identity_label]
rafDBpartition[identity_label] = faces + 1, partition
except KeyError:
l = (len(rafDBpartition) - 1) % 10
if l == 0 or l == 1:
partition = PARTITION_VAL
else:
partition = PARTITION_TRAIN
rafDBpartition[identity_label] = 1, partition
return partition
def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):
data = list()
discarded_items = defaultdict(list)
for image_path, image_meta in tqdm(rafDBdata.items()):
path = os.path.join(imagesdir, image_path)
if ALIGNED:
path = os.path.splitext(path)
path = path[0] + '_aligned' + path[1]
identity = image_meta['identity']
image = cv2.imread(path)
if image is None:
print('WARNING! Unable to read {}'.format(image_path))
print(' - At {}'.format(path))
discarded_items['unavailable_image'].append(identity)
continue
if np.max(image) == np.min(image):
print('Blank image {}'.format(image_path))
discarded_items['blank_image'].append(identity)
continue
sample_partition = (PARTITION_TEST if partition_label ==
PARTITION_TEST else get_partition(identity))
gender = rafDBdata[image_path]['gender']
age = rafDBdata[image_path]['age_group']
ethnicity = rafDBdata[image_path]['ethnicity']
emotion = rafDBdata[image_path]['emotion']
labels = gender, age, ethnicity, emotion
roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'
] is None else image_meta['roi']
sample = {'img': path, 'label': labels, 'roi': roi, 'part':
sample_partition}
data.append(sample)
if debug_max_num_samples is not None and len(data
) >= debug_max_num_samples:
print('Stopped loading. Debug max samples: ', debug_max_num_samples
)
break
print('Data loaded. {} samples'.format(len(data)))
print('Discarded for unavailable image: ', len(discarded_items[
'unavailable_image']))
print('Discarded for blank image: ', len(discarded_items['blank_image']))
return data
ALIGNED = True
class RAFDBMulti:
def __init__(self, partition='train', imagesdir=
'data/RAF-DB/basic/Image/{aligned}', csvmeta=
'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',
target_shape=(112, 112, 3), augment=True, custom_augmentation=None,
preprocessing='full_normalization', debug_max_num_samples=None,
include_gender=False, include_age_group=False, include_race=False,
**kwargs):
partition_label = partition_select(partition)
self.target_shape = target_shape
self.custom_augmentation = custom_augmentation
self.augment = augment
self.gen = None
self.preprocessing = preprocessing
print('Loading %s data...' % partition)
num_samples = '_' + str(debug_max_num_samples
) if debug_max_num_samples is not None else ''
cache_task = '{}{}{}_emotion'.format('_withgender' if
include_gender else '', '_withagegroup' if include_age_group else
'', '_withrace' if include_race else '')
cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(
task=cache_task, partition=partition, num_samples=num_samples)
cache_file_name = os.path.join('cache', cache_file_name)
cache_file_name = os.path.join(EXT_ROOT, cache_file_name)
print('cache file name %s' % cache_file_name)
try:
with open(cache_file_name, 'rb') as f:
self.data = pickle.load(f)[:debug_max_num_samples]
print('Data loaded. %d samples, from cache' % len(self.data))
except FileNotFoundError:
print('Loading %s data from scratch' % partition)
load_partition = ('train' if partition_label == PARTITION_TRAIN or
partition_label == PARTITION_VAL else 'test')
imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=
'aligned' if ALIGNED else 'original'))
csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=
load_partition))
_load_meta_from_csv(csvmeta, rafDBmeta)
_load_traits(rafDBmeta, include_gender, include_age_group,
include_race)
print('Loading {} dataset'.format(partition))
loaded_data = _load_dataset(imagesdir, partition_label,
debug_max_num_samples)
print_verbose_partition(dataset_partition=rafDBpartition,
verbosed_partition=partition_label)
if partition.startswith('test'):
self.data = loaded_data
else:
self.data = [x for x in loaded_data if x['part'] ==
partition_label]
with open(cache_file_name, 'wb') as f:
print('Pickle dumping')
pickle.dump(self.data, f)
def get_data(self):
return self.data
def get_num_samples(self):
return len(self.data)
def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):
if self.gen is None:
self.gen = DataGenerator(data=self.data, target_shape=self.
target_shape, with_augmentation=self.augment,
custom_augmentation=self.custom_augmentation, batch_size=
batch_size, num_classes=self.get_num_classes(),
preprocessing=self.preprocessing, fullinfo=fullinfo,
doublelabel=doublelabel)
return self.gen
def get_num_classes(self):
return CLASSES
def test_multi(dataset='test', debug_samples=None):
if dataset.startswith('train') or dataset.startswith('val'):
print(dataset, debug_samples if debug_samples is not None else '')
dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=
'vggface2', debug_max_num_samples=debug_samples)
gen = dt.get_generator()
else:
dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=
'vggface2', debug_max_num_samples=debug_samples)
gen = dv.get_generator()
i = 0
for batch in tqdm(gen):
for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0
], batch[1][1], batch[1][2], batch[1][3]):
facemax = np.max(im)
facemin = np.min(im)
print('Sample:', i)
print('Labels:', gender, age, ethnicity, emotion)
print('Gender:', verbose_gender(gender), '- Age:', verbose_age(
age), '- Ethnicity:', verbose_ethnicity(ethnicity),
'- Emotion:', verbose_emotion(emotion))
im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8
)
cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),
verbose_age(age), verbose_ethnicity(ethnicity),
verbose_emotion(emotion)), (0, im.shape[1]), cv2.
FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))
cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),
verbose_age(age), verbose_ethnicity(ethnicity),
verbose_emotion(emotion)), im)
i += 1
if cv2.waitKey(0) & 255 == ord('q'):
cv2.destroyAllWindows()
return
if '__main__' == __name__:
test_multi('train')
test_multi('val')
test_multi('test')
<|reserved_special_token_1|>
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
from cv2 import cv2
from tqdm import tqdm
import os
import pickle
import numpy as np
import csv
import sys
from collections import defaultdict
from dataset_utils import *
sys.path.append("../training")
from dataset_tools import enclosing_square, add_margin, DataGenerator
EXT_ROOT = os.path.dirname(os.path.abspath(__file__))
rafdb_labels = {
"age_group": {
"0-3": 0,
"4-19": 1,
"20-39": 2,
"40-69": 3,
"70+":4
},
"race": {
"Caucasian": 0,
"African-American": 1,
"Asian": 2
}
}
# converted labels
rafDBmeta = defaultdict(dict)
# multitask labels
rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose
rafDBdata = None # dict({image_path: ... }) # for ensembling purpose
# ORDER: Gender, Age, Ethnicity, Emotion
def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False):
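    # Fill the module-level rafDBdata dict with one entry per image: identity,
    # optional gender/age-group/race labels (MASK_VALUE when the trait is not
    # requested) and the emotion label, plus a running sample number.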
global rafDBdata
if rafDBdata is None:
rafDBdata = dict()
i, errors = 0, defaultdict(set)
for image_path, image_meta in input_meta.items():
identity = image_meta["identity"]
roi = None # aligned image, roi is the image size
rafDBdata[image_path] = {
"roi" : roi,
"identity" : identity,
"gender" : get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE,
"age_group" : get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE,
"ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE,
"emotion": get_emotion_label(image_meta["emotion"]),
"sample_num" : i
}
i += 1
print("Metadata:", len(rafDBdata))
if errors:
print("Gender errors", errors["gender"])
print("Age errors", errors["age"])
print("Ethnicity errors", errors["ethnicity"])
# Labelling
def get_gender_label(gender):
if gender == 'male':
return LABELS["gender"]["male"]
elif gender == 'female':
return LABELS["gender"]["female"]
return MASK_VALUE
def get_age_group_label(age_group_text):
return rafdb_labels["age_group"][age_group_text]
def get_ethnicity_label(ethnicity_text):
return rafdb_labels["race"][ethnicity_text]
def get_emotion_label(emotion):
return LABELS["emotion"][emotion]
# Load from csv
def _load_meta_from_csv(csv_meta, output_dict):
data = readcsv(csv_meta)
for row in data:
output_dict[row[0]]["gender"] = row[1]
output_dict[row[0]]["age_group"] = row[2]
output_dict[row[0]]["race"] = row[3]
output_dict[row[0]]["emotion"] = row[4]
output_dict[row[0]]["identity"] = row[0].split("_")[1]
def get_partition(identity_label):
global rafDBpartition
try:
faces, partition = rafDBpartition[identity_label]
rafDBpartition[identity_label] = (faces + 1, partition)
except KeyError:
# split 20/80 stratified by identity
l = (len(rafDBpartition) - 1) % 10
if l == 0 or l == 1:
partition = PARTITION_VAL
else:
partition = PARTITION_TRAIN
rafDBpartition[identity_label] = (1, partition)
return partition
def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):
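    # Build the sample list: read every image referenced in rafDBdata, skip
    # unreadable or blank files, and attach the (gender, age, ethnicity, emotion)
    # labels, the roi and a train/val/test partition to each sample.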
data = list()
discarded_items = defaultdict(list)
for image_path, image_meta in tqdm(rafDBdata.items()):
path = os.path.join(imagesdir, image_path)
if ALIGNED:
path = os.path.splitext(path)
path = path[0] + "_aligned" + path[1]
identity = image_meta["identity"]
image = cv2.imread(path)
if image is None:
print("WARNING! Unable to read {}".format(image_path))
print(" - At {}".format(path))
discarded_items["unavailable_image"].append(identity)
continue
if np.max(image) == np.min(image):
print("Blank image {}".format(image_path))
discarded_items["blank_image"].append(identity)
continue
sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity)
gender = rafDBdata[image_path]["gender"]
age = rafDBdata[image_path]["age_group"]
ethnicity = rafDBdata[image_path]["ethnicity"]
emotion = rafDBdata[image_path]["emotion"]
labels = (gender, age, ethnicity, emotion)
roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"]
sample = {
'img': path,
'label': labels,
'roi': roi,
'part': sample_partition
}
data.append(sample)
if debug_max_num_samples is not None and len(data) >= debug_max_num_samples:
print("Stopped loading. Debug max samples: ", debug_max_num_samples)
break
print("Data loaded. {} samples".format(len(data)))
print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"]))
print("Discarded for blank image: ", len(discarded_items["blank_image"]))
return data
ALIGNED = True
class RAFDBMulti:
def __init__(self,
partition='train',
imagesdir='data/RAF-DB/basic/Image/{aligned}',
csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',
target_shape=(112, 112, 3),
augment=True,
custom_augmentation=None,
preprocessing='full_normalization',
debug_max_num_samples=None,
include_gender=False,
include_age_group=False,
include_race=False,
**kwargs):
partition_label = partition_select(partition)
self.target_shape = target_shape
self.custom_augmentation = custom_augmentation
self.augment = augment
self.gen = None
self.preprocessing = preprocessing
print('Loading %s data...' % partition)
num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else ''
cache_task = "{}{}{}_emotion".format(
"_withgender" if include_gender else "",
"_withagegroup" if include_age_group else "",
"_withrace" if include_race else ""
)
cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples)
cache_file_name = os.path.join("cache", cache_file_name)
cache_file_name = os.path.join(EXT_ROOT, cache_file_name)
print("cache file name %s" % cache_file_name)
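        # Preprocessed samples are cached as a pickle per task/partition; fall
        # back to parsing the CSV metadata and scanning the images when the
        # cache file does not exist yet.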
try:
with open(cache_file_name, 'rb') as f:
self.data = pickle.load(f)[:debug_max_num_samples]
print("Data loaded. %d samples, from cache" % (len(self.data)))
except FileNotFoundError:
print("Loading %s data from scratch" % partition)
load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test"
imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original"))
csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition))
_load_meta_from_csv(csvmeta, rafDBmeta)
_load_traits(rafDBmeta, include_gender, include_age_group, include_race)
print("Loading {} dataset".format(partition))
loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples)
print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label)
if partition.startswith('test'):
self.data = loaded_data
else:
self.data = [x for x in loaded_data if x['part'] == partition_label]
with open(cache_file_name, 'wb') as f:
print("Pickle dumping")
pickle.dump(self.data, f)
def get_data(self):
return self.data
def get_num_samples(self):
return len(self.data)
def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):
if self.gen is None:
self.gen = DataGenerator(data=self.data,
target_shape=self.target_shape,
with_augmentation=self.augment,
custom_augmentation=self.custom_augmentation,
batch_size=batch_size,
num_classes=self.get_num_classes(),
preprocessing=self.preprocessing,
fullinfo=fullinfo,
doublelabel=doublelabel)
return self.gen
def get_num_classes(self):
return CLASSES
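# Editor's sketch of the cache naming produced by __init__ above (derived from its
# formatting logic): RAFDBMulti('train', include_gender=True, include_age_group=True,
# include_race=True) reads or writes
#   <EXT_ROOT>/cache/rafdb_withgender_withagegroup_withrace_emotion_train.cache
# while the default emotion-only call uses <EXT_ROOT>/cache/rafdb_emotion_train.cache.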
def test_multi(dataset="test", debug_samples=None):
if dataset.startswith("train") or dataset.startswith("val"):
print(dataset, debug_samples if debug_samples is not None else '')
dt = RAFDBMulti(dataset,
target_shape=(112, 112, 3),
preprocessing='vggface2',
debug_max_num_samples=debug_samples)
gen = dt.get_generator()
else:
dv = RAFDBMulti('test',
target_shape=(112, 112, 3),
preprocessing='vggface2',
debug_max_num_samples=debug_samples)
gen = dv.get_generator()
i = 0
for batch in tqdm(gen):
for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0], batch[1][1], batch[1][2], batch[1][3]):
facemax = np.max(im)
facemin = np.min(im)
print("Sample:", i)
print("Labels:", gender, age, ethnicity, emotion)
print("Gender:", verbose_gender(gender),
"- Age:", verbose_age(age),
"- Ethnicity:", verbose_ethnicity(ethnicity),
"- Emotion:", verbose_emotion(emotion))
im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8)
cv2.putText(im, "{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)),
(0, im.shape[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))
cv2.imshow("{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), im)
i += 1
if cv2.waitKey(0) & 0xFF == ord('q'):
cv2.destroyAllWindows()
return
if '__main__' == __name__:
test_multi("train")
test_multi("val")
test_multi("test")
|
flexible
|
{
"blob_id": "0b7d1564ecbd78086d59629a2058716f41b4b8c8",
"index": 9686,
"step-1": "<mask token>\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\n<mask token>\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. {} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\n<mask token>\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' % partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. 
%d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS['gender']['male']\n elif gender == 'female':\n return LABELS['gender']['female']\n return MASK_VALUE\n\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels['age_group'][age_group_text]\n\n\n<mask token>\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\n<mask token>\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. {} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\n<mask token>\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' 
% partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. %d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset='test', debug_samples=None):\n if dataset.startswith('train') or dataset.startswith('val'):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0\n ], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print('Sample:', i)\n print('Labels:', gender, age, ethnicity, emotion)\n print('Gender:', verbose_gender(gender), '- Age:', verbose_age(\n age), '- Ethnicity:', verbose_ethnicity(ethnicity),\n '- Emotion:', verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8\n )\n cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), (0, im.shape[1]), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),\n 
verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 255 == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _load_traits(input_meta, include_gender=False, include_age_group=False,\n include_race=False):\n global rafDBdata\n if rafDBdata is None:\n rafDBdata = dict()\n i, errors = 0, defaultdict(set)\n for image_path, image_meta in input_meta.items():\n identity = image_meta['identity']\n roi = None\n rafDBdata[image_path] = {'roi': roi, 'identity': identity,\n 'gender': get_gender_label(image_meta['gender']) if\n include_gender else MASK_VALUE, 'age_group': \n get_age_group_label(image_meta['age_group']) if\n include_age_group else MASK_VALUE, 'ethnicity': \n get_ethnicity_label(image_meta['race']) if include_race else\n MASK_VALUE, 'emotion': get_emotion_label(image_meta[\n 'emotion']), 'sample_num': i}\n i += 1\n print('Metadata:', len(rafDBdata))\n if errors:\n print('Gender errors', errors['gender'])\n print('Age errors', errors['age'])\n print('Ethnicity errors', errors['ethnicity'])\n\n\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS['gender']['male']\n elif gender == 'female':\n return LABELS['gender']['female']\n return MASK_VALUE\n\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels['age_group'][age_group_text]\n\n\n<mask token>\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\n<mask token>\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. 
{} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\n<mask token>\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' % partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. %d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset='test', debug_samples=None):\n if dataset.startswith('train') or dataset.startswith('val'):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test', target_shape=(112, 112, 3), 
preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0\n ], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print('Sample:', i)\n print('Labels:', gender, age, ethnicity, emotion)\n print('Gender:', verbose_gender(gender), '- Age:', verbose_age(\n age), '- Ethnicity:', verbose_ethnicity(ethnicity),\n '- Emotion:', verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8\n )\n cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), (0, im.shape[1]), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 255 == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\n<mask token>\n",
"step-4": "import warnings\nwarnings.filterwarnings('ignore', category=FutureWarning)\nfrom cv2 import cv2\nfrom tqdm import tqdm\nimport os\nimport pickle\nimport numpy as np\nimport csv\nimport sys\nfrom collections import defaultdict\nfrom dataset_utils import *\nsys.path.append('../training')\nfrom dataset_tools import enclosing_square, add_margin, DataGenerator\nEXT_ROOT = os.path.dirname(os.path.abspath(__file__))\nrafdb_labels = {'age_group': {'0-3': 0, '4-19': 1, '20-39': 2, '40-69': 3,\n '70+': 4}, 'race': {'Caucasian': 0, 'African-American': 1, 'Asian': 2}}\nrafDBmeta = defaultdict(dict)\nrafDBpartition = dict()\nrafDBdata = None\n\n\ndef _load_traits(input_meta, include_gender=False, include_age_group=False,\n include_race=False):\n global rafDBdata\n if rafDBdata is None:\n rafDBdata = dict()\n i, errors = 0, defaultdict(set)\n for image_path, image_meta in input_meta.items():\n identity = image_meta['identity']\n roi = None\n rafDBdata[image_path] = {'roi': roi, 'identity': identity,\n 'gender': get_gender_label(image_meta['gender']) if\n include_gender else MASK_VALUE, 'age_group': \n get_age_group_label(image_meta['age_group']) if\n include_age_group else MASK_VALUE, 'ethnicity': \n get_ethnicity_label(image_meta['race']) if include_race else\n MASK_VALUE, 'emotion': get_emotion_label(image_meta[\n 'emotion']), 'sample_num': i}\n i += 1\n print('Metadata:', len(rafDBdata))\n if errors:\n print('Gender errors', errors['gender'])\n print('Age errors', errors['age'])\n print('Ethnicity errors', errors['ethnicity'])\n\n\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS['gender']['male']\n elif gender == 'female':\n return LABELS['gender']['female']\n return MASK_VALUE\n\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels['age_group'][age_group_text]\n\n\ndef get_ethnicity_label(ethnicity_text):\n return rafdb_labels['race'][ethnicity_text]\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\ndef get_partition(identity_label):\n global rafDBpartition\n try:\n faces, partition = rafDBpartition[identity_label]\n rafDBpartition[identity_label] = faces + 1, partition\n except KeyError:\n l = (len(rafDBpartition) - 1) % 10\n if l == 0 or l == 1:\n partition = PARTITION_VAL\n else:\n partition = PARTITION_TRAIN\n rafDBpartition[identity_label] = 1, partition\n return partition\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! 
Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. {} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\nALIGNED = True\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' % partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. 
%d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset='test', debug_samples=None):\n if dataset.startswith('train') or dataset.startswith('val'):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0\n ], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print('Sample:', i)\n print('Labels:', gender, age, ethnicity, emotion)\n print('Gender:', verbose_gender(gender), '- Age:', verbose_age(\n age), '- Ethnicity:', verbose_ethnicity(ethnicity),\n '- Emotion:', verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8\n )\n cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), (0, im.shape[1]), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 255 == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\nif '__main__' == __name__:\n test_multi('train')\n test_multi('val')\n test_multi('test')\n",
"step-5": "import warnings\nwarnings.filterwarnings('ignore', category=FutureWarning)\nfrom cv2 import cv2\nfrom tqdm import tqdm\nimport os\nimport pickle\nimport numpy as np\nimport csv\nimport sys\nfrom collections import defaultdict\n\nfrom dataset_utils import *\n\nsys.path.append(\"../training\")\nfrom dataset_tools import enclosing_square, add_margin, DataGenerator\n\nEXT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nrafdb_labels = {\n \"age_group\": {\n \"0-3\": 0,\n \"4-19\": 1,\n \"20-39\": 2,\n \"40-69\": 3,\n \"70+\":4 \n },\n \"race\": {\n \"Caucasian\": 0,\n \"African-American\": 1,\n \"Asian\": 2\n }\n}\n\n# converted labels\nrafDBmeta = defaultdict(dict)\n\n# multitask labels\nrafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose\nrafDBdata = None # dict({image_path: ... }) # for ensembling purpose\n\n\n# ORDER: Gender, Age, Ethnicity, Emotion\ndef _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False):\n global rafDBdata\n if rafDBdata is None:\n rafDBdata = dict()\n i, errors = 0, defaultdict(set)\n for image_path, image_meta in input_meta.items():\n identity = image_meta[\"identity\"]\n roi = None # aligned image, roi is the image size\n rafDBdata[image_path] = {\n \"roi\" : roi,\n \"identity\" : identity,\n \"gender\" : get_gender_label(image_meta[\"gender\"]) if include_gender else MASK_VALUE,\n \"age_group\" : get_age_group_label(image_meta[\"age_group\"]) if include_age_group else MASK_VALUE,\n \"ethnicity\": get_ethnicity_label(image_meta[\"race\"]) if include_race else MASK_VALUE,\n \"emotion\": get_emotion_label(image_meta[\"emotion\"]),\n \"sample_num\" : i\n }\n i += 1 \n print(\"Metadata:\", len(rafDBdata))\n if errors:\n print(\"Gender errors\", errors[\"gender\"])\n print(\"Age errors\", errors[\"age\"])\n print(\"Ethnicity errors\", errors[\"ethnicity\"])\n\n\n# Labelling\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS[\"gender\"][\"male\"]\n elif gender == 'female':\n return LABELS[\"gender\"][\"female\"]\n return MASK_VALUE\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels[\"age_group\"][age_group_text]\n\ndef get_ethnicity_label(ethnicity_text):\n return rafdb_labels[\"race\"][ethnicity_text]\n\ndef get_emotion_label(emotion):\n return LABELS[\"emotion\"][emotion]\n\n\n# Load from csv\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]][\"gender\"] = row[1]\n output_dict[row[0]][\"age_group\"] = row[2]\n output_dict[row[0]][\"race\"] = row[3]\n output_dict[row[0]][\"emotion\"] = row[4]\n output_dict[row[0]][\"identity\"] = row[0].split(\"_\")[1]\n\n\ndef get_partition(identity_label): \n global rafDBpartition\n try:\n faces, partition = rafDBpartition[identity_label]\n rafDBpartition[identity_label] = (faces + 1, partition)\n except KeyError:\n # split 20/80 stratified by identity\n l = (len(rafDBpartition) - 1) % 10\n if l == 0 or l == 1:\n partition = PARTITION_VAL\n else:\n partition = PARTITION_TRAIN\n rafDBpartition[identity_label] = (1, partition)\n return partition\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + \"_aligned\" + path[1]\n identity = image_meta[\"identity\"]\n image = cv2.imread(path)\n if image is None:\n 
print(\"WARNING! Unable to read {}\".format(image_path))\n print(\" - At {}\".format(path))\n discarded_items[\"unavailable_image\"].append(identity)\n continue\n if np.max(image) == np.min(image):\n print(\"Blank image {}\".format(image_path))\n discarded_items[\"blank_image\"].append(identity)\n continue\n sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity)\n gender = rafDBdata[image_path][\"gender\"]\n age = rafDBdata[image_path][\"age_group\"]\n ethnicity = rafDBdata[image_path][\"ethnicity\"]\n emotion = rafDBdata[image_path][\"emotion\"]\n labels = (gender, age, ethnicity, emotion)\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta[\"roi\"] is None else image_meta[\"roi\"] \n sample = {\n 'img': path,\n 'label': labels,\n 'roi': roi,\n 'part': sample_partition\n }\n data.append(sample)\n if debug_max_num_samples is not None and len(data) >= debug_max_num_samples:\n print(\"Stopped loading. Debug max samples: \", debug_max_num_samples)\n break\n print(\"Data loaded. {} samples\".format(len(data)))\n print(\"Discarded for unavailable image: \", len(discarded_items[\"unavailable_image\"]))\n print(\"Discarded for blank image: \", len(discarded_items[\"blank_image\"]))\n return data\n\n\nALIGNED = True\n\nclass RAFDBMulti:\n def __init__(self,\n partition='train',\n imagesdir='data/RAF-DB/basic/Image/{aligned}',\n csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3),\n augment=True,\n custom_augmentation=None,\n preprocessing='full_normalization',\n debug_max_num_samples=None,\n include_gender=False,\n include_age_group=False,\n include_race=False,\n **kwargs):\n \n partition_label = partition_select(partition)\n\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' % partition)\n\n num_samples = \"_\" + str(debug_max_num_samples) if debug_max_num_samples is not None else ''\n cache_task = \"{}{}{}_emotion\".format(\n \"_withgender\" if include_gender else \"\",\n \"_withagegroup\" if include_age_group else \"\",\n \"_withrace\" if include_race else \"\"\n )\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join(\"cache\", cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print(\"cache file name %s\" % cache_file_name)\n\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print(\"Data loaded. 
%d samples, from cache\" % (len(self.data)))\n except FileNotFoundError:\n print(\"Loading %s data from scratch\" % partition)\n load_partition = \"train\" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else \"test\"\n\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\"aligned\" if ALIGNED else \"original\"))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition))\n\n _load_meta_from_csv(csvmeta, rafDBmeta)\n\n _load_traits(rafDBmeta, include_gender, include_age_group, include_race)\n \n print(\"Loading {} dataset\".format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples)\n\n print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] == partition_label]\n with open(cache_file_name, 'wb') as f:\n print(\"Pickle dumping\")\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data,\n target_shape=self.target_shape,\n with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation,\n batch_size=batch_size,\n num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, \n fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset=\"test\", debug_samples=None):\n\n if dataset.startswith(\"train\") or dataset.startswith(\"val\"):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset,\n target_shape=(112, 112, 3),\n preprocessing='vggface2',\n debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test',\n target_shape=(112, 112, 3),\n preprocessing='vggface2',\n debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print(\"Sample:\", i)\n print(\"Labels:\", gender, age, ethnicity, emotion)\n print(\"Gender:\", verbose_gender(gender),\n \"- Age:\", verbose_age(age),\n \"- Ethnicity:\", verbose_ethnicity(ethnicity),\n \"- Emotion:\", verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8)\n cv2.putText(im, \"{} {} {} {}\".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)),\n (0, im.shape[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow(\"{} {} {} {}\".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\nif '__main__' == __name__:\n test_multi(\"train\")\n test_multi(\"val\")\n test_multi(\"test\")\n",
"step-ids": [
9,
12,
13,
18,
19
]
}
|
[
9,
12,
13,
18,
19
] |
class Customer:
def __init__(self, name, phoneno, address, pin, accno, balance):
self._name = name
self._pno = phoneno
self._add = address
self._pin = pin
self._acc = accno
self._bal = balance
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def transfer(self):
name = input('Enter Recipient name : ')
acc = input('Enter account number : ')
if len(acc) == 16:
amt = int(input('Enter amount to transfer : '))
if amt > self._d['CustomerBalance']:
print('Insufficient Balance')
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance'] -= amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ', self._d['CustomerName'])
print('Account Balance is ', self._d['CustomerBalance'])
print()
def __del__(self):
print('Thank You')
pass
<|reserved_special_token_1|>
class Customer:
def __init__(self, name, phoneno, address, pin, accno, balance):
self._name = name
self._pno = phoneno
self._add = address
self._pin = pin
self._acc = accno
self._bal = balance
<|reserved_special_token_0|>
def deposit(self):
amt = int(input('Enter Deposit amount : '))
self._d['CustomerBalance'] += amt
print('Your a/c is credited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
<|reserved_special_token_0|>
def transfer(self):
name = input('Enter Recipient name : ')
acc = input('Enter account number : ')
if len(acc) == 16:
amt = int(input('Enter amount to transfer : '))
if amt > self._d['CustomerBalance']:
print('Insufficient Balance')
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance'] -= amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ', self._d['CustomerName'])
print('Account Balance is ', self._d['CustomerBalance'])
print()
def __del__(self):
print('Thank You')
pass
<|reserved_special_token_1|>
class Customer:
def __init__(self, name, phoneno, address, pin, accno, balance):
self._name = name
self._pno = phoneno
self._add = address
self._pin = pin
self._acc = accno
self._bal = balance
def add(self):
self._d = {}
self._d['CustomerName'] = self._name
self._d['CustomerPhonenumber'] = self._pno
self._d['CustomerAddress'] = self._add
self._d['CustomerPin'] = self._pin
self._d['CustomerAccountNumber'] = self._acc
self._d['CustomerBalance'] = self._bal
print('Customer Details Add Successfully')
def deposit(self):
amt = int(input('Enter Deposit amount : '))
self._d['CustomerBalance'] += amt
print('Your a/c is credited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
<|reserved_special_token_0|>
def transfer(self):
name = input('Enter Recipient name : ')
acc = input('Enter account number : ')
if len(acc) == 16:
amt = int(input('Enter amount to transfer : '))
if amt > self._d['CustomerBalance']:
print('Insufficient Balance')
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance'] -= amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ', self._d['CustomerName'])
print('Account Balance is ', self._d['CustomerBalance'])
print()
def __del__(self):
print('Thank You')
pass
<|reserved_special_token_1|>
class Customer:
def __init__(self, name, phoneno, address, pin, accno, balance):
self._name = name
self._pno = phoneno
self._add = address
self._pin = pin
self._acc = accno
self._bal = balance
def add(self):
self._d = {}
self._d['CustomerName'] = self._name
self._d['CustomerPhonenumber'] = self._pno
self._d['CustomerAddress'] = self._add
self._d['CustomerPin'] = self._pin
self._d['CustomerAccountNumber'] = self._acc
self._d['CustomerBalance'] = self._bal
print('Customer Details Add Successfully')
def deposit(self):
amt = int(input('Enter Deposit amount : '))
self._d['CustomerBalance'] += amt
print('Your a/c is credited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
def withdraw(self):
amt = int(input('Enter Withdraw amount : '))
if amt > self._d['CustomerBalance']:
print('Insufficient Balance')
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance'] -= amt
print('Your a/c is debited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
def transfer(self):
name = input('Enter Recipient name : ')
acc = input('Enter account number : ')
if len(acc) == 16:
amt = int(input('Enter amount to transfer : '))
if amt > self._d['CustomerBalance']:
print('Insufficient Balance')
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance'] -= amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ', self._d['CustomerName'])
print('Account Balance is ', self._d['CustomerBalance'])
print()
def __del__(self):
print('Thank You')
pass
<|reserved_special_token_1|>
#2) write a program to make banking system develop business logic
#in one module and call functionality in another .py file
class Customer: #user defined class
def __init__(self,name,phoneno,address,pin,accno,balance) : #constructor with multiple arguments
self._name=name
self._pno=phoneno
self._add=address
self._pin=pin
self._acc=accno
self._bal=balance#protected variable
def add(self) : #user defined method
self._d={} #create empty dictionary
self._d['CustomerName']=self._name #add values to the dictionary using key names
self._d['CustomerPhonenumber']=self._pno
self._d['CustomerAddress']=self._add
self._d['CustomerPin']=self._pin
self._d['CustomerAccountNumber']=self._acc
self._d['CustomerBalance']=self._bal
print('Customer Details Add Successfully')
def deposit(self):
amt=int(input('Enter Deposit amount : '))
self._d['CustomerBalance']+=amt
print('Your a/c is credited for INR ',amt)
print('Account Balance is ',self._d['CustomerBalance'])
print()
def withdraw(self):
amt=int(input('Enter Withdraw amount : '))
if amt>self._d['CustomerBalance'] :
print('Insufficient Balance')
print('Account Balance is ',self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance']-=amt
print('Your a/c is debited for INR ',amt)
print('Account Balance is ',self._d['CustomerBalance'])
print()
def transfer(self):
name=input('Enter Recipient name : ')
acc=input('Enter account number : ')
if len(acc)==16:
amt=int(input('Enter amount to transfer : '))
if amt>self._d['CustomerBalance'] :
print('Insufficient Balance')
print('Account Balance is ',self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance']-=amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ',amt)
print('Account Balance is ',self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ',self._d['CustomerName'])
print('Account Balance is ',self._d['CustomerBalance'])
print()
def __del__(self): #destructor
print('Thank You')
pass
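# Editor's sketch of a minimal driver for the class above (the exercise keeps the
# equivalent calling code in a separate .py file; the argument values here are
# hypothetical):
if __name__ == '__main__':
    cust = Customer('Asha', '9876543210', 'Chennai', 1234, '1234567890123456', 5000)
    cust.add()       # store the details in the customer dictionary
    cust.deposit()   # prompts for an amount and credits it
    cust.withdraw()  # prompts for an amount and debits it if the balance allows
    cust.transfer()  # prompts for recipient, 16-digit account number and amount
    cust.mini()      # prints the name and current balance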
|
flexible
|
{
"blob_id": "cf5a9b8dad5a02610fa5ce2a849b6f9fc50a0aa8",
"index": 1872,
"step-1": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n <mask token>\n <mask token>\n <mask token>\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-2": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n <mask token>\n\n def deposit(self):\n amt = int(input('Enter Deposit amount : '))\n self._d['CustomerBalance'] += amt\n print('Your a/c is credited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n <mask token>\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-3": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n\n def add(self):\n self._d = {}\n self._d['CustomerName'] = self._name\n self._d['CustomerPhonenumber'] = self._pno\n self._d['CustomerAddress'] = self._add\n self._d['CustomerPin'] = self._pin\n self._d['CustomerAccountNumber'] = self._acc\n self._d['CustomerBalance'] = self._bal\n print('Customer Details Add Successfully')\n\n def deposit(self):\n amt = int(input('Enter Deposit amount : '))\n self._d['CustomerBalance'] += amt\n print('Your a/c is credited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n <mask token>\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-4": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n\n def add(self):\n self._d = {}\n self._d['CustomerName'] = self._name\n self._d['CustomerPhonenumber'] = self._pno\n self._d['CustomerAddress'] = self._add\n self._d['CustomerPin'] = self._pin\n self._d['CustomerAccountNumber'] = self._acc\n self._d['CustomerBalance'] = self._bal\n print('Customer Details Add Successfully')\n\n def deposit(self):\n amt = int(input('Enter Deposit amount : '))\n self._d['CustomerBalance'] += amt\n print('Your a/c is credited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def withdraw(self):\n amt = int(input('Enter Withdraw amount : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-5": "#2) write a program to make banking system develop business logic\r\n#in one module and call functionality in another .py file\r\n\r\nclass Customer: #user defined class\r\n def __init__(self,name,phoneno,address,pin,accno,balance) : #constructor with multiple arguments\r\n self._name=name \r\n self._pno=phoneno\r\n self._add=address\r\n self._pin=pin\r\n self._acc=accno\r\n self._bal=balance#protected variable\r\n def add(self) : #user defined method\r\n self._d={} #create empty dictionary\r\n self._d['CustomerName']=self._name #add values to the dictionary using key names\r\n self._d['CustomerPhonenumber']=self._pno\r\n self._d['CustomerAddress']=self._add\r\n self._d['CustomerPin']=self._pin\r\n self._d['CustomerAccountNumber']=self._acc\r\n self._d['CustomerBalance']=self._bal\r\n print('Customer Details Add Successfully')\r\n def deposit(self):\r\n amt=int(input('Enter Deposit amount : '))\r\n self._d['CustomerBalance']+=amt\r\n print('Your a/c is credited for INR ',amt)\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n def withdraw(self):\r\n amt=int(input('Enter Withdraw amount : '))\r\n if amt>self._d['CustomerBalance'] :\r\n print('Insufficient Balance')\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n else:\r\n self._d['CustomerBalance']-=amt\r\n print('Your a/c is debited for INR ',amt)\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n def transfer(self):\r\n name=input('Enter Recipient name : ')\r\n acc=input('Enter account number : ')\r\n if len(acc)==16:\r\n amt=int(input('Enter amount to transfer : '))\r\n if amt>self._d['CustomerBalance'] :\r\n print('Insufficient Balance')\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n else:\r\n self._d['CustomerBalance']-=amt\r\n print('Transfer amount successfully')\r\n print('Your a/c is debited for INR ',amt)\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n else:\r\n print('Invalid Account Number\\n')\r\n def mini(self):\r\n print('Name : ',self._d['CustomerName'])\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n def __del__(self): #destructor\r\n print('Thank You')\r\n pass\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from django.contrib import admin
from get_my_tweets.models import username
admin.site.register(username)
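# Editor's sketch: the model could also be registered with a ModelAdmin to customise
# the admin changelist. The list_display fields are placeholders, since the username
# model's fields are not shown in this file; the lines stay commented out because the
# plain register() call above has already registered the model.
#
# class UsernameAdmin(admin.ModelAdmin):
#     list_display = ('id',)  # hypothetical columns
#
# admin.site.register(username, UsernameAdmin)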
|
normal
|
{
"blob_id": "84ece5d1a9e38b83a5b60052fc3ab089c498d2fc",
"index": 9147,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(username)\n",
"step-3": "from django.contrib import admin\nfrom get_my_tweets.models import username\nadmin.site.register(username)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class MyThread(threading.Thread):
def __init__(self, filenum):
threading.Thread.__init__(self)
self.filenum = filenum
        print('Starting thread:', str(self.filenum))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyThread(threading.Thread):
def __init__(self, filenum):
threading.Thread.__init__(self)
self.filenum = filenum
print('Inicio del thread:', str(self.filenum))
def run(self):
parser = ET.XMLParser(encoding='ISO-8859-1')
parser.entity['agrave'] = 'à'
parser.entity['uuml'] = 'ü'
parser.entity['Eacute'] = 'É'
parser.entity['eacute'] = 'é'
parser.entity['aacute'] = 'á'
parser.entity['iacute'] = 'í'
parser.entity['ouml'] = 'ö'
parser.entity['ccedil'] = 'ç'
parser.entity['egrave'] = 'è'
parser.entity['auml'] = 'ä'
parser.entity['uacute'] = 'ú'
parser.entity['aring'] = 'å'
parser.entity['oacute'] = 'ó'
parser.entity['szlig'] = 'ß'
parser.entity['oslash'] = 'ø'
parser.entity['yacute'] = 'ỳ'
parser.entity['iuml'] = 'ï'
parser.entity['igrave'] = 'í'
parser.entity['ocirc'] = 'ô'
parser.entity['icirc'] = 'î'
parser.entity['Uuml'] = 'Ü'
parser.entity['euml'] = 'ë'
parser.entity['acirc'] = 'â'
parser.entity['atilde'] = 'ã'
parser.entity['Uacute'] = 'Ù'
parser.entity['Aacute'] = 'À'
parser.entity['ntilde'] = 'ñ'
parser.entity['Auml'] = 'Ä'
parser.entity['Oslash'] = 'Ø'
parser.entity['Ccedil'] = 'Ç'
parser.entity['otilde'] = 'õ'
parser.entity['ecirc'] = 'ê'
parser.entity['times'] = '×'
parser.entity['Ouml'] = 'Ö'
parser.entity['reg'] = '®'
parser.entity['Aring'] = 'Å'
parser.entity['Oacute'] = 'Ò'
parser.entity['ograve'] = 'ó'
parser.entity['yuml'] = 'ÿ'
parser.entity['eth'] = 'ð'
parser.entity['aelig'] = 'æ'
parser.entity['AElig'] = 'Æ'
parser.entity['Agrave'] = 'Á'
parser.entity['Iuml'] = 'Ï'
parser.entity['micro'] = 'µ'
parser.entity['Acirc'] = 'Â'
parser.entity['Otilde'] = 'Õ'
parser.entity['Egrave'] = 'É'
parser.entity['ETH'] = 'Ð'
parser.entity['ugrave'] = 'ú'
parser.entity['ucirc'] = 'û'
parser.entity['thorn'] = 'þ'
parser.entity['THORN'] = 'Þ'
parser.entity['Iacute'] = 'Ì'
parser.entity['Icirc'] = 'Î'
parser.entity['Ntilde'] = 'Ñ'
parser.entity['Ecirc'] = 'Ê'
parser.entity['Ocirc'] = 'Ô'
parser.entity['Ograve'] = 'Ó'
parser.entity['Igrave'] = 'Í'
parser.entity['Atilde'] = 'Ã'
parser.entity['Yacute'] = 'Ỳ'
parser.entity['Ucirc'] = 'Û'
parser.entity['Euml'] = 'Ë'
xml_file = '../../../data/dblp.' + str(self.filenum) + '.xml'
e = ET.parse(xml_file, parser=parser).getroot()
tot_docs = len(e)
doc_number = 0
mitad = False
max_mitad = False
complete = False
d = OrderedDict()
docs = ['article', 'inproceedings', 'incollection']
tags = ['author', 'year', 'title']
with open('../../../data/result' + str(self.filenum) + '.txt', 'w'
) as out:
out.writelines('')
for child1 in e:
if (doc_number / tot_docs > 0.5) & (not mitad):
print('50% de los documentos procesados en el thread',
str(self.filenum))
mitad = True
if (doc_number / tot_docs > 0.9) & (not max_mitad):
print('90% de los documentos procesados en el thread',
str(self.filenum))
max_mitad = True
if (doc_number / tot_docs == 1.0) & (not complete):
print('100% de los documentos procesados en el thread',
str(self.filenum))
complete = True
if child1.tag in docs:
d['Type'] = child1.tag
d['Authors'] = []
for child2 in child1:
if child2.tag in tags:
if child2.tag == 'author':
dicc_aut = dict()
dicc_aut['Nombre'] = child2.text
d['Authors'].append(dicc_aut)
elif child2.tag == 'title':
d['Title'] = child2.text
elif child2.tag == 'year':
d['Year'] = child2.text
out.writelines(json.dumps(d) + '\n')
doc_number += 1
out.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyThread(threading.Thread):
def __init__(self, filenum):
threading.Thread.__init__(self)
self.filenum = filenum
print('Inicio del thread:', str(self.filenum))
def run(self):
parser = ET.XMLParser(encoding='ISO-8859-1')
parser.entity['agrave'] = 'à'
parser.entity['uuml'] = 'ü'
parser.entity['Eacute'] = 'É'
parser.entity['eacute'] = 'é'
parser.entity['aacute'] = 'á'
parser.entity['iacute'] = 'í'
parser.entity['ouml'] = 'ö'
parser.entity['ccedil'] = 'ç'
parser.entity['egrave'] = 'è'
parser.entity['auml'] = 'ä'
parser.entity['uacute'] = 'ú'
parser.entity['aring'] = 'å'
parser.entity['oacute'] = 'ó'
parser.entity['szlig'] = 'ß'
parser.entity['oslash'] = 'ø'
parser.entity['yacute'] = 'ỳ'
parser.entity['iuml'] = 'ï'
parser.entity['igrave'] = 'í'
parser.entity['ocirc'] = 'ô'
parser.entity['icirc'] = 'î'
parser.entity['Uuml'] = 'Ü'
parser.entity['euml'] = 'ë'
parser.entity['acirc'] = 'â'
parser.entity['atilde'] = 'ã'
parser.entity['Uacute'] = 'Ù'
parser.entity['Aacute'] = 'À'
parser.entity['ntilde'] = 'ñ'
parser.entity['Auml'] = 'Ä'
parser.entity['Oslash'] = 'Ø'
parser.entity['Ccedil'] = 'Ç'
parser.entity['otilde'] = 'õ'
parser.entity['ecirc'] = 'ê'
parser.entity['times'] = '×'
parser.entity['Ouml'] = 'Ö'
parser.entity['reg'] = '®'
parser.entity['Aring'] = 'Å'
parser.entity['Oacute'] = 'Ò'
parser.entity['ograve'] = 'ó'
parser.entity['yuml'] = 'ÿ'
parser.entity['eth'] = 'ð'
parser.entity['aelig'] = 'æ'
parser.entity['AElig'] = 'Æ'
parser.entity['Agrave'] = 'Á'
parser.entity['Iuml'] = 'Ï'
parser.entity['micro'] = 'µ'
parser.entity['Acirc'] = 'Â'
parser.entity['Otilde'] = 'Õ'
parser.entity['Egrave'] = 'É'
parser.entity['ETH'] = 'Ð'
parser.entity['ugrave'] = 'ú'
parser.entity['ucirc'] = 'û'
parser.entity['thorn'] = 'þ'
parser.entity['THORN'] = 'Þ'
parser.entity['Iacute'] = 'Ì'
parser.entity['Icirc'] = 'Î'
parser.entity['Ntilde'] = 'Ñ'
parser.entity['Ecirc'] = 'Ê'
parser.entity['Ocirc'] = 'Ô'
parser.entity['Ograve'] = 'Ó'
parser.entity['Igrave'] = 'Í'
parser.entity['Atilde'] = 'Ã'
parser.entity['Yacute'] = 'Ỳ'
parser.entity['Ucirc'] = 'Û'
parser.entity['Euml'] = 'Ë'
xml_file = '../../../data/dblp.' + str(self.filenum) + '.xml'
e = ET.parse(xml_file, parser=parser).getroot()
tot_docs = len(e)
doc_number = 0
mitad = False
max_mitad = False
complete = False
d = OrderedDict()
docs = ['article', 'inproceedings', 'incollection']
tags = ['author', 'year', 'title']
with open('../../../data/result' + str(self.filenum) + '.txt', 'w'
) as out:
out.writelines('')
for child1 in e:
if (doc_number / tot_docs > 0.5) & (not mitad):
print('50% de los documentos procesados en el thread',
str(self.filenum))
mitad = True
if (doc_number / tot_docs > 0.9) & (not max_mitad):
print('90% de los documentos procesados en el thread',
str(self.filenum))
max_mitad = True
if (doc_number / tot_docs == 1.0) & (not complete):
print('100% de los documentos procesados en el thread',
str(self.filenum))
complete = True
if child1.tag in docs:
d['Type'] = child1.tag
d['Authors'] = []
for child2 in child1:
if child2.tag in tags:
if child2.tag == 'author':
dicc_aut = dict()
dicc_aut['Nombre'] = child2.text
d['Authors'].append(dicc_aut)
elif child2.tag == 'title':
d['Title'] = child2.text
elif child2.tag == 'year':
d['Year'] = child2.text
out.writelines(json.dumps(d) + '\n')
doc_number += 1
out.close()
for i in range(7):
MyThread(i).start()
<|reserved_special_token_1|>
import xml.etree.ElementTree as ET
from collections import OrderedDict
import json
import threading
class MyThread(threading.Thread):
def __init__(self, filenum):
threading.Thread.__init__(self)
self.filenum = filenum
print('Inicio del thread:', str(self.filenum))
def run(self):
parser = ET.XMLParser(encoding='ISO-8859-1')
parser.entity['agrave'] = 'à'
parser.entity['uuml'] = 'ü'
parser.entity['Eacute'] = 'É'
parser.entity['eacute'] = 'é'
parser.entity['aacute'] = 'á'
parser.entity['iacute'] = 'í'
parser.entity['ouml'] = 'ö'
parser.entity['ccedil'] = 'ç'
parser.entity['egrave'] = 'è'
parser.entity['auml'] = 'ä'
parser.entity['uacute'] = 'ú'
parser.entity['aring'] = 'å'
parser.entity['oacute'] = 'ó'
parser.entity['szlig'] = 'ß'
parser.entity['oslash'] = 'ø'
parser.entity['yacute'] = 'ỳ'
parser.entity['iuml'] = 'ï'
parser.entity['igrave'] = 'í'
parser.entity['ocirc'] = 'ô'
parser.entity['icirc'] = 'î'
parser.entity['Uuml'] = 'Ü'
parser.entity['euml'] = 'ë'
parser.entity['acirc'] = 'â'
parser.entity['atilde'] = 'ã'
parser.entity['Uacute'] = 'Ù'
parser.entity['Aacute'] = 'À'
parser.entity['ntilde'] = 'ñ'
parser.entity['Auml'] = 'Ä'
parser.entity['Oslash'] = 'Ø'
parser.entity['Ccedil'] = 'Ç'
parser.entity['otilde'] = 'õ'
parser.entity['ecirc'] = 'ê'
parser.entity['times'] = '×'
parser.entity['Ouml'] = 'Ö'
parser.entity['reg'] = '®'
parser.entity['Aring'] = 'Å'
parser.entity['Oacute'] = 'Ò'
parser.entity['ograve'] = 'ó'
parser.entity['yuml'] = 'ÿ'
parser.entity['eth'] = 'ð'
parser.entity['aelig'] = 'æ'
parser.entity['AElig'] = 'Æ'
parser.entity['Agrave'] = 'Á'
parser.entity['Iuml'] = 'Ï'
parser.entity['micro'] = 'µ'
parser.entity['Acirc'] = 'Â'
parser.entity['Otilde'] = 'Õ'
parser.entity['Egrave'] = 'É'
parser.entity['ETH'] = 'Ð'
parser.entity['ugrave'] = 'ú'
parser.entity['ucirc'] = 'û'
parser.entity['thorn'] = 'þ'
parser.entity['THORN'] = 'Þ'
parser.entity['Iacute'] = 'Ì'
parser.entity['Icirc'] = 'Î'
parser.entity['Ntilde'] = 'Ñ'
parser.entity['Ecirc'] = 'Ê'
parser.entity['Ocirc'] = 'Ô'
parser.entity['Ograve'] = 'Ó'
parser.entity['Igrave'] = 'Í'
parser.entity['Atilde'] = 'Ã'
parser.entity['Yacute'] = 'Ỳ'
parser.entity['Ucirc'] = 'Û'
parser.entity['Euml'] = 'Ë'
xml_file = '../../../data/dblp.' + str(self.filenum) + '.xml'
e = ET.parse(xml_file, parser=parser).getroot()
tot_docs = len(e)
doc_number = 0
mitad = False
max_mitad = False
complete = False
d = OrderedDict()
docs = ['article', 'inproceedings', 'incollection']
tags = ['author', 'year', 'title']
with open('../../../data/result' + str(self.filenum) + '.txt', 'w'
) as out:
out.writelines('')
for child1 in e:
if (doc_number / tot_docs > 0.5) & (not mitad):
print('50% de los documentos procesados en el thread',
str(self.filenum))
mitad = True
if (doc_number / tot_docs > 0.9) & (not max_mitad):
print('90% de los documentos procesados en el thread',
str(self.filenum))
max_mitad = True
if (doc_number / tot_docs == 1.0) & (not complete):
print('100% de los documentos procesados en el thread',
str(self.filenum))
complete = True
if child1.tag in docs:
d['Type'] = child1.tag
d['Authors'] = []
for child2 in child1:
if child2.tag in tags:
if child2.tag == 'author':
dicc_aut = dict()
dicc_aut['Nombre'] = child2.text
d['Authors'].append(dicc_aut)
elif child2.tag == 'title':
d['Title'] = child2.text
elif child2.tag == 'year':
d['Year'] = child2.text
out.writelines(json.dumps(d) + '\n')
doc_number += 1
out.close()
for i in range(7):
MyThread(i).start()
<|reserved_special_token_1|>
import xml.etree.ElementTree as ET
from collections import OrderedDict
import json
import threading
class MyThread(threading.Thread):
def __init__(self, filenum):
threading.Thread.__init__(self)
self.filenum = filenum
print('Inicio del thread:', str(self.filenum))
def run(self):
parser = ET.XMLParser(encoding='ISO-8859-1')
parser.entity["agrave"] = 'à'
parser.entity["uuml"] = 'ü'
parser.entity["Eacute"] = 'É'
parser.entity["eacute"] = 'é'
parser.entity["aacute"] = 'á'
parser.entity["iacute"] = 'í'
parser.entity["ouml"] = 'ö'
parser.entity["ccedil"] = 'ç'
parser.entity["egrave"] = 'è'
parser.entity["auml"] = 'ä'
parser.entity["uacute"] = 'ú'
parser.entity["aring"] = 'å'
parser.entity["oacute"] = 'ó'
parser.entity["szlig"] = 'ß'
parser.entity["oslash"] = 'ø'
parser.entity["yacute"] = 'ỳ'
parser.entity["iuml"] = 'ï'
parser.entity["igrave"] = 'í'
parser.entity["ocirc"] = 'ô'
parser.entity["icirc"] = 'î'
parser.entity["Uuml"] = 'Ü'
parser.entity["euml"] = 'ë'
parser.entity["acirc"] = 'â'
parser.entity["atilde"] = 'ã'
parser.entity["Uacute"] = 'Ù'
parser.entity["Aacute"] = 'À'
parser.entity["ntilde"] = 'ñ'
parser.entity["Auml"] = 'Ä'
parser.entity["Oslash"] = 'Ø'
parser.entity["Ccedil"] = 'Ç'
parser.entity["otilde"] = 'õ'
parser.entity["ecirc"] = 'ê'
parser.entity["times"] = '×'
parser.entity["Ouml"] = 'Ö'
parser.entity["reg"] = '®'
parser.entity["Aring"] = 'Å'
parser.entity["Oacute"] = 'Ò'
parser.entity["ograve"] = 'ó'
parser.entity["yuml"] = 'ÿ'
parser.entity["eth"] = 'ð'
parser.entity["aelig"] = 'æ'
parser.entity["AElig"] = 'Æ'
parser.entity["Agrave"] = 'Á'
parser.entity["Iuml"] = 'Ï'
parser.entity["micro"] = 'µ'
parser.entity["Acirc"] = 'Â'
parser.entity["Otilde"] = 'Õ'
parser.entity["Egrave"] = 'É'
parser.entity["ETH"] = 'Ð'
parser.entity["ugrave"] = 'ú'
parser.entity["ucirc"] = 'û'
parser.entity["thorn"] = 'þ'
parser.entity["THORN"] = 'Þ'
parser.entity["Iacute"] = 'Ì'
parser.entity["Icirc"] = 'Î'
parser.entity["Ntilde"] = 'Ñ'
parser.entity["Ecirc"] = 'Ê'
parser.entity["Ocirc"] = 'Ô'
parser.entity["Ograve"] = 'Ó'
parser.entity["Igrave"] = 'Í'
parser.entity["Atilde"] = 'Ã'
parser.entity["Yacute"] = 'Ỳ'
parser.entity["Ucirc"] = 'Û'
parser.entity["Euml"] = 'Ë'
xml_file = '../../../data/dblp.' + str(self.filenum) + '.xml'
e = ET.parse(xml_file, parser=parser).getroot()
tot_docs = len(e)
doc_number = 0
mitad = False
max_mitad = False
complete = False
d = OrderedDict()
docs = ['article', 'inproceedings', 'incollection']
tags = ['author', 'year', 'title']
# Borrado previo del fichero de resultados
with open('../../../data/result' + str(self.filenum) +'.txt', 'w') as out:
out.writelines('')
# Almacenamiento de valores en dicc para volcado posterior a json
for child1 in e:
if ((doc_number / tot_docs > 0.5) & (not mitad)):
print('50% de los documentos procesados en el thread',str(self.filenum))
mitad = True
if ((doc_number / tot_docs > 0.9) & (not max_mitad)):
print('90% de los documentos procesados en el thread',str(self.filenum))
max_mitad = True
if ((doc_number / tot_docs == 1.0) & (not complete)):
print('100% de los documentos procesados en el thread',str(self.filenum))
complete = True
if (child1.tag in docs):
d['Type'] = child1.tag
d['Authors'] = []
for child2 in child1:
if (child2.tag in tags):
if (child2.tag == 'author'):
dicc_aut = dict()
dicc_aut["Nombre"] = child2.text
d['Authors'].append(dicc_aut)
elif child2.tag == "title":
d["Title"] = child2.text
elif child2.tag == "year":
d["Year"] = child2.text
out.writelines(json.dumps(d) + '\n')
doc_number += 1
out.close()
for i in range(7):
MyThread(i).start()
|
flexible
|
{
"blob_id": "9150eb53d309e75299775cd9524a688e8dc2ff76",
"index": 4210,
"step-1": "<mask token>\n\n\nclass MyThread(threading.Thread):\n\n def __init__(self, filenum):\n threading.Thread.__init__(self)\n self.filenum = filenum\n print('Inicio del thread:', str(self.filenum))\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyThread(threading.Thread):\n\n def __init__(self, filenum):\n threading.Thread.__init__(self)\n self.filenum = filenum\n print('Inicio del thread:', str(self.filenum))\n\n def run(self):\n parser = ET.XMLParser(encoding='ISO-8859-1')\n parser.entity['agrave'] = 'à'\n parser.entity['uuml'] = 'ü'\n parser.entity['Eacute'] = 'É'\n parser.entity['eacute'] = 'é'\n parser.entity['aacute'] = 'á'\n parser.entity['iacute'] = 'í'\n parser.entity['ouml'] = 'ö'\n parser.entity['ccedil'] = 'ç'\n parser.entity['egrave'] = 'è'\n parser.entity['auml'] = 'ä'\n parser.entity['uacute'] = 'ú'\n parser.entity['aring'] = 'å'\n parser.entity['oacute'] = 'ó'\n parser.entity['szlig'] = 'ß'\n parser.entity['oslash'] = 'ø'\n parser.entity['yacute'] = 'ỳ'\n parser.entity['iuml'] = 'ï'\n parser.entity['igrave'] = 'í'\n parser.entity['ocirc'] = 'ô'\n parser.entity['icirc'] = 'î'\n parser.entity['Uuml'] = 'Ü'\n parser.entity['euml'] = 'ë'\n parser.entity['acirc'] = 'â'\n parser.entity['atilde'] = 'ã'\n parser.entity['Uacute'] = 'Ù'\n parser.entity['Aacute'] = 'À'\n parser.entity['ntilde'] = 'ñ'\n parser.entity['Auml'] = 'Ä'\n parser.entity['Oslash'] = 'Ø'\n parser.entity['Ccedil'] = 'Ç'\n parser.entity['otilde'] = 'õ'\n parser.entity['ecirc'] = 'ê'\n parser.entity['times'] = '×'\n parser.entity['Ouml'] = 'Ö'\n parser.entity['reg'] = '®'\n parser.entity['Aring'] = 'Å'\n parser.entity['Oacute'] = 'Ò'\n parser.entity['ograve'] = 'ó'\n parser.entity['yuml'] = 'ÿ'\n parser.entity['eth'] = 'ð'\n parser.entity['aelig'] = 'æ'\n parser.entity['AElig'] = 'Æ'\n parser.entity['Agrave'] = 'Á'\n parser.entity['Iuml'] = 'Ï'\n parser.entity['micro'] = 'µ'\n parser.entity['Acirc'] = 'Â'\n parser.entity['Otilde'] = 'Õ'\n parser.entity['Egrave'] = 'É'\n parser.entity['ETH'] = 'Ð'\n parser.entity['ugrave'] = 'ú'\n parser.entity['ucirc'] = 'û'\n parser.entity['thorn'] = 'þ'\n parser.entity['THORN'] = 'Þ'\n parser.entity['Iacute'] = 'Ì'\n parser.entity['Icirc'] = 'Î'\n parser.entity['Ntilde'] = 'Ñ'\n parser.entity['Ecirc'] = 'Ê'\n parser.entity['Ocirc'] = 'Ô'\n parser.entity['Ograve'] = 'Ó'\n parser.entity['Igrave'] = 'Í'\n parser.entity['Atilde'] = 'Ã'\n parser.entity['Yacute'] = 'Ỳ'\n parser.entity['Ucirc'] = 'Û'\n parser.entity['Euml'] = 'Ë'\n xml_file = '../../../data/dblp.' 
+ str(self.filenum) + '.xml'\n e = ET.parse(xml_file, parser=parser).getroot()\n tot_docs = len(e)\n doc_number = 0\n mitad = False\n max_mitad = False\n complete = False\n d = OrderedDict()\n docs = ['article', 'inproceedings', 'incollection']\n tags = ['author', 'year', 'title']\n with open('../../../data/result' + str(self.filenum) + '.txt', 'w'\n ) as out:\n out.writelines('')\n for child1 in e:\n if (doc_number / tot_docs > 0.5) & (not mitad):\n print('50% de los documentos procesados en el thread',\n str(self.filenum))\n mitad = True\n if (doc_number / tot_docs > 0.9) & (not max_mitad):\n print('90% de los documentos procesados en el thread',\n str(self.filenum))\n max_mitad = True\n if (doc_number / tot_docs == 1.0) & (not complete):\n print('100% de los documentos procesados en el thread',\n str(self.filenum))\n complete = True\n if child1.tag in docs:\n d['Type'] = child1.tag\n d['Authors'] = []\n for child2 in child1:\n if child2.tag in tags:\n if child2.tag == 'author':\n dicc_aut = dict()\n dicc_aut['Nombre'] = child2.text\n d['Authors'].append(dicc_aut)\n elif child2.tag == 'title':\n d['Title'] = child2.text\n elif child2.tag == 'year':\n d['Year'] = child2.text\n out.writelines(json.dumps(d) + '\\n')\n doc_number += 1\n out.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyThread(threading.Thread):\n\n def __init__(self, filenum):\n threading.Thread.__init__(self)\n self.filenum = filenum\n print('Inicio del thread:', str(self.filenum))\n\n def run(self):\n parser = ET.XMLParser(encoding='ISO-8859-1')\n parser.entity['agrave'] = 'à'\n parser.entity['uuml'] = 'ü'\n parser.entity['Eacute'] = 'É'\n parser.entity['eacute'] = 'é'\n parser.entity['aacute'] = 'á'\n parser.entity['iacute'] = 'í'\n parser.entity['ouml'] = 'ö'\n parser.entity['ccedil'] = 'ç'\n parser.entity['egrave'] = 'è'\n parser.entity['auml'] = 'ä'\n parser.entity['uacute'] = 'ú'\n parser.entity['aring'] = 'å'\n parser.entity['oacute'] = 'ó'\n parser.entity['szlig'] = 'ß'\n parser.entity['oslash'] = 'ø'\n parser.entity['yacute'] = 'ỳ'\n parser.entity['iuml'] = 'ï'\n parser.entity['igrave'] = 'í'\n parser.entity['ocirc'] = 'ô'\n parser.entity['icirc'] = 'î'\n parser.entity['Uuml'] = 'Ü'\n parser.entity['euml'] = 'ë'\n parser.entity['acirc'] = 'â'\n parser.entity['atilde'] = 'ã'\n parser.entity['Uacute'] = 'Ù'\n parser.entity['Aacute'] = 'À'\n parser.entity['ntilde'] = 'ñ'\n parser.entity['Auml'] = 'Ä'\n parser.entity['Oslash'] = 'Ø'\n parser.entity['Ccedil'] = 'Ç'\n parser.entity['otilde'] = 'õ'\n parser.entity['ecirc'] = 'ê'\n parser.entity['times'] = '×'\n parser.entity['Ouml'] = 'Ö'\n parser.entity['reg'] = '®'\n parser.entity['Aring'] = 'Å'\n parser.entity['Oacute'] = 'Ò'\n parser.entity['ograve'] = 'ó'\n parser.entity['yuml'] = 'ÿ'\n parser.entity['eth'] = 'ð'\n parser.entity['aelig'] = 'æ'\n parser.entity['AElig'] = 'Æ'\n parser.entity['Agrave'] = 'Á'\n parser.entity['Iuml'] = 'Ï'\n parser.entity['micro'] = 'µ'\n parser.entity['Acirc'] = 'Â'\n parser.entity['Otilde'] = 'Õ'\n parser.entity['Egrave'] = 'É'\n parser.entity['ETH'] = 'Ð'\n parser.entity['ugrave'] = 'ú'\n parser.entity['ucirc'] = 'û'\n parser.entity['thorn'] = 'þ'\n parser.entity['THORN'] = 'Þ'\n parser.entity['Iacute'] = 'Ì'\n parser.entity['Icirc'] = 'Î'\n parser.entity['Ntilde'] = 'Ñ'\n parser.entity['Ecirc'] = 'Ê'\n parser.entity['Ocirc'] = 'Ô'\n parser.entity['Ograve'] = 'Ó'\n parser.entity['Igrave'] = 'Í'\n parser.entity['Atilde'] = 'Ã'\n parser.entity['Yacute'] = 'Ỳ'\n parser.entity['Ucirc'] = 'Û'\n parser.entity['Euml'] = 'Ë'\n xml_file = '../../../data/dblp.' 
+ str(self.filenum) + '.xml'\n e = ET.parse(xml_file, parser=parser).getroot()\n tot_docs = len(e)\n doc_number = 0\n mitad = False\n max_mitad = False\n complete = False\n d = OrderedDict()\n docs = ['article', 'inproceedings', 'incollection']\n tags = ['author', 'year', 'title']\n with open('../../../data/result' + str(self.filenum) + '.txt', 'w'\n ) as out:\n out.writelines('')\n for child1 in e:\n if (doc_number / tot_docs > 0.5) & (not mitad):\n print('50% de los documentos procesados en el thread',\n str(self.filenum))\n mitad = True\n if (doc_number / tot_docs > 0.9) & (not max_mitad):\n print('90% de los documentos procesados en el thread',\n str(self.filenum))\n max_mitad = True\n if (doc_number / tot_docs == 1.0) & (not complete):\n print('100% de los documentos procesados en el thread',\n str(self.filenum))\n complete = True\n if child1.tag in docs:\n d['Type'] = child1.tag\n d['Authors'] = []\n for child2 in child1:\n if child2.tag in tags:\n if child2.tag == 'author':\n dicc_aut = dict()\n dicc_aut['Nombre'] = child2.text\n d['Authors'].append(dicc_aut)\n elif child2.tag == 'title':\n d['Title'] = child2.text\n elif child2.tag == 'year':\n d['Year'] = child2.text\n out.writelines(json.dumps(d) + '\\n')\n doc_number += 1\n out.close()\n\n\nfor i in range(7):\n MyThread(i).start()\n",
"step-4": "import xml.etree.ElementTree as ET\nfrom collections import OrderedDict\nimport json\nimport threading\n\n\nclass MyThread(threading.Thread):\n\n def __init__(self, filenum):\n threading.Thread.__init__(self)\n self.filenum = filenum\n print('Inicio del thread:', str(self.filenum))\n\n def run(self):\n parser = ET.XMLParser(encoding='ISO-8859-1')\n parser.entity['agrave'] = 'à'\n parser.entity['uuml'] = 'ü'\n parser.entity['Eacute'] = 'É'\n parser.entity['eacute'] = 'é'\n parser.entity['aacute'] = 'á'\n parser.entity['iacute'] = 'í'\n parser.entity['ouml'] = 'ö'\n parser.entity['ccedil'] = 'ç'\n parser.entity['egrave'] = 'è'\n parser.entity['auml'] = 'ä'\n parser.entity['uacute'] = 'ú'\n parser.entity['aring'] = 'å'\n parser.entity['oacute'] = 'ó'\n parser.entity['szlig'] = 'ß'\n parser.entity['oslash'] = 'ø'\n parser.entity['yacute'] = 'ỳ'\n parser.entity['iuml'] = 'ï'\n parser.entity['igrave'] = 'í'\n parser.entity['ocirc'] = 'ô'\n parser.entity['icirc'] = 'î'\n parser.entity['Uuml'] = 'Ü'\n parser.entity['euml'] = 'ë'\n parser.entity['acirc'] = 'â'\n parser.entity['atilde'] = 'ã'\n parser.entity['Uacute'] = 'Ù'\n parser.entity['Aacute'] = 'À'\n parser.entity['ntilde'] = 'ñ'\n parser.entity['Auml'] = 'Ä'\n parser.entity['Oslash'] = 'Ø'\n parser.entity['Ccedil'] = 'Ç'\n parser.entity['otilde'] = 'õ'\n parser.entity['ecirc'] = 'ê'\n parser.entity['times'] = '×'\n parser.entity['Ouml'] = 'Ö'\n parser.entity['reg'] = '®'\n parser.entity['Aring'] = 'Å'\n parser.entity['Oacute'] = 'Ò'\n parser.entity['ograve'] = 'ó'\n parser.entity['yuml'] = 'ÿ'\n parser.entity['eth'] = 'ð'\n parser.entity['aelig'] = 'æ'\n parser.entity['AElig'] = 'Æ'\n parser.entity['Agrave'] = 'Á'\n parser.entity['Iuml'] = 'Ï'\n parser.entity['micro'] = 'µ'\n parser.entity['Acirc'] = 'Â'\n parser.entity['Otilde'] = 'Õ'\n parser.entity['Egrave'] = 'É'\n parser.entity['ETH'] = 'Ð'\n parser.entity['ugrave'] = 'ú'\n parser.entity['ucirc'] = 'û'\n parser.entity['thorn'] = 'þ'\n parser.entity['THORN'] = 'Þ'\n parser.entity['Iacute'] = 'Ì'\n parser.entity['Icirc'] = 'Î'\n parser.entity['Ntilde'] = 'Ñ'\n parser.entity['Ecirc'] = 'Ê'\n parser.entity['Ocirc'] = 'Ô'\n parser.entity['Ograve'] = 'Ó'\n parser.entity['Igrave'] = 'Í'\n parser.entity['Atilde'] = 'Ã'\n parser.entity['Yacute'] = 'Ỳ'\n parser.entity['Ucirc'] = 'Û'\n parser.entity['Euml'] = 'Ë'\n xml_file = '../../../data/dblp.' 
+ str(self.filenum) + '.xml'\n e = ET.parse(xml_file, parser=parser).getroot()\n tot_docs = len(e)\n doc_number = 0\n mitad = False\n max_mitad = False\n complete = False\n d = OrderedDict()\n docs = ['article', 'inproceedings', 'incollection']\n tags = ['author', 'year', 'title']\n with open('../../../data/result' + str(self.filenum) + '.txt', 'w'\n ) as out:\n out.writelines('')\n for child1 in e:\n if (doc_number / tot_docs > 0.5) & (not mitad):\n print('50% de los documentos procesados en el thread',\n str(self.filenum))\n mitad = True\n if (doc_number / tot_docs > 0.9) & (not max_mitad):\n print('90% de los documentos procesados en el thread',\n str(self.filenum))\n max_mitad = True\n if (doc_number / tot_docs == 1.0) & (not complete):\n print('100% de los documentos procesados en el thread',\n str(self.filenum))\n complete = True\n if child1.tag in docs:\n d['Type'] = child1.tag\n d['Authors'] = []\n for child2 in child1:\n if child2.tag in tags:\n if child2.tag == 'author':\n dicc_aut = dict()\n dicc_aut['Nombre'] = child2.text\n d['Authors'].append(dicc_aut)\n elif child2.tag == 'title':\n d['Title'] = child2.text\n elif child2.tag == 'year':\n d['Year'] = child2.text\n out.writelines(json.dumps(d) + '\\n')\n doc_number += 1\n out.close()\n\n\nfor i in range(7):\n MyThread(i).start()\n",
"step-5": "import xml.etree.ElementTree as ET\nfrom collections import OrderedDict\nimport json\nimport threading\n\nclass MyThread(threading.Thread):\n def __init__(self, filenum):\n threading.Thread.__init__(self)\n self.filenum = filenum\n print('Inicio del thread:', str(self.filenum))\n\n def run(self):\n parser = ET.XMLParser(encoding='ISO-8859-1')\n\n parser.entity[\"agrave\"] = 'à'\n parser.entity[\"uuml\"] = 'ü'\n parser.entity[\"Eacute\"] = 'É'\n parser.entity[\"eacute\"] = 'é'\n parser.entity[\"aacute\"] = 'á'\n parser.entity[\"iacute\"] = 'í'\n parser.entity[\"ouml\"] = 'ö'\n parser.entity[\"ccedil\"] = 'ç'\n parser.entity[\"egrave\"] = 'è'\n parser.entity[\"auml\"] = 'ä'\n parser.entity[\"uacute\"] = 'ú'\n parser.entity[\"aring\"] = 'å'\n parser.entity[\"oacute\"] = 'ó'\n parser.entity[\"szlig\"] = 'ß'\n parser.entity[\"oslash\"] = 'ø'\n parser.entity[\"yacute\"] = 'ỳ'\n parser.entity[\"iuml\"] = 'ï'\n parser.entity[\"igrave\"] = 'í'\n parser.entity[\"ocirc\"] = 'ô'\n parser.entity[\"icirc\"] = 'î'\n parser.entity[\"Uuml\"] = 'Ü'\n parser.entity[\"euml\"] = 'ë'\n parser.entity[\"acirc\"] = 'â'\n parser.entity[\"atilde\"] = 'ã'\n parser.entity[\"Uacute\"] = 'Ù'\n parser.entity[\"Aacute\"] = 'À'\n parser.entity[\"ntilde\"] = 'ñ'\n parser.entity[\"Auml\"] = 'Ä'\n parser.entity[\"Oslash\"] = 'Ø'\n parser.entity[\"Ccedil\"] = 'Ç'\n parser.entity[\"otilde\"] = 'õ'\n parser.entity[\"ecirc\"] = 'ê'\n parser.entity[\"times\"] = '×'\n parser.entity[\"Ouml\"] = 'Ö'\n parser.entity[\"reg\"] = '®'\n parser.entity[\"Aring\"] = 'Å'\n parser.entity[\"Oacute\"] = 'Ò'\n parser.entity[\"ograve\"] = 'ó'\n parser.entity[\"yuml\"] = 'ÿ'\n parser.entity[\"eth\"] = 'ð'\n parser.entity[\"aelig\"] = 'æ'\n parser.entity[\"AElig\"] = 'Æ'\n parser.entity[\"Agrave\"] = 'Á'\n parser.entity[\"Iuml\"] = 'Ï'\n parser.entity[\"micro\"] = 'µ'\n parser.entity[\"Acirc\"] = 'Â'\n parser.entity[\"Otilde\"] = 'Õ'\n parser.entity[\"Egrave\"] = 'É'\n parser.entity[\"ETH\"] = 'Ð'\n parser.entity[\"ugrave\"] = 'ú'\n parser.entity[\"ucirc\"] = 'û'\n parser.entity[\"thorn\"] = 'þ'\n parser.entity[\"THORN\"] = 'Þ'\n parser.entity[\"Iacute\"] = 'Ì'\n parser.entity[\"Icirc\"] = 'Î'\n parser.entity[\"Ntilde\"] = 'Ñ'\n parser.entity[\"Ecirc\"] = 'Ê'\n parser.entity[\"Ocirc\"] = 'Ô'\n parser.entity[\"Ograve\"] = 'Ó'\n parser.entity[\"Igrave\"] = 'Í'\n parser.entity[\"Atilde\"] = 'Ã'\n parser.entity[\"Yacute\"] = 'Ỳ'\n parser.entity[\"Ucirc\"] = 'Û'\n parser.entity[\"Euml\"] = 'Ë'\n\n\n xml_file = '../../../data/dblp.' 
+ str(self.filenum) + '.xml'\n\n e = ET.parse(xml_file, parser=parser).getroot()\n\n tot_docs = len(e)\n doc_number = 0\n mitad = False\n max_mitad = False\n complete = False\n\n d = OrderedDict()\n docs = ['article', 'inproceedings', 'incollection']\n tags = ['author', 'year', 'title']\n\n # Borrado previo del fichero de resultados\n with open('../../../data/result' + str(self.filenum) +'.txt', 'w') as out:\n out.writelines('')\n\n # Almacenamiento de valores en dicc para volcado posterior a json\n for child1 in e:\n if ((doc_number / tot_docs > 0.5) & (not mitad)):\n print('50% de los documentos procesados en el thread',str(self.filenum))\n mitad = True\n if ((doc_number / tot_docs > 0.9) & (not max_mitad)):\n print('90% de los documentos procesados en el thread',str(self.filenum))\n max_mitad = True\n if ((doc_number / tot_docs == 1.0) & (not complete)):\n print('100% de los documentos procesados en el thread',str(self.filenum))\n complete = True\n if (child1.tag in docs):\n d['Type'] = child1.tag\n d['Authors'] = []\n for child2 in child1:\n if (child2.tag in tags):\n if (child2.tag == 'author'):\n dicc_aut = dict()\n dicc_aut[\"Nombre\"] = child2.text\n d['Authors'].append(dicc_aut)\n elif child2.tag == \"title\":\n d[\"Title\"] = child2.text\n elif child2.tag == \"year\":\n d[\"Year\"] = child2.text\n out.writelines(json.dumps(d) + '\\n')\n doc_number += 1\n out.close()\nfor i in range(7):\n MyThread(i).start()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetCommunitiesByOffsetService(IService):
<|reserved_special_token_0|>
def run(self):
return DBService(self.core).getNextFields('Communities', self.
parameters['start'], self.parameters['offset'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetCommunitiesByOffsetService(IService):
def __init__(self, core, parameters):
super(GetCommunitiesByOffsetService, self).__init__(core, parameters)
def run(self):
return DBService(self.core).getNextFields('Communities', self.
parameters['start'], self.parameters['offset'])
<|reserved_special_token_1|>
from services.interfaces.i_service import IService
from services.dbservices.db_service import DBService
class GetCommunitiesByOffsetService(IService):
def __init__(self, core, parameters):
super(GetCommunitiesByOffsetService, self).__init__(core, parameters)
def run(self):
return DBService(self.core).getNextFields('Communities', self.
parameters['start'], self.parameters['offset'])
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from services.interfaces.i_service import IService
from services.dbservices.db_service import DBService
class GetCommunitiesByOffsetService(IService):
def __init__(self, core, parameters):
super(GetCommunitiesByOffsetService, self).__init__(core, parameters)
def run(self):
return DBService(self.core).getNextFields("Communities", self.parameters["start"], self.parameters["offset"])
|
flexible
|
{
"blob_id": "051bd11c42815ec8f8ece8eae9d33890da77129c",
"index": 148,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GetCommunitiesByOffsetService(IService):\n <mask token>\n\n def run(self):\n return DBService(self.core).getNextFields('Communities', self.\n parameters['start'], self.parameters['offset'])\n",
"step-3": "<mask token>\n\n\nclass GetCommunitiesByOffsetService(IService):\n\n def __init__(self, core, parameters):\n super(GetCommunitiesByOffsetService, self).__init__(core, parameters)\n\n def run(self):\n return DBService(self.core).getNextFields('Communities', self.\n parameters['start'], self.parameters['offset'])\n",
"step-4": "from services.interfaces.i_service import IService\nfrom services.dbservices.db_service import DBService\n\n\nclass GetCommunitiesByOffsetService(IService):\n\n def __init__(self, core, parameters):\n super(GetCommunitiesByOffsetService, self).__init__(core, parameters)\n\n def run(self):\n return DBService(self.core).getNextFields('Communities', self.\n parameters['start'], self.parameters['offset'])\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom services.interfaces.i_service import IService\nfrom services.dbservices.db_service import DBService\n\nclass GetCommunitiesByOffsetService(IService):\n def __init__(self, core, parameters):\n super(GetCommunitiesByOffsetService, self).__init__(core, parameters)\n\n def run(self):\n return DBService(self.core).getNextFields(\"Communities\", self.parameters[\"start\"], self.parameters[\"offset\"])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from utils import *
from Dataset.input_pipe import *
from Learning.tf_multipath_classifier import *
def config_graph():
paths = []
path = {}
path['input_dim'] = 4116
path['name'] = 'shared1'
path['computation'] = construct_path(path['name'], [512, 512], batch_norm=False, dropout=True, dropout_rate=0.5, noise=False, noise_std=0.16)
path['input'] = 'organic'
paths.append(path)
path = {}
path['name'] = 'aspects'
path['input'] = 'shared1'
path['input_dim'] = 512
path['computation'] = construct_path(path['name'], [11], batch_norm=False, activation=None)
path['optimizer'] = tf.train.AdamOptimizer(name='optimizer', learning_rate=0.0001 , beta1=0.92 , beta2=0.9999)
path['loss'] = loss_map('sigmoid')
path['predictor'] = sigmoid_predictor()
paths.append(path)
return paths
org_dict_full = prep_organic_aspects()
dataset_size = len(org_dict_full['train_data'])
folds = 10
fold_size= ceil(dataset_size / folds)
avg_f1 = 0
for f in range(0,folds):
fold_start = f * fold_size
fold_end = min((f+1) * fold_size, dataset_size )
print(fold_start, fold_end)
org_dict = fold_data_dict(org_dict_full, fold_start, fold_end )
datasets = []
dataset = {}
dataset['name'] = 'organic'
# dataset['holdout'] = 50
dataset['batch_size'] = 10
dataset['features'] = org_dict['train_vecs']
dataset['type'] = tf.float32
dataset['tasks'] = [{'name' : 'aspects', 'features' : org_dict['encoded_train_labels'], 'type': tf.float32}]
datasets.append(dataset)
paths = config_graph()
params = {}
params['train_iter'] = 4001
model = TfMultiPathClassifier(datasets, paths, params)
model.train()
model.save()
y = model.get_prediciton('aspects', org_dict['test_vecs'])
x = model.get_prediciton('aspects', org_dict['train_vecs'])
multi_label_metrics(x, org_dict['train_labels'], org_dict['encoded_train_labels'],
org_dict['labeling'], org_dict['train_data'] )
_, f1 = multi_label_metrics(y, org_dict['test_labels'], org_dict['encoded_test_labels'],
org_dict['labeling'], org_dict['test_data'], mute=True )
avg_f1 +=f1
avg_f1 = avg_f1 / folds
print('\n--------------------------------------------------------------------------\nAverage F1 score:', avg_f1)
|
normal
|
{
"blob_id": "8039430f1b65cc76f9a78b1094f110de29f0f965",
"index": 4885,
"step-1": "<mask token>\n\n\ndef config_graph():\n paths = []\n path = {}\n path['input_dim'] = 4116\n path['name'] = 'shared1'\n path['computation'] = construct_path(path['name'], [512, 512],\n batch_norm=False, dropout=True, dropout_rate=0.5, noise=False,\n noise_std=0.16)\n path['input'] = 'organic'\n paths.append(path)\n path = {}\n path['name'] = 'aspects'\n path['input'] = 'shared1'\n path['input_dim'] = 512\n path['computation'] = construct_path(path['name'], [11], batch_norm=\n False, activation=None)\n path['optimizer'] = tf.train.AdamOptimizer(name='optimizer',\n learning_rate=0.0001, beta1=0.92, beta2=0.9999)\n path['loss'] = loss_map('sigmoid')\n path['predictor'] = sigmoid_predictor()\n paths.append(path)\n return paths\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef config_graph():\n paths = []\n path = {}\n path['input_dim'] = 4116\n path['name'] = 'shared1'\n path['computation'] = construct_path(path['name'], [512, 512],\n batch_norm=False, dropout=True, dropout_rate=0.5, noise=False,\n noise_std=0.16)\n path['input'] = 'organic'\n paths.append(path)\n path = {}\n path['name'] = 'aspects'\n path['input'] = 'shared1'\n path['input_dim'] = 512\n path['computation'] = construct_path(path['name'], [11], batch_norm=\n False, activation=None)\n path['optimizer'] = tf.train.AdamOptimizer(name='optimizer',\n learning_rate=0.0001, beta1=0.92, beta2=0.9999)\n path['loss'] = loss_map('sigmoid')\n path['predictor'] = sigmoid_predictor()\n paths.append(path)\n return paths\n\n\n<mask token>\nfor f in range(0, folds):\n fold_start = f * fold_size\n fold_end = min((f + 1) * fold_size, dataset_size)\n print(fold_start, fold_end)\n org_dict = fold_data_dict(org_dict_full, fold_start, fold_end)\n datasets = []\n dataset = {}\n dataset['name'] = 'organic'\n dataset['batch_size'] = 10\n dataset['features'] = org_dict['train_vecs']\n dataset['type'] = tf.float32\n dataset['tasks'] = [{'name': 'aspects', 'features': org_dict[\n 'encoded_train_labels'], 'type': tf.float32}]\n datasets.append(dataset)\n paths = config_graph()\n params = {}\n params['train_iter'] = 4001\n model = TfMultiPathClassifier(datasets, paths, params)\n model.train()\n model.save()\n y = model.get_prediciton('aspects', org_dict['test_vecs'])\n x = model.get_prediciton('aspects', org_dict['train_vecs'])\n multi_label_metrics(x, org_dict['train_labels'], org_dict[\n 'encoded_train_labels'], org_dict['labeling'], org_dict['train_data'])\n _, f1 = multi_label_metrics(y, org_dict['test_labels'], org_dict[\n 'encoded_test_labels'], org_dict['labeling'], org_dict['test_data'],\n mute=True)\n avg_f1 += f1\n<mask token>\nprint(\n \"\"\"\n--------------------------------------------------------------------------\nAverage F1 score:\"\"\"\n , avg_f1)\n",
"step-3": "<mask token>\n\n\ndef config_graph():\n paths = []\n path = {}\n path['input_dim'] = 4116\n path['name'] = 'shared1'\n path['computation'] = construct_path(path['name'], [512, 512],\n batch_norm=False, dropout=True, dropout_rate=0.5, noise=False,\n noise_std=0.16)\n path['input'] = 'organic'\n paths.append(path)\n path = {}\n path['name'] = 'aspects'\n path['input'] = 'shared1'\n path['input_dim'] = 512\n path['computation'] = construct_path(path['name'], [11], batch_norm=\n False, activation=None)\n path['optimizer'] = tf.train.AdamOptimizer(name='optimizer',\n learning_rate=0.0001, beta1=0.92, beta2=0.9999)\n path['loss'] = loss_map('sigmoid')\n path['predictor'] = sigmoid_predictor()\n paths.append(path)\n return paths\n\n\norg_dict_full = prep_organic_aspects()\ndataset_size = len(org_dict_full['train_data'])\nfolds = 10\nfold_size = ceil(dataset_size / folds)\navg_f1 = 0\nfor f in range(0, folds):\n fold_start = f * fold_size\n fold_end = min((f + 1) * fold_size, dataset_size)\n print(fold_start, fold_end)\n org_dict = fold_data_dict(org_dict_full, fold_start, fold_end)\n datasets = []\n dataset = {}\n dataset['name'] = 'organic'\n dataset['batch_size'] = 10\n dataset['features'] = org_dict['train_vecs']\n dataset['type'] = tf.float32\n dataset['tasks'] = [{'name': 'aspects', 'features': org_dict[\n 'encoded_train_labels'], 'type': tf.float32}]\n datasets.append(dataset)\n paths = config_graph()\n params = {}\n params['train_iter'] = 4001\n model = TfMultiPathClassifier(datasets, paths, params)\n model.train()\n model.save()\n y = model.get_prediciton('aspects', org_dict['test_vecs'])\n x = model.get_prediciton('aspects', org_dict['train_vecs'])\n multi_label_metrics(x, org_dict['train_labels'], org_dict[\n 'encoded_train_labels'], org_dict['labeling'], org_dict['train_data'])\n _, f1 = multi_label_metrics(y, org_dict['test_labels'], org_dict[\n 'encoded_test_labels'], org_dict['labeling'], org_dict['test_data'],\n mute=True)\n avg_f1 += f1\navg_f1 = avg_f1 / folds\nprint(\n \"\"\"\n--------------------------------------------------------------------------\nAverage F1 score:\"\"\"\n , avg_f1)\n",
"step-4": "from utils import *\nfrom Dataset.input_pipe import *\nfrom Learning.tf_multipath_classifier import *\n\n\ndef config_graph():\n paths = []\n path = {}\n path['input_dim'] = 4116\n path['name'] = 'shared1'\n path['computation'] = construct_path(path['name'], [512, 512],\n batch_norm=False, dropout=True, dropout_rate=0.5, noise=False,\n noise_std=0.16)\n path['input'] = 'organic'\n paths.append(path)\n path = {}\n path['name'] = 'aspects'\n path['input'] = 'shared1'\n path['input_dim'] = 512\n path['computation'] = construct_path(path['name'], [11], batch_norm=\n False, activation=None)\n path['optimizer'] = tf.train.AdamOptimizer(name='optimizer',\n learning_rate=0.0001, beta1=0.92, beta2=0.9999)\n path['loss'] = loss_map('sigmoid')\n path['predictor'] = sigmoid_predictor()\n paths.append(path)\n return paths\n\n\norg_dict_full = prep_organic_aspects()\ndataset_size = len(org_dict_full['train_data'])\nfolds = 10\nfold_size = ceil(dataset_size / folds)\navg_f1 = 0\nfor f in range(0, folds):\n fold_start = f * fold_size\n fold_end = min((f + 1) * fold_size, dataset_size)\n print(fold_start, fold_end)\n org_dict = fold_data_dict(org_dict_full, fold_start, fold_end)\n datasets = []\n dataset = {}\n dataset['name'] = 'organic'\n dataset['batch_size'] = 10\n dataset['features'] = org_dict['train_vecs']\n dataset['type'] = tf.float32\n dataset['tasks'] = [{'name': 'aspects', 'features': org_dict[\n 'encoded_train_labels'], 'type': tf.float32}]\n datasets.append(dataset)\n paths = config_graph()\n params = {}\n params['train_iter'] = 4001\n model = TfMultiPathClassifier(datasets, paths, params)\n model.train()\n model.save()\n y = model.get_prediciton('aspects', org_dict['test_vecs'])\n x = model.get_prediciton('aspects', org_dict['train_vecs'])\n multi_label_metrics(x, org_dict['train_labels'], org_dict[\n 'encoded_train_labels'], org_dict['labeling'], org_dict['train_data'])\n _, f1 = multi_label_metrics(y, org_dict['test_labels'], org_dict[\n 'encoded_test_labels'], org_dict['labeling'], org_dict['test_data'],\n mute=True)\n avg_f1 += f1\navg_f1 = avg_f1 / folds\nprint(\n \"\"\"\n--------------------------------------------------------------------------\nAverage F1 score:\"\"\"\n , avg_f1)\n",
"step-5": "from utils import *\nfrom Dataset.input_pipe import *\nfrom Learning.tf_multipath_classifier import *\n\n\ndef config_graph():\n\tpaths = []\n\n\tpath = {}\n\tpath['input_dim'] = 4116\n\tpath['name'] = 'shared1'\n\tpath['computation'] = construct_path(path['name'], [512, 512], batch_norm=False, dropout=True, dropout_rate=0.5, noise=False, noise_std=0.16)\n\tpath['input'] = 'organic'\n\tpaths.append(path)\n\n\tpath = {}\n\tpath['name'] = 'aspects'\n\tpath['input'] = 'shared1'\n\tpath['input_dim'] = 512\n\tpath['computation'] = construct_path(path['name'], [11], batch_norm=False, activation=None)\n\tpath['optimizer'] = tf.train.AdamOptimizer(name='optimizer', learning_rate=0.0001 , beta1=0.92 , beta2=0.9999)\n\tpath['loss'] = loss_map('sigmoid')\n\tpath['predictor'] = sigmoid_predictor()\n\tpaths.append(path)\n\n\treturn paths\n\n\norg_dict_full = prep_organic_aspects()\ndataset_size = len(org_dict_full['train_data'])\n\nfolds = 10\nfold_size= ceil(dataset_size / folds)\navg_f1 = 0\nfor f in range(0,folds):\n\tfold_start = f * fold_size\n\tfold_end = min((f+1) * fold_size, dataset_size )\n\tprint(fold_start, fold_end)\n\torg_dict = fold_data_dict(org_dict_full, fold_start, fold_end )\n\n\tdatasets = []\n\tdataset = {}\n\tdataset['name'] = 'organic'\n\t# dataset['holdout'] = 50\n\tdataset['batch_size'] = 10\n\tdataset['features'] = org_dict['train_vecs']\n\tdataset['type'] = tf.float32\n\tdataset['tasks'] = [{'name' : 'aspects', 'features' : org_dict['encoded_train_labels'], 'type': tf.float32}]\n\tdatasets.append(dataset)\n\n\tpaths = config_graph()\n\tparams = {}\n\tparams['train_iter'] = 4001\n\n\tmodel = TfMultiPathClassifier(datasets, paths, params)\n\n\tmodel.train()\n\tmodel.save()\n\n\ty = model.get_prediciton('aspects', org_dict['test_vecs'])\n\tx = model.get_prediciton('aspects', org_dict['train_vecs'])\n\n\tmulti_label_metrics(x, org_dict['train_labels'], org_dict['encoded_train_labels'],\n\t\t\t\t\t\t\t\t\t\t\torg_dict['labeling'], org_dict['train_data'] )\n\n\t_, f1 = multi_label_metrics(y, org_dict['test_labels'], org_dict['encoded_test_labels'],\n\t\t\t\t\t\t\t\t\t\t\torg_dict['labeling'], org_dict['test_data'], mute=True )\n\tavg_f1 +=f1\n\navg_f1 = avg_f1 / folds\nprint('\\n--------------------------------------------------------------------------\\nAverage F1 score:', avg_f1)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def simulate():
env = gym.make('CartPole-v0')
for episode in range(NUM_EPISODES):
done = False
obv = env.reset()
initial_action = 0
total_reward = 0
steps = 0
while done != True:
env.render()
if episode == 0:
action = initial_action
obv, reward, done, _ = env.step(action)
print(obv)
total_reward += reward
steps += 1
print('Episode', episode, 'finished after', steps,
'time steps with total reward', total_reward)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def simulate():
env = gym.make('CartPole-v0')
for episode in range(NUM_EPISODES):
done = False
obv = env.reset()
initial_action = 0
total_reward = 0
steps = 0
while done != True:
env.render()
if episode == 0:
action = initial_action
obv, reward, done, _ = env.step(action)
print(obv)
total_reward += reward
steps += 1
print('Episode', episode, 'finished after', steps,
'time steps with total reward', total_reward)
if __name__ == '__main__':
simulate()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
NUM_EPISODES = 1000
def simulate():
env = gym.make('CartPole-v0')
for episode in range(NUM_EPISODES):
done = False
obv = env.reset()
initial_action = 0
total_reward = 0
steps = 0
while done != True:
env.render()
if episode == 0:
action = initial_action
obv, reward, done, _ = env.step(action)
print(obv)
total_reward += reward
steps += 1
print('Episode', episode, 'finished after', steps,
'time steps with total reward', total_reward)
if __name__ == '__main__':
simulate()
<|reserved_special_token_1|>
import gym
NUM_EPISODES = 1000
def simulate():
env = gym.make('CartPole-v0')
for episode in range(NUM_EPISODES):
done = False
obv = env.reset()
initial_action = 0
total_reward = 0
steps = 0
while done != True:
env.render()
if episode == 0:
action = initial_action
obv, reward, done, _ = env.step(action)
print(obv)
total_reward += reward
steps += 1
print('Episode', episode, 'finished after', steps,
'time steps with total reward', total_reward)
if __name__ == '__main__':
simulate()
<|reserved_special_token_1|>
#CartPoleStarter
import gym
## Defining the simulation related constants
NUM_EPISODES = 1000
def simulate():
## Initialize the "Cart-Pole" environment
env = gym.make('CartPole-v0')
for episode in range(NUM_EPISODES):
done = False
# Reset the environment
obv = env.reset()
initial_action = 0 #initial action is to move the cart to the left (arbitrary)
total_reward = 0
steps = 0
while done != True:
# render the simulation
env.render()
# Select an action
if episode == 0:
action = initial_action
# Execute the action
obv, reward, done, _ = env.step(action)
print(obv)
total_reward += reward
steps +=1
#TODO:
#change the action here based on the obv
#make action = 0 (left) or action = 1 (right) based on if-statements
print("Episode", episode, "finished after", steps, "time steps with total reward", total_reward)
if __name__ == "__main__":
simulate()
|
flexible
|
{
"blob_id": "3c79c528cc19380af8f2883b9e35855e29b151a3",
"index": 7975,
"step-1": "<mask token>\n\n\ndef simulate():\n env = gym.make('CartPole-v0')\n for episode in range(NUM_EPISODES):\n done = False\n obv = env.reset()\n initial_action = 0\n total_reward = 0\n steps = 0\n while done != True:\n env.render()\n if episode == 0:\n action = initial_action\n obv, reward, done, _ = env.step(action)\n print(obv)\n total_reward += reward\n steps += 1\n print('Episode', episode, 'finished after', steps,\n 'time steps with total reward', total_reward)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef simulate():\n env = gym.make('CartPole-v0')\n for episode in range(NUM_EPISODES):\n done = False\n obv = env.reset()\n initial_action = 0\n total_reward = 0\n steps = 0\n while done != True:\n env.render()\n if episode == 0:\n action = initial_action\n obv, reward, done, _ = env.step(action)\n print(obv)\n total_reward += reward\n steps += 1\n print('Episode', episode, 'finished after', steps,\n 'time steps with total reward', total_reward)\n\n\nif __name__ == '__main__':\n simulate()\n",
"step-3": "<mask token>\nNUM_EPISODES = 1000\n\n\ndef simulate():\n env = gym.make('CartPole-v0')\n for episode in range(NUM_EPISODES):\n done = False\n obv = env.reset()\n initial_action = 0\n total_reward = 0\n steps = 0\n while done != True:\n env.render()\n if episode == 0:\n action = initial_action\n obv, reward, done, _ = env.step(action)\n print(obv)\n total_reward += reward\n steps += 1\n print('Episode', episode, 'finished after', steps,\n 'time steps with total reward', total_reward)\n\n\nif __name__ == '__main__':\n simulate()\n",
"step-4": "import gym\nNUM_EPISODES = 1000\n\n\ndef simulate():\n env = gym.make('CartPole-v0')\n for episode in range(NUM_EPISODES):\n done = False\n obv = env.reset()\n initial_action = 0\n total_reward = 0\n steps = 0\n while done != True:\n env.render()\n if episode == 0:\n action = initial_action\n obv, reward, done, _ = env.step(action)\n print(obv)\n total_reward += reward\n steps += 1\n print('Episode', episode, 'finished after', steps,\n 'time steps with total reward', total_reward)\n\n\nif __name__ == '__main__':\n simulate()\n",
"step-5": "#CartPoleStarter\n\nimport gym\n\n## Defining the simulation related constants\nNUM_EPISODES = 1000\n\ndef simulate():\n ## Initialize the \"Cart-Pole\" environment\n env = gym.make('CartPole-v0')\n\n for episode in range(NUM_EPISODES):\n\n done = False\n # Reset the environment\n obv = env.reset()\n\n initial_action = 0 #initial action is to move the cart to the left (arbitrary)\n\n total_reward = 0\n steps = 0\n\n while done != True:\n # render the simulation\n env.render()\n\n # Select an action\n if episode == 0:\n action = initial_action\n\n # Execute the action\n obv, reward, done, _ = env.step(action)\n print(obv)\n\n\n total_reward += reward\n steps +=1\n\n #TODO:\n #change the action here based on the obv\n #make action = 0 (left) or action = 1 (right) based on if-statements\n\n print(\"Episode\", episode, \"finished after\", steps, \"time steps with total reward\", total_reward)\n\nif __name__ == \"__main__\":\n simulate()\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class TestSunlumoProjectPrinter(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestSunlumoProjectPrinter(TestCase):
def test_printer(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
tmpFile = '/tmp/printtmp'
sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',
'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',
'points'], 'transparencies': [50, 0, 0]})
with open(tmpFile + '.pdf', 'rb') as pdfFile:
data = pdfFile.read()
self.assertEqual(len(data), 426652)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestSunlumoProjectPrinter(TestCase):
def test_printer(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
tmpFile = '/tmp/printtmp'
sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',
'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',
'points'], 'transparencies': [50, 0, 0]})
with open(tmpFile + '.pdf', 'rb') as pdfFile:
data = pdfFile.read()
self.assertEqual(len(data), 426652)
def test_printer_missing_required_params(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
with self.assertRaises(RuntimeError):
sl_prj.printToPdf({})
<|reserved_special_token_1|>
from django.test import TestCase
from ..printer import Printer
class TestSunlumoProjectPrinter(TestCase):
def test_printer(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
tmpFile = '/tmp/printtmp'
sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',
'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',
'points'], 'transparencies': [50, 0, 0]})
with open(tmpFile + '.pdf', 'rb') as pdfFile:
data = pdfFile.read()
self.assertEqual(len(data), 426652)
def test_printer_missing_required_params(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
with self.assertRaises(RuntimeError):
sl_prj.printToPdf({})
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from django.test import TestCase
from ..printer import Printer
class TestSunlumoProjectPrinter(TestCase):
def test_printer(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
tmpFile = '/tmp/printtmp'
sl_prj.printToPdf({
'tmpFile': tmpFile, 'layout': 'test_layout',
'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines', 'points'],
'transparencies': [50, 0, 0]
})
with open(tmpFile + '.pdf', 'rb') as pdfFile:
# we just want to test if the PDF file in not blank
data = pdfFile.read()
self.assertEqual(len(data), 426652)
def test_printer_missing_required_params(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
with self.assertRaises(RuntimeError):
sl_prj.printToPdf({})
|
flexible
|
{
"blob_id": "5e0cba6952cdc677c640a0df325426ffc89189cd",
"index": 658,
"step-1": "<mask token>\n\n\nclass TestSunlumoProjectPrinter(TestCase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestSunlumoProjectPrinter(TestCase):\n\n def test_printer(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n tmpFile = '/tmp/printtmp'\n sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',\n 'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',\n 'points'], 'transparencies': [50, 0, 0]})\n with open(tmpFile + '.pdf', 'rb') as pdfFile:\n data = pdfFile.read()\n self.assertEqual(len(data), 426652)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestSunlumoProjectPrinter(TestCase):\n\n def test_printer(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n tmpFile = '/tmp/printtmp'\n sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',\n 'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',\n 'points'], 'transparencies': [50, 0, 0]})\n with open(tmpFile + '.pdf', 'rb') as pdfFile:\n data = pdfFile.read()\n self.assertEqual(len(data), 426652)\n\n def test_printer_missing_required_params(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n with self.assertRaises(RuntimeError):\n sl_prj.printToPdf({})\n",
"step-4": "from django.test import TestCase\nfrom ..printer import Printer\n\n\nclass TestSunlumoProjectPrinter(TestCase):\n\n def test_printer(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n tmpFile = '/tmp/printtmp'\n sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',\n 'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',\n 'points'], 'transparencies': [50, 0, 0]})\n with open(tmpFile + '.pdf', 'rb') as pdfFile:\n data = pdfFile.read()\n self.assertEqual(len(data), 426652)\n\n def test_printer_missing_required_params(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n with self.assertRaises(RuntimeError):\n sl_prj.printToPdf({})\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\n\nfrom ..printer import Printer\n\n\nclass TestSunlumoProjectPrinter(TestCase):\n def test_printer(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n\n tmpFile = '/tmp/printtmp'\n sl_prj.printToPdf({\n 'tmpFile': tmpFile, 'layout': 'test_layout',\n 'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines', 'points'],\n 'transparencies': [50, 0, 0]\n })\n\n with open(tmpFile + '.pdf', 'rb') as pdfFile:\n # we just want to test if the PDF file in not blank\n data = pdfFile.read()\n self.assertEqual(len(data), 426652)\n\n def test_printer_missing_required_params(self):\n\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n\n with self.assertRaises(RuntimeError):\n sl_prj.printToPdf({})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class TestOutwardCounterClockwise(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_traverse_single_element(self):
matrix = [[1]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([1], actual)
<|reserved_special_token_0|>
def test_traverse_column_vector(self):
matrix = [[1], [2], [3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([3, 2, 1], actual)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_traverse_wide_even_height_rectangle(self):
matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)
<|reserved_special_token_0|>
def test_traverse_tall_odd_width_rectangle(self):
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestOutwardCounterClockwise(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_traverse_single_element(self):
matrix = [[1]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([1], actual)
def test_traverse_row_vector(self):
matrix = [[1, 2, 3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([3, 2, 1], actual)
def test_traverse_column_vector(self):
matrix = [[1], [2], [3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([3, 2, 1], actual)
def test_traverse_even_square(self):
matrix = [[1, 2], [3, 4]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([3, 4, 2, 1], actual)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_traverse_wide_even_height_rectangle(self):
matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)
<|reserved_special_token_0|>
def test_traverse_tall_odd_width_rectangle(self):
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestOutwardCounterClockwise(TestCase):
def test_traverse_empty(self):
matrix = []
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([], actual)
<|reserved_special_token_0|>
def test_traverse_single_element(self):
matrix = [[1]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([1], actual)
def test_traverse_row_vector(self):
matrix = [[1, 2, 3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([3, 2, 1], actual)
def test_traverse_column_vector(self):
matrix = [[1], [2], [3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([3, 2, 1], actual)
def test_traverse_even_square(self):
matrix = [[1, 2], [3, 4]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([3, 4, 2, 1], actual)
<|reserved_special_token_0|>
def test_traverse_wide_odd_height_rectangle(self):
matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)
def test_traverse_wide_even_height_rectangle(self):
matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)
def test_traverse_tall_even_width_rectangle(self):
matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15,
16], [17, 18, 19, 20]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20,
16, 12, 8, 4, 3, 2, 1], actual)
def test_traverse_tall_odd_width_rectangle(self):
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)
def test_traverse_large_matrix(self):
matrix = [[(i * 1000 + j) for j in range(0, 1000)] for i in range(0,
1000)]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])
self.assertEqual([3, 2, 1, 0], actual[-4:])
<|reserved_special_token_1|>
from unittest import TestCase
from spiral.spiral_matrix import SpiralMatrix
class TestOutwardCounterClockwise(TestCase):
def test_traverse_empty(self):
matrix = []
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([], actual)
def test_traverse_empty_vector(self):
matrix = [[]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([], actual)
def test_traverse_single_element(self):
matrix = [[1]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([1], actual)
def test_traverse_row_vector(self):
matrix = [[1, 2, 3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([3, 2, 1], actual)
def test_traverse_column_vector(self):
matrix = [[1], [2], [3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([3, 2, 1], actual)
def test_traverse_even_square(self):
matrix = [[1, 2], [3, 4]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([3, 4, 2, 1], actual)
def test_traverse_odd_square(self):
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([5, 4, 7, 8, 9, 6, 3, 2, 1], actual)
def test_traverse_wide_odd_height_rectangle(self):
matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)
def test_traverse_wide_even_height_rectangle(self):
matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)
def test_traverse_tall_even_width_rectangle(self):
matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15,
16], [17, 18, 19, 20]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20,
16, 12, 8, 4, 3, 2, 1], actual)
def test_traverse_tall_odd_width_rectangle(self):
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)
def test_traverse_large_matrix(self):
matrix = [[(i * 1000 + j) for j in range(0, 1000)] for i in range(0,
1000)]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=
False)]
self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])
self.assertEqual([3, 2, 1, 0], actual[-4:])
<|reserved_special_token_1|>
from unittest import TestCase
from spiral.spiral_matrix import SpiralMatrix
class TestOutwardCounterClockwise(TestCase):
def test_traverse_empty(self):
matrix = []
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([], actual)
def test_traverse_empty_vector(self):
matrix = [[]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([], actual)
def test_traverse_single_element(self):
matrix = [[1]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([1], actual)
def test_traverse_row_vector(self):
matrix = [[1, 2, 3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([3, 2, 1], actual)
def test_traverse_column_vector(self):
matrix = [
[1],
[2],
[3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([3, 2, 1], actual)
def test_traverse_even_square(self):
matrix = [
[1, 2],
[3, 4]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([3, 4, 2, 1], actual)
def test_traverse_odd_square(self):
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([5, 4, 7, 8, 9, 6, 3, 2, 1], actual)
def test_traverse_wide_odd_height_rectangle(self):
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
actual = [
i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual(
[7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)
def test_traverse_wide_even_height_rectangle(self):
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8]]
actual = [
i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual(
[5, 6, 7, 8, 4, 3, 2, 1], actual)
def test_traverse_tall_even_width_rectangle(self):
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
[17, 18, 19, 20]]
actual = [
i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual(
[10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20, 16, 12, 8, 4, 3, 2, 1],
actual)
def test_traverse_tall_odd_width_rectangle(self):
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12]]
actual = [
i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual(
            [8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)
def test_traverse_large_matrix(self):
matrix = [[i * 1000 + j for j in range(0, 1000)]
for i in range(0, 1000)]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])
self.assertEqual([3, 2, 1, 0],
actual[-4:])
|
flexible
|
{
"blob_id": "84f6336261e1c276f029822754842514715791df",
"index": 3604,
"step-1": "<mask token>\n\n\nclass TestOutwardCounterClockwise(TestCase):\n <mask token>\n <mask token>\n\n def test_traverse_single_element(self):\n matrix = [[1]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([1], actual)\n <mask token>\n\n def test_traverse_column_vector(self):\n matrix = [[1], [2], [3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n <mask token>\n <mask token>\n <mask token>\n\n def test_traverse_wide_even_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)\n <mask token>\n\n def test_traverse_tall_odd_width_rectangle(self):\n matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestOutwardCounterClockwise(TestCase):\n <mask token>\n <mask token>\n\n def test_traverse_single_element(self):\n matrix = [[1]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([1], actual)\n\n def test_traverse_row_vector(self):\n matrix = [[1, 2, 3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_column_vector(self):\n matrix = [[1], [2], [3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_even_square(self):\n matrix = [[1, 2], [3, 4]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 4, 2, 1], actual)\n <mask token>\n <mask token>\n\n def test_traverse_wide_even_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)\n <mask token>\n\n def test_traverse_tall_odd_width_rectangle(self):\n matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestOutwardCounterClockwise(TestCase):\n\n def test_traverse_empty(self):\n matrix = []\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([], actual)\n <mask token>\n\n def test_traverse_single_element(self):\n matrix = [[1]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([1], actual)\n\n def test_traverse_row_vector(self):\n matrix = [[1, 2, 3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_column_vector(self):\n matrix = [[1], [2], [3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_even_square(self):\n matrix = [[1, 2], [3, 4]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 4, 2, 1], actual)\n <mask token>\n\n def test_traverse_wide_odd_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_wide_even_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_tall_even_width_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15,\n 16], [17, 18, 19, 20]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20, \n 16, 12, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_tall_odd_width_rectangle(self):\n matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)\n\n def test_traverse_large_matrix(self):\n matrix = [[(i * 1000 + j) for j in range(0, 1000)] for i in range(0,\n 1000)]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])\n self.assertEqual([3, 2, 1, 0], actual[-4:])\n",
"step-4": "from unittest import TestCase\nfrom spiral.spiral_matrix import SpiralMatrix\n\n\nclass TestOutwardCounterClockwise(TestCase):\n\n def test_traverse_empty(self):\n matrix = []\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([], actual)\n\n def test_traverse_empty_vector(self):\n matrix = [[]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([], actual)\n\n def test_traverse_single_element(self):\n matrix = [[1]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([1], actual)\n\n def test_traverse_row_vector(self):\n matrix = [[1, 2, 3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_column_vector(self):\n matrix = [[1], [2], [3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_even_square(self):\n matrix = [[1, 2], [3, 4]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 4, 2, 1], actual)\n\n def test_traverse_odd_square(self):\n matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([5, 4, 7, 8, 9, 6, 3, 2, 1], actual)\n\n def test_traverse_wide_odd_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_wide_even_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_tall_even_width_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15,\n 16], [17, 18, 19, 20]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20, \n 16, 12, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_tall_odd_width_rectangle(self):\n matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)\n\n def test_traverse_large_matrix(self):\n matrix = [[(i * 1000 + j) for j in range(0, 1000)] for i in range(0,\n 1000)]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])\n self.assertEqual([3, 2, 1, 0], actual[-4:])\n",
"step-5": "from unittest import TestCase\nfrom spiral.spiral_matrix import SpiralMatrix\n\n\nclass TestOutwardCounterClockwise(TestCase):\n def test_traverse_empty(self):\n matrix = []\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([], actual)\n\n def test_traverse_empty_vector(self):\n matrix = [[]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([], actual)\n\n def test_traverse_single_element(self):\n matrix = [[1]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([1], actual)\n\n def test_traverse_row_vector(self):\n matrix = [[1, 2, 3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_column_vector(self):\n matrix = [\n [1],\n [2],\n [3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_even_square(self):\n matrix = [\n [1, 2],\n [3, 4]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([3, 4, 2, 1], actual)\n\n def test_traverse_odd_square(self):\n matrix = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([5, 4, 7, 8, 9, 6, 3, 2, 1], actual)\n\n def test_traverse_wide_odd_height_rectangle(self):\n matrix = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12]]\n actual = [\n i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual(\n [7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_wide_even_height_rectangle(self):\n matrix = [\n [1, 2, 3, 4],\n [5, 6, 7, 8]]\n actual = [\n i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual(\n [5, 6, 7, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_tall_even_width_rectangle(self):\n matrix = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n [17, 18, 19, 20]]\n actual = [\n i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual(\n [10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20, 16, 12, 8, 4, 3, 2, 1],\n actual)\n\n def test_traverse_tall_odd_width_rectangle(self):\n matrix = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n [10, 11, 12]]\n actual = [\n i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual(\n [8, 5, 4, 7, 10, 11, 12,9, 6, 3, 2, 1], actual)\n\n def test_traverse_large_matrix(self):\n matrix = [[i * 1000 + j for j in range(0, 1000)]\n for i in range(0, 1000)]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])\n self.assertEqual([3, 2, 1, 0],\n actual[-4:])\n",
"step-ids": [
5,
7,
11,
14,
15
]
}
|
[
5,
7,
11,
14,
15
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 7 07:51:26 2017
@author: hcorrada
"""
from plagiarism_lib.article_db import ArticleDB
from plagiarism_lib.minhash import MinHash
from plagiarism_lib.lsh import LSH
import pandas as pd
import numpy as np
def _read_truthfile(filepath):
with open(filepath, 'r') as f:
truth_pairs = [tuple(sorted(line.strip().split()))
for line in f]
return set(truth_pairs)
def _get_stats(candidate_pairs, truth_pairs):
tp = len(candidate_pairs.intersection(truth_pairs))
prec = 1.0 * tp / len(candidate_pairs)
rec = 1.0 * tp / len(truth_pairs)
print (" returned: %d, tp=%.4f, prec=%.4f, rec=%.4f" % (len(candidate_pairs), tp, prec, rec))
return prec, rec
def run(mh, truthfile, ts):
truth_pairs = _read_truthfile(truthfile)
prec_series = []
rec_series = []
for t in ts:
print("Doing LSH with t=", t)
lsh = LSH(t)
lsh.do_lsh(mh)
candidate_pairs = set(lsh.get_candidates())
prec, rec = _get_stats(candidate_pairs, truth_pairs)
prec_series.append(prec)
rec_series.append(rec)
exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series})
return exp_df
|
normal
|
{
"blob_id": "18b73a06c80272aff5c0e4b10473e95bd58466f3",
"index": 1197,
"step-1": "<mask token>\n\n\ndef _get_stats(candidate_pairs, truth_pairs):\n tp = len(candidate_pairs.intersection(truth_pairs))\n prec = 1.0 * tp / len(candidate_pairs)\n rec = 1.0 * tp / len(truth_pairs)\n print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(\n candidate_pairs), tp, prec, rec))\n return prec, rec\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _read_truthfile(filepath):\n with open(filepath, 'r') as f:\n truth_pairs = [tuple(sorted(line.strip().split())) for line in f]\n return set(truth_pairs)\n\n\ndef _get_stats(candidate_pairs, truth_pairs):\n tp = len(candidate_pairs.intersection(truth_pairs))\n prec = 1.0 * tp / len(candidate_pairs)\n rec = 1.0 * tp / len(truth_pairs)\n print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(\n candidate_pairs), tp, prec, rec))\n return prec, rec\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _read_truthfile(filepath):\n with open(filepath, 'r') as f:\n truth_pairs = [tuple(sorted(line.strip().split())) for line in f]\n return set(truth_pairs)\n\n\ndef _get_stats(candidate_pairs, truth_pairs):\n tp = len(candidate_pairs.intersection(truth_pairs))\n prec = 1.0 * tp / len(candidate_pairs)\n rec = 1.0 * tp / len(truth_pairs)\n print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(\n candidate_pairs), tp, prec, rec))\n return prec, rec\n\n\ndef run(mh, truthfile, ts):\n truth_pairs = _read_truthfile(truthfile)\n prec_series = []\n rec_series = []\n for t in ts:\n print('Doing LSH with t=', t)\n lsh = LSH(t)\n lsh.do_lsh(mh)\n candidate_pairs = set(lsh.get_candidates())\n prec, rec = _get_stats(candidate_pairs, truth_pairs)\n prec_series.append(prec)\n rec_series.append(rec)\n exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series})\n return exp_df\n",
"step-4": "<mask token>\nfrom plagiarism_lib.article_db import ArticleDB\nfrom plagiarism_lib.minhash import MinHash\nfrom plagiarism_lib.lsh import LSH\nimport pandas as pd\nimport numpy as np\n\n\ndef _read_truthfile(filepath):\n with open(filepath, 'r') as f:\n truth_pairs = [tuple(sorted(line.strip().split())) for line in f]\n return set(truth_pairs)\n\n\ndef _get_stats(candidate_pairs, truth_pairs):\n tp = len(candidate_pairs.intersection(truth_pairs))\n prec = 1.0 * tp / len(candidate_pairs)\n rec = 1.0 * tp / len(truth_pairs)\n print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(\n candidate_pairs), tp, prec, rec))\n return prec, rec\n\n\ndef run(mh, truthfile, ts):\n truth_pairs = _read_truthfile(truthfile)\n prec_series = []\n rec_series = []\n for t in ts:\n print('Doing LSH with t=', t)\n lsh = LSH(t)\n lsh.do_lsh(mh)\n candidate_pairs = set(lsh.get_candidates())\n prec, rec = _get_stats(candidate_pairs, truth_pairs)\n prec_series.append(prec)\n rec_series.append(rec)\n exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series})\n return exp_df\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 7 07:51:26 2017\n\n@author: hcorrada\n\"\"\"\n\nfrom plagiarism_lib.article_db import ArticleDB\nfrom plagiarism_lib.minhash import MinHash\nfrom plagiarism_lib.lsh import LSH\n\nimport pandas as pd\nimport numpy as np\n\ndef _read_truthfile(filepath):\n with open(filepath, 'r') as f:\n truth_pairs = [tuple(sorted(line.strip().split()))\n for line in f]\n return set(truth_pairs)\n\ndef _get_stats(candidate_pairs, truth_pairs):\n tp = len(candidate_pairs.intersection(truth_pairs)) \n prec = 1.0 * tp / len(candidate_pairs)\n rec = 1.0 * tp / len(truth_pairs)\n print (\" returned: %d, tp=%.4f, prec=%.4f, rec=%.4f\" % (len(candidate_pairs), tp, prec, rec))\n return prec, rec\n\ndef run(mh, truthfile, ts):\n truth_pairs = _read_truthfile(truthfile)\n \n prec_series = []\n rec_series = []\n \n for t in ts:\n print(\"Doing LSH with t=\", t) \n lsh = LSH(t)\n lsh.do_lsh(mh)\n \n candidate_pairs = set(lsh.get_candidates())\n prec, rec = _get_stats(candidate_pairs, truth_pairs) \n prec_series.append(prec)\n rec_series.append(rec)\n \n exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series})\n \n return exp_df",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
dic = {'name': 'Eric', 'age': '25'} # dictionary form
print(dic['name'])
|
normal
|
{
"blob_id": "09c3a10230e7d0b3b893ccf236c39fc2dc12b2c6",
"index": 1097,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(dic['name'])\n",
"step-3": "dic = {'name': 'Eric', 'age': '25'}\nprint(dic['name'])\n",
"step-4": "dic = {'name': 'Eric', 'age': '25'} # 딕셔너리 형태\n\n\nprint(dic['name'])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import argparse
import tensorboardX as tb
import torch as th
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as D
import data
import mlp
import resnet
import utils
parser = argparse.ArgumentParser()
parser.add_argument('--bst', nargs='+', type=int, help='Batch Size for Training')
parser.add_argument('--bsi', type=int, help='Batch Size for Inference')
parser.add_argument('--ds', type=str, help='DataSet')
parser.add_argument('--gpu', type=int, help='GPU')
parser.add_argument('--id', type=str, help='IDentifier')
parser.add_argument('--log-every', type=int, help='LOG statistics EVERY _ iterations')
parser.add_argument('--loss', type=str, help='LOSS')
parser.add_argument('--lr', type=float, help='Learning Rate')
parser.add_argument('--metric', type=str, help='METRIC')
parser.add_argument('--model', type=str, help='MODEL')
parser.add_argument('--ni', type=int, help='Number of Iterations')
parser.add_argument('--opt', type=str, help='OPTimizer')
parser.add_argument('--ptt', nargs='+', type=int, help='ParTiTion')
parser.add_argument('--tb', action='store_true', help='TensorBoard')
parser.add_argument('--w', type=float, help='Weight')
parser.add_argument('--wd', type=float, help='Weight Decay')
args = parser.parse_args()
x, y = {'adult' : data.load_adult,
'cifar10' : data.load_multi_cifar10,
'cifar100' : data.load_multi_cifar100,
'covtype' : data.load_covtype,
'kddcup08' : data.load_kddcup08,
'letter' : data.load_multi_letter,
'mnist' : data.load_multi_mnist}[args.ds]()
x, y = data.shuffle(x, y)
[[train_xx, train_yy],
[val_xx, val_yy],
[test_xx, test_yy]] = data.partition(x, y, args.ptt)
train_x, val_x, test_x = th.cat(train_xx), th.cat(val_xx), th.cat(test_xx)
train_y, val_y, test_y = th.cat(train_yy), th.cat(val_yy), th.cat(test_yy)
train_x, val_x, test_x = data.normalize([train_x, val_x, test_x])
train_xx = th.split(train_x, [len(x) for x in train_xx])
train_datasets = [D.TensorDataset(x) for x in train_xx]
train_loader = D.DataLoader(D.TensorDataset(train_x, train_y), args.bsi)
val_loader = D.DataLoader(D.TensorDataset(val_x, val_y), args.bsi)
test_loader = D.DataLoader(D.TensorDataset(test_x, test_y), args.bsi)
pclass_list = [len(y) / len(train_y) for y in train_yy]
n_classes = len(train_yy)
if len(args.bst) == n_classes:
bs_list = args.bst
elif len(args.bst) == 1:
bs_list = [args.bst[0]] * n_classes
else:
raise RuntimeError()
train_loaders = [utils.cycle(D.DataLoader(ds, bs, shuffle=True)) \
for ds, bs in zip(train_datasets, bs_list)]
if args.model == 'linear':
model = th.nn.Linear(train_x.size(1), n_classes)
elif args.model == 'mlp':
model = mlp.MLP([train_x.size(1), 64, 64, 64, n_classes], th.relu, bn=True)
elif args.model == 'resnet':
model = resnet.ResNet(18, n_classes)[args.model]
else:
raise RuntimeError()
dev = th.device('cpu') if args.gpu < 0 else th.device('cuda:%d' % args.gpu)
model = model.to(dev)
params = list(model.parameters())
kwargs = {'params' : params, 'lr' : args.lr, 'weight_decay' : args.wd}
opt = {'sgd' : optim.SGD(**kwargs),
'adam' : optim.Adam(amsgrad=True, **kwargs)}[args.opt]
metric = getattr(utils, args.metric)
if args.tb:
path = 'tb/%s' % args.id
writer = tb.SummaryWriter(path)
train_writer = tb.SummaryWriter(path + '/a')
val_writer = tb.SummaryWriter(path + '/b')
test_writer = tb.SummaryWriter(path + '/c')
def infer(loader, model):
yy = []
y_barr = []
for x, y in loader:
x, y = x.to(dev), y.to(dev)
y_bar = th.max(model(x), 1)[1]
yy.append(y)
y_barr.append(y_bar)
y = th.cat(yy)
y_bar = th.cat(y_barr)
return y, y_bar
def log(model, i):
mmm = []
for loader in train_loader, val_loader, test_loader:
y, y_bar = infer(loader, model)
a = th.sum(y == y_bar).item() / len(y)
fnfn = utils.fn_mc(y, y_bar, n_classes)
fpfp = utils.fp_mc(y, y_bar, n_classes)
m = metric(pclass_list, fnfn, fpfp)
mmm.append([a, m])
tagg = ['a', args.metric]
placeholder = '0' * (len(str(args.ni)) - len(str(i)))
xx = ['/'.join(['%0.2f' % m for m in mm]) for mm in zip(*mmm)]
x = ' | '.join('%s %s' % (tag, mm) for tag, mm in zip(tagg, xx))
print('[iteration %s%d]%s' % ((placeholder, i, x)))
if args.tb:
for writer, mm in zip([train_writer, val_writer, test_writer], mmm):
for tag, m in zip(tagg, mm):
writer.add_scalar(tag, m, i)
utils.eval(model)
log(model, 0)
for i in range(args.ni):
xx = [next(loader)[0].to(dev) for loader in train_loaders]
x = th.cat(xx)
utils.train(model)
z = F.softmax(model(x), 1)
zz = th.split(z, [len(x) for x in xx])
pneg_list = [1 - th.mean(z[:, i]) for i, z in enumerate(zz)]
fnfn = [p_class * p_neg for p_class, p_neg in zip(pclass_list, pneg_list)]
fpfp = [(1 - p_class) * p_neg for p_class, p_neg in zip(pclass_list, pneg_list)]
if args.w > 0:
loss = sum(args.w * fn + (1 - args.w) * fp for fn, fp in zip(fnfn, fpfp))
else:
loss = -metric(pclass_list, fnfn, fpfp)
opt.zero_grad()
loss.backward()
opt.step()
utils.eval(model)
if (i + 1) % args.log_every == 0:
log(model, i + 1)
|
normal
|
{
"blob_id": "92bcfff733e5f305ad1276ceb39a72a8f0fcb214",
"index": 8038,
"step-1": "<mask token>\n\n\ndef log(model, i):\n mmm = []\n for loader in (train_loader, val_loader, test_loader):\n y, y_bar = infer(loader, model)\n a = th.sum(y == y_bar).item() / len(y)\n fnfn = utils.fn_mc(y, y_bar, n_classes)\n fpfp = utils.fp_mc(y, y_bar, n_classes)\n m = metric(pclass_list, fnfn, fpfp)\n mmm.append([a, m])\n tagg = ['a', args.metric]\n placeholder = '0' * (len(str(args.ni)) - len(str(i)))\n xx = ['/'.join([('%0.2f' % m) for m in mm]) for mm in zip(*mmm)]\n x = ' | '.join('%s %s' % (tag, mm) for tag, mm in zip(tagg, xx))\n print('[iteration %s%d]%s' % (placeholder, i, x))\n if args.tb:\n for writer, mm in zip([train_writer, val_writer, test_writer], mmm):\n for tag, m in zip(tagg, mm):\n writer.add_scalar(tag, m, i)\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--bst', nargs='+', type=int, help=\n 'Batch Size for Training')\nparser.add_argument('--bsi', type=int, help='Batch Size for Inference')\nparser.add_argument('--ds', type=str, help='DataSet')\nparser.add_argument('--gpu', type=int, help='GPU')\nparser.add_argument('--id', type=str, help='IDentifier')\nparser.add_argument('--log-every', type=int, help=\n 'LOG statistics EVERY _ iterations')\nparser.add_argument('--loss', type=str, help='LOSS')\nparser.add_argument('--lr', type=float, help='Learning Rate')\nparser.add_argument('--metric', type=str, help='METRIC')\nparser.add_argument('--model', type=str, help='MODEL')\nparser.add_argument('--ni', type=int, help='Number of Iterations')\nparser.add_argument('--opt', type=str, help='OPTimizer')\nparser.add_argument('--ptt', nargs='+', type=int, help='ParTiTion')\nparser.add_argument('--tb', action='store_true', help='TensorBoard')\nparser.add_argument('--w', type=float, help='Weight')\nparser.add_argument('--wd', type=float, help='Weight Decay')\n<mask token>\nif len(args.bst) == n_classes:\n bs_list = args.bst\nelif len(args.bst) == 1:\n bs_list = [args.bst[0]] * n_classes\nelse:\n raise RuntimeError()\n<mask token>\nif args.model == 'linear':\n model = th.nn.Linear(train_x.size(1), n_classes)\nelif args.model == 'mlp':\n model = mlp.MLP([train_x.size(1), 64, 64, 64, n_classes], th.relu, bn=True)\nelif args.model == 'resnet':\n model = resnet.ResNet(18, n_classes)[args.model]\nelse:\n raise RuntimeError()\n<mask token>\nif args.tb:\n path = 'tb/%s' % args.id\n writer = tb.SummaryWriter(path)\n train_writer = tb.SummaryWriter(path + '/a')\n val_writer = tb.SummaryWriter(path + '/b')\n test_writer = tb.SummaryWriter(path + '/c')\n\n\ndef infer(loader, model):\n yy = []\n y_barr = []\n for x, y in loader:\n x, y = x.to(dev), y.to(dev)\n y_bar = th.max(model(x), 1)[1]\n yy.append(y)\n y_barr.append(y_bar)\n y = th.cat(yy)\n y_bar = th.cat(y_barr)\n return y, y_bar\n\n\ndef log(model, i):\n mmm = []\n for loader in (train_loader, val_loader, test_loader):\n y, y_bar = infer(loader, model)\n a = th.sum(y == y_bar).item() / len(y)\n fnfn = utils.fn_mc(y, y_bar, n_classes)\n fpfp = utils.fp_mc(y, y_bar, n_classes)\n m = metric(pclass_list, fnfn, fpfp)\n mmm.append([a, m])\n tagg = ['a', args.metric]\n placeholder = '0' * (len(str(args.ni)) - len(str(i)))\n xx = ['/'.join([('%0.2f' % m) for m in mm]) for mm in zip(*mmm)]\n x = ' | '.join('%s %s' % (tag, mm) for tag, mm in zip(tagg, xx))\n print('[iteration %s%d]%s' % (placeholder, i, x))\n if args.tb:\n for writer, mm in zip([train_writer, val_writer, test_writer], mmm):\n for tag, m in zip(tagg, mm):\n writer.add_scalar(tag, m, i)\n\n\nutils.eval(model)\nlog(model, 0)\nfor i in range(args.ni):\n xx = [next(loader)[0].to(dev) for loader in train_loaders]\n x = th.cat(xx)\n utils.train(model)\n z = F.softmax(model(x), 1)\n zz = th.split(z, [len(x) for x in xx])\n pneg_list = [(1 - th.mean(z[:, i])) for i, z in enumerate(zz)]\n fnfn = [(p_class * p_neg) for p_class, p_neg in zip(pclass_list, pneg_list)\n ]\n fpfp = [((1 - p_class) * p_neg) for p_class, p_neg in zip(pclass_list,\n pneg_list)]\n if args.w > 0:\n loss = sum(args.w * fn + (1 - args.w) * fp for fn, fp in zip(fnfn,\n fpfp))\n else:\n loss = -metric(pclass_list, fnfn, fpfp)\n opt.zero_grad()\n loss.backward()\n opt.step()\n utils.eval(model)\n if (i + 1) % args.log_every == 0:\n log(model, i + 1)\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser()\nparser.add_argument('--bst', nargs='+', type=int, help=\n 'Batch Size for Training')\nparser.add_argument('--bsi', type=int, help='Batch Size for Inference')\nparser.add_argument('--ds', type=str, help='DataSet')\nparser.add_argument('--gpu', type=int, help='GPU')\nparser.add_argument('--id', type=str, help='IDentifier')\nparser.add_argument('--log-every', type=int, help=\n 'LOG statistics EVERY _ iterations')\nparser.add_argument('--loss', type=str, help='LOSS')\nparser.add_argument('--lr', type=float, help='Learning Rate')\nparser.add_argument('--metric', type=str, help='METRIC')\nparser.add_argument('--model', type=str, help='MODEL')\nparser.add_argument('--ni', type=int, help='Number of Iterations')\nparser.add_argument('--opt', type=str, help='OPTimizer')\nparser.add_argument('--ptt', nargs='+', type=int, help='ParTiTion')\nparser.add_argument('--tb', action='store_true', help='TensorBoard')\nparser.add_argument('--w', type=float, help='Weight')\nparser.add_argument('--wd', type=float, help='Weight Decay')\nargs = parser.parse_args()\nx, y = {'adult': data.load_adult, 'cifar10': data.load_multi_cifar10,\n 'cifar100': data.load_multi_cifar100, 'covtype': data.load_covtype,\n 'kddcup08': data.load_kddcup08, 'letter': data.load_multi_letter,\n 'mnist': data.load_multi_mnist}[args.ds]()\nx, y = data.shuffle(x, y)\n[[train_xx, train_yy], [val_xx, val_yy], [test_xx, test_yy]] = data.partition(x\n , y, args.ptt)\ntrain_x, val_x, test_x = th.cat(train_xx), th.cat(val_xx), th.cat(test_xx)\ntrain_y, val_y, test_y = th.cat(train_yy), th.cat(val_yy), th.cat(test_yy)\ntrain_x, val_x, test_x = data.normalize([train_x, val_x, test_x])\ntrain_xx = th.split(train_x, [len(x) for x in train_xx])\ntrain_datasets = [D.TensorDataset(x) for x in train_xx]\ntrain_loader = D.DataLoader(D.TensorDataset(train_x, train_y), args.bsi)\nval_loader = D.DataLoader(D.TensorDataset(val_x, val_y), args.bsi)\ntest_loader = D.DataLoader(D.TensorDataset(test_x, test_y), args.bsi)\npclass_list = [(len(y) / len(train_y)) for y in train_yy]\nn_classes = len(train_yy)\nif len(args.bst) == n_classes:\n bs_list = args.bst\nelif len(args.bst) == 1:\n bs_list = [args.bst[0]] * n_classes\nelse:\n raise RuntimeError()\ntrain_loaders = [utils.cycle(D.DataLoader(ds, bs, shuffle=True)) for ds, bs in\n zip(train_datasets, bs_list)]\nif args.model == 'linear':\n model = th.nn.Linear(train_x.size(1), n_classes)\nelif args.model == 'mlp':\n model = mlp.MLP([train_x.size(1), 64, 64, 64, n_classes], th.relu, bn=True)\nelif args.model == 'resnet':\n model = resnet.ResNet(18, n_classes)[args.model]\nelse:\n raise RuntimeError()\ndev = th.device('cpu') if args.gpu < 0 else th.device('cuda:%d' % args.gpu)\nmodel = model.to(dev)\nparams = list(model.parameters())\nkwargs = {'params': params, 'lr': args.lr, 'weight_decay': args.wd}\nopt = {'sgd': optim.SGD(**kwargs), 'adam': optim.Adam(amsgrad=True, **kwargs)}[\n args.opt]\nmetric = getattr(utils, args.metric)\nif args.tb:\n path = 'tb/%s' % args.id\n writer = tb.SummaryWriter(path)\n train_writer = tb.SummaryWriter(path + '/a')\n val_writer = tb.SummaryWriter(path + '/b')\n test_writer = tb.SummaryWriter(path + '/c')\n\n\ndef infer(loader, model):\n yy = []\n y_barr = []\n for x, y in loader:\n x, y = x.to(dev), y.to(dev)\n y_bar = th.max(model(x), 1)[1]\n yy.append(y)\n y_barr.append(y_bar)\n y = th.cat(yy)\n y_bar = th.cat(y_barr)\n return y, y_bar\n\n\ndef log(model, i):\n mmm = []\n for loader in (train_loader, val_loader, 
test_loader):\n y, y_bar = infer(loader, model)\n a = th.sum(y == y_bar).item() / len(y)\n fnfn = utils.fn_mc(y, y_bar, n_classes)\n fpfp = utils.fp_mc(y, y_bar, n_classes)\n m = metric(pclass_list, fnfn, fpfp)\n mmm.append([a, m])\n tagg = ['a', args.metric]\n placeholder = '0' * (len(str(args.ni)) - len(str(i)))\n xx = ['/'.join([('%0.2f' % m) for m in mm]) for mm in zip(*mmm)]\n x = ' | '.join('%s %s' % (tag, mm) for tag, mm in zip(tagg, xx))\n print('[iteration %s%d]%s' % (placeholder, i, x))\n if args.tb:\n for writer, mm in zip([train_writer, val_writer, test_writer], mmm):\n for tag, m in zip(tagg, mm):\n writer.add_scalar(tag, m, i)\n\n\nutils.eval(model)\nlog(model, 0)\nfor i in range(args.ni):\n xx = [next(loader)[0].to(dev) for loader in train_loaders]\n x = th.cat(xx)\n utils.train(model)\n z = F.softmax(model(x), 1)\n zz = th.split(z, [len(x) for x in xx])\n pneg_list = [(1 - th.mean(z[:, i])) for i, z in enumerate(zz)]\n fnfn = [(p_class * p_neg) for p_class, p_neg in zip(pclass_list, pneg_list)\n ]\n fpfp = [((1 - p_class) * p_neg) for p_class, p_neg in zip(pclass_list,\n pneg_list)]\n if args.w > 0:\n loss = sum(args.w * fn + (1 - args.w) * fp for fn, fp in zip(fnfn,\n fpfp))\n else:\n loss = -metric(pclass_list, fnfn, fpfp)\n opt.zero_grad()\n loss.backward()\n opt.step()\n utils.eval(model)\n if (i + 1) % args.log_every == 0:\n log(model, i + 1)\n",
"step-4": "import argparse\nimport tensorboardX as tb\nimport torch as th\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as D\nimport data\nimport mlp\nimport resnet\nimport utils\nparser = argparse.ArgumentParser()\nparser.add_argument('--bst', nargs='+', type=int, help=\n 'Batch Size for Training')\nparser.add_argument('--bsi', type=int, help='Batch Size for Inference')\nparser.add_argument('--ds', type=str, help='DataSet')\nparser.add_argument('--gpu', type=int, help='GPU')\nparser.add_argument('--id', type=str, help='IDentifier')\nparser.add_argument('--log-every', type=int, help=\n 'LOG statistics EVERY _ iterations')\nparser.add_argument('--loss', type=str, help='LOSS')\nparser.add_argument('--lr', type=float, help='Learning Rate')\nparser.add_argument('--metric', type=str, help='METRIC')\nparser.add_argument('--model', type=str, help='MODEL')\nparser.add_argument('--ni', type=int, help='Number of Iterations')\nparser.add_argument('--opt', type=str, help='OPTimizer')\nparser.add_argument('--ptt', nargs='+', type=int, help='ParTiTion')\nparser.add_argument('--tb', action='store_true', help='TensorBoard')\nparser.add_argument('--w', type=float, help='Weight')\nparser.add_argument('--wd', type=float, help='Weight Decay')\nargs = parser.parse_args()\nx, y = {'adult': data.load_adult, 'cifar10': data.load_multi_cifar10,\n 'cifar100': data.load_multi_cifar100, 'covtype': data.load_covtype,\n 'kddcup08': data.load_kddcup08, 'letter': data.load_multi_letter,\n 'mnist': data.load_multi_mnist}[args.ds]()\nx, y = data.shuffle(x, y)\n[[train_xx, train_yy], [val_xx, val_yy], [test_xx, test_yy]] = data.partition(x\n , y, args.ptt)\ntrain_x, val_x, test_x = th.cat(train_xx), th.cat(val_xx), th.cat(test_xx)\ntrain_y, val_y, test_y = th.cat(train_yy), th.cat(val_yy), th.cat(test_yy)\ntrain_x, val_x, test_x = data.normalize([train_x, val_x, test_x])\ntrain_xx = th.split(train_x, [len(x) for x in train_xx])\ntrain_datasets = [D.TensorDataset(x) for x in train_xx]\ntrain_loader = D.DataLoader(D.TensorDataset(train_x, train_y), args.bsi)\nval_loader = D.DataLoader(D.TensorDataset(val_x, val_y), args.bsi)\ntest_loader = D.DataLoader(D.TensorDataset(test_x, test_y), args.bsi)\npclass_list = [(len(y) / len(train_y)) for y in train_yy]\nn_classes = len(train_yy)\nif len(args.bst) == n_classes:\n bs_list = args.bst\nelif len(args.bst) == 1:\n bs_list = [args.bst[0]] * n_classes\nelse:\n raise RuntimeError()\ntrain_loaders = [utils.cycle(D.DataLoader(ds, bs, shuffle=True)) for ds, bs in\n zip(train_datasets, bs_list)]\nif args.model == 'linear':\n model = th.nn.Linear(train_x.size(1), n_classes)\nelif args.model == 'mlp':\n model = mlp.MLP([train_x.size(1), 64, 64, 64, n_classes], th.relu, bn=True)\nelif args.model == 'resnet':\n model = resnet.ResNet(18, n_classes)[args.model]\nelse:\n raise RuntimeError()\ndev = th.device('cpu') if args.gpu < 0 else th.device('cuda:%d' % args.gpu)\nmodel = model.to(dev)\nparams = list(model.parameters())\nkwargs = {'params': params, 'lr': args.lr, 'weight_decay': args.wd}\nopt = {'sgd': optim.SGD(**kwargs), 'adam': optim.Adam(amsgrad=True, **kwargs)}[\n args.opt]\nmetric = getattr(utils, args.metric)\nif args.tb:\n path = 'tb/%s' % args.id\n writer = tb.SummaryWriter(path)\n train_writer = tb.SummaryWriter(path + '/a')\n val_writer = tb.SummaryWriter(path + '/b')\n test_writer = tb.SummaryWriter(path + '/c')\n\n\ndef infer(loader, model):\n yy = []\n y_barr = []\n for x, y in loader:\n x, y = x.to(dev), y.to(dev)\n y_bar = 
th.max(model(x), 1)[1]\n yy.append(y)\n y_barr.append(y_bar)\n y = th.cat(yy)\n y_bar = th.cat(y_barr)\n return y, y_bar\n\n\ndef log(model, i):\n mmm = []\n for loader in (train_loader, val_loader, test_loader):\n y, y_bar = infer(loader, model)\n a = th.sum(y == y_bar).item() / len(y)\n fnfn = utils.fn_mc(y, y_bar, n_classes)\n fpfp = utils.fp_mc(y, y_bar, n_classes)\n m = metric(pclass_list, fnfn, fpfp)\n mmm.append([a, m])\n tagg = ['a', args.metric]\n placeholder = '0' * (len(str(args.ni)) - len(str(i)))\n xx = ['/'.join([('%0.2f' % m) for m in mm]) for mm in zip(*mmm)]\n x = ' | '.join('%s %s' % (tag, mm) for tag, mm in zip(tagg, xx))\n print('[iteration %s%d]%s' % (placeholder, i, x))\n if args.tb:\n for writer, mm in zip([train_writer, val_writer, test_writer], mmm):\n for tag, m in zip(tagg, mm):\n writer.add_scalar(tag, m, i)\n\n\nutils.eval(model)\nlog(model, 0)\nfor i in range(args.ni):\n xx = [next(loader)[0].to(dev) for loader in train_loaders]\n x = th.cat(xx)\n utils.train(model)\n z = F.softmax(model(x), 1)\n zz = th.split(z, [len(x) for x in xx])\n pneg_list = [(1 - th.mean(z[:, i])) for i, z in enumerate(zz)]\n fnfn = [(p_class * p_neg) for p_class, p_neg in zip(pclass_list, pneg_list)\n ]\n fpfp = [((1 - p_class) * p_neg) for p_class, p_neg in zip(pclass_list,\n pneg_list)]\n if args.w > 0:\n loss = sum(args.w * fn + (1 - args.w) * fp for fn, fp in zip(fnfn,\n fpfp))\n else:\n loss = -metric(pclass_list, fnfn, fpfp)\n opt.zero_grad()\n loss.backward()\n opt.step()\n utils.eval(model)\n if (i + 1) % args.log_every == 0:\n log(model, i + 1)\n",
"step-5": "import argparse\nimport tensorboardX as tb\nimport torch as th\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as D\nimport data\nimport mlp\nimport resnet\nimport utils\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--bst', nargs='+', type=int, help='Batch Size for Training')\nparser.add_argument('--bsi', type=int, help='Batch Size for Inference')\nparser.add_argument('--ds', type=str, help='DataSet')\nparser.add_argument('--gpu', type=int, help='GPU')\nparser.add_argument('--id', type=str, help='IDentifier')\nparser.add_argument('--log-every', type=int, help='LOG statistics EVERY _ iterations')\nparser.add_argument('--loss', type=str, help='LOSS')\nparser.add_argument('--lr', type=float, help='Learning Rate')\nparser.add_argument('--metric', type=str, help='METRIC')\nparser.add_argument('--model', type=str, help='MODEL')\nparser.add_argument('--ni', type=int, help='Number of Iterations')\nparser.add_argument('--opt', type=str, help='OPTimizer')\nparser.add_argument('--ptt', nargs='+', type=int, help='ParTiTion')\nparser.add_argument('--tb', action='store_true', help='TensorBoard')\nparser.add_argument('--w', type=float, help='Weight')\nparser.add_argument('--wd', type=float, help='Weight Decay')\nargs = parser.parse_args()\n\nx, y = {'adult' : data.load_adult,\n 'cifar10' : data.load_multi_cifar10,\n 'cifar100' : data.load_multi_cifar100,\n 'covtype' : data.load_covtype,\n 'kddcup08' : data.load_kddcup08,\n 'letter' : data.load_multi_letter,\n 'mnist' : data.load_multi_mnist}[args.ds]()\nx, y = data.shuffle(x, y)\n[[train_xx, train_yy],\n [val_xx, val_yy],\n [test_xx, test_yy]] = data.partition(x, y, args.ptt)\ntrain_x, val_x, test_x = th.cat(train_xx), th.cat(val_xx), th.cat(test_xx)\ntrain_y, val_y, test_y = th.cat(train_yy), th.cat(val_yy), th.cat(test_yy)\ntrain_x, val_x, test_x = data.normalize([train_x, val_x, test_x])\ntrain_xx = th.split(train_x, [len(x) for x in train_xx])\ntrain_datasets = [D.TensorDataset(x) for x in train_xx]\ntrain_loader = D.DataLoader(D.TensorDataset(train_x, train_y), args.bsi)\nval_loader = D.DataLoader(D.TensorDataset(val_x, val_y), args.bsi)\ntest_loader = D.DataLoader(D.TensorDataset(test_x, test_y), args.bsi)\npclass_list = [len(y) / len(train_y) for y in train_yy]\n\nn_classes = len(train_yy)\nif len(args.bst) == n_classes:\n bs_list = args.bst\nelif len(args.bst) == 1:\n bs_list = [args.bst[0]] * n_classes\nelse:\n raise RuntimeError()\ntrain_loaders = [utils.cycle(D.DataLoader(ds, bs, shuffle=True)) \\\n for ds, bs in zip(train_datasets, bs_list)]\n\nif args.model == 'linear':\n model = th.nn.Linear(train_x.size(1), n_classes)\nelif args.model == 'mlp':\n model = mlp.MLP([train_x.size(1), 64, 64, 64, n_classes], th.relu, bn=True)\nelif args.model == 'resnet':\n model = resnet.ResNet(18, n_classes)[args.model]\nelse:\n raise RuntimeError()\ndev = th.device('cpu') if args.gpu < 0 else th.device('cuda:%d' % args.gpu)\nmodel = model.to(dev)\nparams = list(model.parameters())\nkwargs = {'params' : params, 'lr' : args.lr, 'weight_decay' : args.wd}\nopt = {'sgd' : optim.SGD(**kwargs),\n 'adam' : optim.Adam(amsgrad=True, **kwargs)}[args.opt]\nmetric = getattr(utils, args.metric)\n\nif args.tb:\n path = 'tb/%s' % args.id\n writer = tb.SummaryWriter(path)\n train_writer = tb.SummaryWriter(path + '/a')\n val_writer = tb.SummaryWriter(path + '/b')\n test_writer = tb.SummaryWriter(path + '/c')\n\ndef infer(loader, model):\n yy = []\n y_barr = []\n for x, y in loader:\n x, y = x.to(dev), y.to(dev)\n 
y_bar = th.max(model(x), 1)[1]\n yy.append(y)\n y_barr.append(y_bar)\n y = th.cat(yy)\n y_bar = th.cat(y_barr)\n return y, y_bar\n\ndef log(model, i):\n mmm = []\n for loader in train_loader, val_loader, test_loader:\n y, y_bar = infer(loader, model)\n\n a = th.sum(y == y_bar).item() / len(y)\n fnfn = utils.fn_mc(y, y_bar, n_classes)\n fpfp = utils.fp_mc(y, y_bar, n_classes)\n m = metric(pclass_list, fnfn, fpfp)\n\n mmm.append([a, m])\n\n tagg = ['a', args.metric]\n\n placeholder = '0' * (len(str(args.ni)) - len(str(i)))\n xx = ['/'.join(['%0.2f' % m for m in mm]) for mm in zip(*mmm)]\n x = ' | '.join('%s %s' % (tag, mm) for tag, mm in zip(tagg, xx))\n print('[iteration %s%d]%s' % ((placeholder, i, x)))\n\n if args.tb:\n for writer, mm in zip([train_writer, val_writer, test_writer], mmm):\n for tag, m in zip(tagg, mm):\n writer.add_scalar(tag, m, i)\n\nutils.eval(model)\nlog(model, 0)\n\nfor i in range(args.ni):\n xx = [next(loader)[0].to(dev) for loader in train_loaders]\n x = th.cat(xx)\n utils.train(model)\n z = F.softmax(model(x), 1)\n zz = th.split(z, [len(x) for x in xx])\n pneg_list = [1 - th.mean(z[:, i]) for i, z in enumerate(zz)]\n fnfn = [p_class * p_neg for p_class, p_neg in zip(pclass_list, pneg_list)]\n fpfp = [(1 - p_class) * p_neg for p_class, p_neg in zip(pclass_list, pneg_list)]\n\n if args.w > 0:\n loss = sum(args.w * fn + (1 - args.w) * fp for fn, fp in zip(fnfn, fpfp))\n else:\n loss = -metric(pclass_list, fnfn, fpfp)\n\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n utils.eval(model)\n if (i + 1) % args.log_every == 0:\n log(model, i + 1)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# import the necessary packages
from .pigear import PiGear
from .camgear import CamGear
from .videogear import VideoGear
__all__ = ["PiGear", "CamGear", "VideoGear"]
|
normal
|
{
"blob_id": "3431e342c940b0d91f817c3e583728e55e305210",
"index": 8940,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['PiGear', 'CamGear', 'VideoGear']\n",
"step-3": "from .pigear import PiGear\nfrom .camgear import CamGear\nfrom .videogear import VideoGear\n__all__ = ['PiGear', 'CamGear', 'VideoGear']\n",
"step-4": "# import the necessary packages\nfrom .pigear import PiGear\nfrom .camgear import CamGear\nfrom .videogear import VideoGear\n\n__all__ = [\"PiGear\", \"CamGear\", \"VideoGear\"]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
model.fit(X_train, y_train)
<|reserved_special_token_0|>
print(predictions)
<|reserved_special_token_0|>
print(score)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
music_data = pd.read_csv(
'C:\\Users\\junha\\PythonProjects\\predict_music_preferences\\music.csv')
X = music_data.drop(columns=['genre'])
y = music_data['genre']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print(predictions)
score = accuracy_score(y_test, predictions)
print(score)
<|reserved_special_token_1|>
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
music_data = pd.read_csv(
'C:\\Users\\junha\\PythonProjects\\predict_music_preferences\\music.csv')
X = music_data.drop(columns=['genre'])
y = music_data['genre']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print(predictions)
score = accuracy_score(y_test, predictions)
print(score)
<|reserved_special_token_1|>
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# from sklearn import tree
# import joblib
music_data = pd.read_csv(r"C:\Users\junha\PythonProjects\predict_music_preferences\music.csv")
# print(music_data)
X = music_data.drop(columns=['genre'])
y = music_data['genre']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print(predictions)
score = accuracy_score(y_test, predictions)
print(score)
# joblib.dump(model, 'music-recommender.joblib')
# tree.export_graphviz(model, out_file='music-recommender.dot',
# feature_names=['age', 'gender'],
# class_names=sorted(y.unique()),
# label='all', rounded= True,
# filled=True)
|
flexible
|
{
"blob_id": "8dbcd7bba09f8acff860890d8201e016b587796d",
"index": 6149,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmodel.fit(X_train, y_train)\n<mask token>\nprint(predictions)\n<mask token>\nprint(score)\n",
"step-3": "<mask token>\nmusic_data = pd.read_csv(\n 'C:\\\\Users\\\\junha\\\\PythonProjects\\\\predict_music_preferences\\\\music.csv')\nX = music_data.drop(columns=['genre'])\ny = music_data['genre']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\nmodel = DecisionTreeClassifier()\nmodel.fit(X_train, y_train)\npredictions = model.predict(X_test)\nprint(predictions)\nscore = accuracy_score(y_test, predictions)\nprint(score)\n",
"step-4": "import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nmusic_data = pd.read_csv(\n 'C:\\\\Users\\\\junha\\\\PythonProjects\\\\predict_music_preferences\\\\music.csv')\nX = music_data.drop(columns=['genre'])\ny = music_data['genre']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\nmodel = DecisionTreeClassifier()\nmodel.fit(X_train, y_train)\npredictions = model.predict(X_test)\nprint(predictions)\nscore = accuracy_score(y_test, predictions)\nprint(score)\n",
"step-5": "\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n# from sklearn import tree\n# import joblib\nmusic_data = pd.read_csv(r\"C:\\Users\\junha\\PythonProjects\\predict_music_preferences\\music.csv\")\n# print(music_data)\n\n\nX = music_data.drop(columns=['genre'])\ny = music_data['genre']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nmodel = DecisionTreeClassifier()\nmodel.fit(X_train, y_train)\npredictions = model.predict(X_test)\nprint(predictions)\n\nscore = accuracy_score(y_test, predictions)\nprint(score)\n\n# joblib.dump(model, 'music-recommender.joblib')\n\n# tree.export_graphviz(model, out_file='music-recommender.dot',\n# feature_names=['age', 'gender'],\n# class_names=sorted(y.unique()), \n# label='all', rounded= True,\n# filled=True)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
'''
Various tools for cleaning out nulls and imputing
'''
|
flexible
|
{
"blob_id": "bd310ab0bc193410b8f93ad5516b0731d2eba54f",
"index": 6268,
"step-1": "<mask token>\n",
"step-2": "'''\nVarious tools for cleaning out nulls and imputing \n'''\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import SetPasswordForm
from . import views
urlpatterns = [
url(regex=r'^(?P<pk>\d+)$', view=views.UserDetailView.as_view(), name='user_detail'),
url(regex=r'^update/(?P<pk>\d+)$', view=views.UserUpdateView.as_view(), name='user_update'),
url(regex=r'^email/update/(?P<pk>\d+)$', view=views.EmailUpdateView.as_view(), name='email_change'),
url(regex=r'^password/change$', view=auth_views.password_change,
kwargs={'template_name': 'accounts/password_change_form.html',
'current_app': 'accounts', 'password_change_form': SetPasswordForm},
name='password_change'),
url(regex=r'^password/change/done$', view=auth_views.password_change_done,
kwargs={'template_name': 'accounts/password_change_done.html', 'current_app': 'accounts'},
name='password_change_done'),
url(regex=r'^switch$', view=views.SwitchUserView.as_view(), name='switch_user'),
url(regex=r'^all_trainees$', view=views.AllTrainees.as_view(), name='trainee_information'),
]
|
normal
|
{
"blob_id": "1ac0f5c62ee3cb60d4443b65d429f4f0e6815100",
"index": 5488,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url(regex='^(?P<pk>\\\\d+)$', view=views.UserDetailView.\n as_view(), name='user_detail'), url(regex='^update/(?P<pk>\\\\d+)$', view\n =views.UserUpdateView.as_view(), name='user_update'), url(regex=\n '^email/update/(?P<pk>\\\\d+)$', view=views.EmailUpdateView.as_view(),\n name='email_change'), url(regex='^password/change$', view=auth_views.\n password_change, kwargs={'template_name':\n 'accounts/password_change_form.html', 'current_app': 'accounts',\n 'password_change_form': SetPasswordForm}, name='password_change'), url(\n regex='^password/change/done$', view=auth_views.password_change_done,\n kwargs={'template_name': 'accounts/password_change_done.html',\n 'current_app': 'accounts'}, name='password_change_done'), url(regex=\n '^switch$', view=views.SwitchUserView.as_view(), name='switch_user'),\n url(regex='^all_trainees$', view=views.AllTrainees.as_view(), name=\n 'trainee_information')]\n",
"step-3": "from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom . import views\nurlpatterns = [url(regex='^(?P<pk>\\\\d+)$', view=views.UserDetailView.\n as_view(), name='user_detail'), url(regex='^update/(?P<pk>\\\\d+)$', view\n =views.UserUpdateView.as_view(), name='user_update'), url(regex=\n '^email/update/(?P<pk>\\\\d+)$', view=views.EmailUpdateView.as_view(),\n name='email_change'), url(regex='^password/change$', view=auth_views.\n password_change, kwargs={'template_name':\n 'accounts/password_change_form.html', 'current_app': 'accounts',\n 'password_change_form': SetPasswordForm}, name='password_change'), url(\n regex='^password/change/done$', view=auth_views.password_change_done,\n kwargs={'template_name': 'accounts/password_change_done.html',\n 'current_app': 'accounts'}, name='password_change_done'), url(regex=\n '^switch$', view=views.SwitchUserView.as_view(), name='switch_user'),\n url(regex='^all_trainees$', view=views.AllTrainees.as_view(), name=\n 'trainee_information')]\n",
"step-4": "from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.forms import SetPasswordForm\n\nfrom . import views\n\nurlpatterns = [\n url(regex=r'^(?P<pk>\\d+)$', view=views.UserDetailView.as_view(), name='user_detail'),\n url(regex=r'^update/(?P<pk>\\d+)$', view=views.UserUpdateView.as_view(), name='user_update'),\n url(regex=r'^email/update/(?P<pk>\\d+)$', view=views.EmailUpdateView.as_view(), name='email_change'),\n url(regex=r'^password/change$', view=auth_views.password_change,\n kwargs={'template_name': 'accounts/password_change_form.html',\n 'current_app': 'accounts', 'password_change_form': SetPasswordForm},\n name='password_change'),\n url(regex=r'^password/change/done$', view=auth_views.password_change_done,\n kwargs={'template_name': 'accounts/password_change_done.html', 'current_app': 'accounts'},\n name='password_change_done'),\n url(regex=r'^switch$', view=views.SwitchUserView.as_view(), name='switch_user'),\n url(regex=r'^all_trainees$', view=views.AllTrainees.as_view(), name='trainee_information'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print("The sum of 'numbers' is:", sum(numbers))
print("The largest of 'numbers' is:", max(numbers))
print("The smallest of 'numbers' is:", min(numbers))
for i in numbers:
if i % 2 == 0:
print(i, 'is even.')
for i in numbers:
if i > 0:
print(i, 'is positive.')
<|reserved_special_token_0|>
for i in numbers:
if i > 0:
posNums.append(i)
print(posNums)
<|reserved_special_token_0|>
for x in numbers:
times5.append(x * 5)
print(times5)
<|reserved_special_token_0|>
for i in range(0, len(a)):
ab.append(a[i] * b[i])
print(ab)
<|reserved_special_token_0|>
for h in range(len(m)):
row = []
for j in range(len(m[h])):
row.append(m[h][j] + n[h][j])
m_n.append(row)
print(m_n)
<|reserved_special_token_0|>
for z in dupList:
if z not in noDup:
noDup.append(z)
print(noDup)
<|reserved_special_token_1|>
numbers = [10, 20, 30, 9, -12]
print("The sum of 'numbers' is:", sum(numbers))
print("The largest of 'numbers' is:", max(numbers))
print("The smallest of 'numbers' is:", min(numbers))
for i in numbers:
if i % 2 == 0:
print(i, 'is even.')
for i in numbers:
if i > 0:
print(i, 'is positive.')
posNums = []
for i in numbers:
if i > 0:
posNums.append(i)
print(posNums)
times5 = []
for x in numbers:
times5.append(x * 5)
print(times5)
a = [1, 3, 6]
b = [2, 4, 6]
ab = []
for i in range(0, len(a)):
ab.append(a[i] * b[i])
print(ab)
m = [[1, 2], [7, 8], [3, 4]]
n = [[3, 4], [5, 6], [3, 4]]
m_n = []
for h in range(len(m)):
row = []
for j in range(len(m[h])):
row.append(m[h][j] + n[h][j])
m_n.append(row)
print(m_n)
dupList = ['x', 'y', 'z', 'y', 23, 0.5, 23]
noDup = []
for z in dupList:
if z not in noDup:
noDup.append(z)
print(noDup)
<|reserved_special_token_1|>
# Ex 1
numbers = [10,20,30, 9,-12]
print("The sum of 'numbers' is:",sum(numbers))
# Ex 2
print("The largest of 'numbers' is:",max(numbers))
# Ex 3
print("The smallest of 'numbers' is:",min(numbers))
# Ex 4
for i in numbers:
if (i % 2 == 0):
print(i,"is even.")
# Ex 5
for i in numbers:
if (i > 0):
print(i,"is positive.")
# Ex 6
posNums = []
for i in numbers:
if (i > 0):
posNums.append(i)
print(posNums)
# Ex 7
times5 = []
for x in numbers:
times5.append(x*5)
print(times5)
# Ex 8
a=[1,3,6]
b=[2,4,6]
ab = []
for i in range(0, len(a)):
ab.append(a[i]*b[i])
print(ab)
# Ex 9 and 10
m=[[1,2],[7,8],[3,4]]
n=[[3,4],[5,6],[3,4]]
m_n = []
for h in range(len(m)):
row = []
for j in range(len(m[h])):
row.append(m[h][j] + n[h][j])
m_n.append(row)
print(m_n)
# Ex 11
dupList = ["x","y","z","y",23,0.5,23]
noDup = []
for z in dupList:
if(z not in noDup):
noDup.append(z)
print(noDup)
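# Aside (not part of the original exercises): Ex 8 and Ex 9/10 can also be written with zip,
# which avoids manual index bookkeeping; the results are identical to ab and m_n above.
ab_zip = [x * y for x, y in zip(a, b)]
m_n_zip = [[p + q for p, q in zip(row_m, row_n)] for row_m, row_n in zip(m, n)]
print(ab_zip)
print(m_n_zip)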
|
flexible
|
{
"blob_id": "ce8879dae6c7585a727e35f588722bc28045256a",
"index": 8569,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"The sum of 'numbers' is:\", sum(numbers))\nprint(\"The largest of 'numbers' is:\", max(numbers))\nprint(\"The smallest of 'numbers' is:\", min(numbers))\nfor i in numbers:\n if i % 2 == 0:\n print(i, 'is even.')\nfor i in numbers:\n if i > 0:\n print(i, 'is positive.')\n<mask token>\nfor i in numbers:\n if i > 0:\n posNums.append(i)\nprint(posNums)\n<mask token>\nfor x in numbers:\n times5.append(x * 5)\nprint(times5)\n<mask token>\nfor i in range(0, len(a)):\n ab.append(a[i] * b[i])\nprint(ab)\n<mask token>\nfor h in range(len(m)):\n row = []\n for j in range(len(m[h])):\n row.append(m[h][j] + n[h][j])\n m_n.append(row)\nprint(m_n)\n<mask token>\nfor z in dupList:\n if z not in noDup:\n noDup.append(z)\nprint(noDup)\n",
"step-3": "numbers = [10, 20, 30, 9, -12]\nprint(\"The sum of 'numbers' is:\", sum(numbers))\nprint(\"The largest of 'numbers' is:\", max(numbers))\nprint(\"The smallest of 'numbers' is:\", min(numbers))\nfor i in numbers:\n if i % 2 == 0:\n print(i, 'is even.')\nfor i in numbers:\n if i > 0:\n print(i, 'is positive.')\nposNums = []\nfor i in numbers:\n if i > 0:\n posNums.append(i)\nprint(posNums)\ntimes5 = []\nfor x in numbers:\n times5.append(x * 5)\nprint(times5)\na = [1, 3, 6]\nb = [2, 4, 6]\nab = []\nfor i in range(0, len(a)):\n ab.append(a[i] * b[i])\nprint(ab)\nm = [[1, 2], [7, 8], [3, 4]]\nn = [[3, 4], [5, 6], [3, 4]]\nm_n = []\nfor h in range(len(m)):\n row = []\n for j in range(len(m[h])):\n row.append(m[h][j] + n[h][j])\n m_n.append(row)\nprint(m_n)\ndupList = ['x', 'y', 'z', 'y', 23, 0.5, 23]\nnoDup = []\nfor z in dupList:\n if z not in noDup:\n noDup.append(z)\nprint(noDup)\n",
"step-4": "# Ex 1\nnumbers = [10,20,30, 9,-12]\nprint(\"The sum of 'numbers' is:\",sum(numbers))\n\n# Ex 2\nprint(\"The largest of 'numbers' is:\",max(numbers))\n# Ex 3\nprint(\"The smallest of 'numbers' is:\",min(numbers))\n# Ex 4\nfor i in numbers:\n if (i % 2 == 0):\n print(i,\"is even.\")\n# Ex 5\nfor i in numbers:\n if (i > 0):\n print(i,\"is positive.\")\n# Ex 6 \nposNums = [] \nfor i in numbers:\n if (i > 0):\n posNums.append(i)\n \nprint(posNums)\n# Ex 7\ntimes5 = []\nfor x in numbers:\n times5.append(x*5)\n\nprint(times5)\n\n# Ex 8\na=[1,3,6]\nb=[2,4,6]\nab = []\nfor i in range(0, len(a)):\n ab.append(a[i]*b[i])\n\nprint(ab)\n\n# Ex 9 and 10\nm=[[1,2],[7,8],[3,4]]\nn=[[3,4],[5,6],[3,4]]\nm_n = []\nfor h in range(len(m)):\n row = []\n for j in range(len(m[h])):\n row.append(m[h][j] + n[h][j])\n m_n.append(row)\n\nprint(m_n)\n\n# Ex 11\ndupList = [\"x\",\"y\",\"z\",\"y\",23,0.5,23]\nnoDup = []\nfor z in dupList:\n if(z not in noDup):\n noDup.append(z)\n \nprint(noDup)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def firstDuplicate(array):
"""
Time O(n) | Space O(n)
"""
dic = {}
for num in array:
if num in dic:
return num
else:
dic[num] = True
return -1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def firstDuplicate(array):
"""
Time O(n) | Space O(n)
"""
dic = {}
for num in array:
if num in dic:
return num
else:
dic[num] = True
return -1
print(firstDuplicate([2, 1, 3, 5, 3]))
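# Alternative sketch (not in the original snippet): the same idea with a set instead of a dict.
def first_duplicate_set(array):
    """Same O(n) time / O(n) space behaviour as firstDuplicate above."""
    seen = set()
    for num in array:
        if num in seen:
            return num
        seen.add(num)
    return -1


print(first_duplicate_set([2, 1, 3, 5, 3]))  # 3, matching firstDuplicate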
|
flexible
|
{
"blob_id": "47259844f76f12060f0cf52f1086c05b9f300175",
"index": 8581,
"step-1": "<mask token>\n",
"step-2": "def firstDuplicate(array):\n \"\"\"\n Time O(n) | Space O(n)\n \"\"\"\n dic = {}\n for num in array:\n if num in dic:\n return num\n else:\n dic[num] = True\n return -1\n\n\n<mask token>\n",
"step-3": "def firstDuplicate(array):\n \"\"\"\n Time O(n) | Space O(n)\n \"\"\"\n dic = {}\n for num in array:\n if num in dic:\n return num\n else:\n dic[num] = True\n return -1\n\n\nprint(firstDuplicate([2, 1, 3, 5, 3]))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# %matplotlib inline
import tensorflow as tf
#import tensorflow.keras as K
import numpy as np
import math
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
# from keras import backend as K
from keras.models import Sequential, load_model
# from K.models import Sequential, load_model
from keras.layers import InputLayer, Input, Dense, Dropout
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
from keras.backend import clear_session
## pip install h5py scikit-optimize
## once you have that installed, you can run the following code.
import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
matplotlib.use('GTKAgg')
from skopt.plots import plot_convergence
matplotlib.use('GTKAgg')
from skopt.plots import plot_objective, plot_evaluations
matplotlib.use('GTKAgg')
import csv
from timeit import default_timer as timer
#from skopt.plots import plot_histogram, plot_objective_2D
from skopt.utils import use_named_args
from sklearn.metrics import roc_auc_score ## Compute Area Under the Curve
from datetime import datetime ## time the optimization run
## Load Dataset
train_samples = np.loadtxt("data/train_samples.txt", delimiter=' ', comments='# ', encoding=None)
train_labels = np.loadtxt("data/train_labels.txt", delimiter=' ', comments='# ', encoding=None)
valid_samples = np.loadtxt("data/valid_samples.txt", delimiter=' ', comments='# ', encoding=None)
valid_labels = np.loadtxt("data/valid_labels.txt", delimiter=' ', comments='# ', encoding=None)
## To set up this search space, I first need to define the search space dimensions, i.e. which parameters we are going to explore.
## For each of the parameters, we define a dimension explicitly.
##
## The learning rate is any real number between 0.000001 and 0.01, but the search over it is not plain uniform within those bounds:
## the 'log-uniform' prior specifies that values are sampled uniformly on a logarithmic scale.
learning_rate_dim = Real(low=1e-6, high=1e-2, prior='log-uniform', name='learning_rate')
## The number of layers, on the other hand, is explored within bounds, with increments done using integers
dense_layers_dim = Integer(low=1, high=5, name='dense_layers')
## We'll also try different numbers of nodes in a layer
nodes_dim = Integer(low=5, high=512, name='nodes')
## Finally we have a Categorical dimension; its categories need to be specified explicitly, because scikit-optimize
## isn't going to generate them randomly for you
activation_dim = Categorical(categories=['relu', 'sigmoid'], name='activation')
## Combine all the parameters into a list, so that we can pass it to a function
dimensions = [learning_rate_dim,
dense_layers_dim,
nodes_dim,
activation_dim]
## To kick off, it's helpful to start the search using a set of hyperparameters that we
## intuitively know performs well.
## These default parameters aren't horrible, but they don't perform great either
default_parameters = [1e-5, 1, 16, 'relu']
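## (Illustrative aside, not in the original script.) These dimensions form a skopt search
## space; a quick sanity check of the bounds above is to draw a random point from it, e.g.:
## from skopt.space import Space
## print(Space(dimensions).rvs(n_samples=1))  # e.g. [[0.00042, 3, 187, 'relu']]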
## To log the performance of the model
def log_dir_name(learning_rate, dense_layers, nodes, activation):
"""
Creates a directory named after the set of hyperparameters that was recently selected. A helper function
to log the results of training every constructed model.
"""
# the dir-name for the TensorBoard log-dir
s = "./2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/"
log_dir = s.format(learning_rate, dense_layers, nodes, activation)
return log_dir
## This funcion is copied from my previous solution on Grid SearchCV
def create_model(learning_rate, dense_layers, nodes, activation, dropout_rate=0.1):
"""
A helper function for the classifier to help construct a model after each run.
	learning_rate:	Learning-rate for the optimizer.
	dense_layers: 	Number of dense layers for the sequential model
nodes: Number of nodes in each inner dense layer.
activation: Activation function for all layers.
Additionally, we can improve on this function by adding a separate activation for
the output layer.
"""
model = Sequential()
global train_samples
## Input-shape must be a tuple without the batch size.
input_shape = (1,) + train_samples.shape
model.add(InputLayer(input_shape=(len(train_samples[0]),)))
## Needful only in case of convolutional layers.
# model.add(Reshape(img_shape_full))
for i in range(dense_layers):
## Name each layer, because Keras should give them unique names.
name = 'layer_dense_{0}'.format(i+1)
## Add these fully-connected layers to the model.
model.add(Dense(nodes, activation=activation, name=name))
model.add(Dropout(dropout_rate))
## Last output layer with softmax-activation.
## Used heavily for classification.
model.add(Dense(1, activation='sigmoid'))
optimizer = Adam(lr=learning_rate)
## Compile the model
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
return model
## Before we start training any model, let's first save the path where we'll store the best-performing model.
best_model_path = '19_best_model.keras'
## A global variable to keep track of the best obtained accuracy.
best_auc = 0.0
@use_named_args(dimensions=dimensions)
def fitness(learning_rate, dense_layers, nodes, activation):
"""
"""
# Print the selected hyperparameters.
	print('learning rate: {0:.1e}'.format(learning_rate))
print('num_dense_layers:', dense_layers)
print('num_nodes:', nodes)
print('activation:', activation)
print("")
## Create the neural network with these hyperparameters.
model = create_model(learning_rate, dense_layers, nodes, activation)
## Create log files for the model.
## Not important for now!
# callback_log = TensorBoard(
# log_dir=log_dir,
# histogram_freq=0,
# batch_size=32,
# write_graph=True,
# write_grads=False,
# write_images=False)
## Use Keras to train the model.
history = model.fit(x=train_samples,
y=train_labels,
epochs=10,
batch_size=int(4010/4))
#callbacks=[callback_log])
## Get the classification accuracy on the validation set after the last training epoch.
# accuracy = history.history['val_acc'][-1]
predictions = model.predict(valid_samples)
auc = roc_auc_score(valid_labels, predictions)
	## Print the classification accuracy.
print('')
print("AUC = : {0:.2%}".format(auc))
print('')
## Save the model if it improves on the best-found performance.
## We use the global keyword so we update the variable outside of this function.
global best_auc
if auc > best_auc:
## Save the new model to harddisk.
model.save(best_model_path)
## Update the classification accuracy.
best_auc = auc
	## Delete the Keras model with these hyperparameters from memory.
## Also clear the session.
del model
# tf.keras.clear_session()
clear_session()
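	## gp_minimize below minimizes its objective, so the AUC is negated here:
	## maximizing AUC is equivalent to minimizing -AUC.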
return -auc
## Now we run our fitness function with the default hyperparameters that we set earlier.
## The @use_named_args decorator is what lets us pass them as a single list here.
fitness(x=default_parameters)
search_result = gp_minimize(func=fitness,
dimensions=dimensions,
acq_func='EI', # Expected Improvement.
n_calls=40,
x0=default_parameters)
## Report Result of the optimizer.
print("Best serach results:")
print(search_result.x)
print(search_result.space)
print("Lowest fitness value:")
print(search_result.fun)
zipped = sorted(zip(search_result.func_vals, search_result.x_iters))
print(zipped)
## Write sorted results to csv file for exporting
of = open('output_bayesian_optimization.csv', 'w')
header="Fit Value; Learning Rate; Dense Layers; Num. Neurons; Activation\n"
of.write(header)
for i in zipped:
row = "{0}; {1}; {2}; {3}; {4};\n".format(i[0], i[1][0], i[1][1], i[1][2], i[1][3])
of.write(row)
of.close()
## Plot results of optimizer
dim_names = ['learning_rate', 'dense_layers', 'nodes', 'activation']
plot_objective(search_result, dimensions=dim_names)
plot_evaluations(search_result)
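## (Illustrative aside, not in the original script.) Since fitness() saved the best model to
## best_model_path, it can later be reloaded for inference with the load_model already imported:
## best_model = load_model(best_model_path)
## print(best_model.predict(valid_samples[:5]))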
|
normal
|
{
"blob_id": "db9068e54607e9df48328435ef07f15b4c25a6db",
"index": 7412,
"step-1": "<mask token>\n\n\ndef log_dir_name(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\tCreates a directory named after the set of hyperparameters that was recently selected. A helper function\n\tto log the results of training every constructed model.\n\t\"\"\"\n s = './2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/'\n log_dir = s.format(learning_rate, dense_layers, nodes, activation)\n return log_dir\n\n\ndef create_model(learning_rate, dense_layers, nodes, activation,\n dropout_rate=0.1):\n \"\"\"\n\tA helper function for the classifier to help construct a model after each run.\n\n\tlearing_rate:\tLearning-rate for the optimizer.\n\tdense_layer: \tNumber of dense layers for the sequentail model\n\tnodes:\t\t\tNumber of nodes in each inner dense layer.\n\tactivation:\t\tActivation function for all layers.\n\tAdditionally, we can improve on this function by adding a separate activation for\n\tthe output layer.\n\t\"\"\"\n model = Sequential()\n global train_samples\n input_shape = (1,) + train_samples.shape\n model.add(InputLayer(input_shape=(len(train_samples[0]),)))\n for i in range(dense_layers):\n name = 'layer_dense_{0}'.format(i + 1)\n model.add(Dense(nodes, activation=activation, name=name))\n model.add(Dropout(dropout_rate))\n model.add(Dense(1, activation='sigmoid'))\n optimizer = Adam(lr=learning_rate)\n model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=\n ['accuracy'])\n return model\n\n\n<mask token>\n\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\t\"\"\"\n print('learning rate: {0:.1f}'.format(learning_rate))\n print('num_dense_layers:', dense_layers)\n print('num_nodes:', nodes)\n print('activation:', activation)\n print('')\n model = create_model(learning_rate, dense_layers, nodes, activation)\n history = model.fit(x=train_samples, y=train_labels, epochs=10,\n batch_size=int(4010 / 4))\n predictions = model.predict(valid_samples)\n auc = roc_auc_score(valid_labels, predictions)\n print('')\n print('AUC = : {0:.2%}'.format(auc))\n print('')\n global best_auc\n if auc > best_auc:\n model.save(best_model_path)\n best_auc = auc\n del model\n clear_session()\n return -auc\n\n\n<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\n\n\ndef log_dir_name(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\tCreates a directory named after the set of hyperparameters that was recently selected. A helper function\n\tto log the results of training every constructed model.\n\t\"\"\"\n s = './2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/'\n log_dir = s.format(learning_rate, dense_layers, nodes, activation)\n return log_dir\n\n\ndef create_model(learning_rate, dense_layers, nodes, activation,\n dropout_rate=0.1):\n \"\"\"\n\tA helper function for the classifier to help construct a model after each run.\n\n\tlearing_rate:\tLearning-rate for the optimizer.\n\tdense_layer: \tNumber of dense layers for the sequentail model\n\tnodes:\t\t\tNumber of nodes in each inner dense layer.\n\tactivation:\t\tActivation function for all layers.\n\tAdditionally, we can improve on this function by adding a separate activation for\n\tthe output layer.\n\t\"\"\"\n model = Sequential()\n global train_samples\n input_shape = (1,) + train_samples.shape\n model.add(InputLayer(input_shape=(len(train_samples[0]),)))\n for i in range(dense_layers):\n name = 'layer_dense_{0}'.format(i + 1)\n model.add(Dense(nodes, activation=activation, name=name))\n model.add(Dropout(dropout_rate))\n model.add(Dense(1, activation='sigmoid'))\n optimizer = Adam(lr=learning_rate)\n model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=\n ['accuracy'])\n return model\n\n\n<mask token>\n\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\t\"\"\"\n print('learning rate: {0:.1f}'.format(learning_rate))\n print('num_dense_layers:', dense_layers)\n print('num_nodes:', nodes)\n print('activation:', activation)\n print('')\n model = create_model(learning_rate, dense_layers, nodes, activation)\n history = model.fit(x=train_samples, y=train_labels, epochs=10,\n batch_size=int(4010 / 4))\n predictions = model.predict(valid_samples)\n auc = roc_auc_score(valid_labels, predictions)\n print('')\n print('AUC = : {0:.2%}'.format(auc))\n print('')\n global best_auc\n if auc > best_auc:\n model.save(best_model_path)\n best_auc = auc\n del model\n clear_session()\n return -auc\n\n\nfitness(x=default_parameters)\n<mask token>\nprint('Best serach results:')\nprint(search_result.x)\nprint(search_result.space)\nprint('Lowest fitness value:')\nprint(search_result.fun)\n<mask token>\nprint(zipped)\n<mask token>\nof.write(header)\nfor i in zipped:\n row = '{0}; {1}; {2}; {3}; {4};\\n'.format(i[0], i[1][0], i[1][1], i[1][\n 2], i[1][3])\n of.write(row)\nof.close()\n<mask token>\nplot_objective(search_result, dimensions=dim_names)\nplot_evaluations(search_result)\n",
"step-3": "<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\ntrain_samples = np.loadtxt('data/train_samples.txt', delimiter=' ',\n comments='# ', encoding=None)\ntrain_labels = np.loadtxt('data/train_labels.txt', delimiter=' ', comments=\n '# ', encoding=None)\nvalid_samples = np.loadtxt('data/valid_samples.txt', delimiter=' ',\n comments='# ', encoding=None)\nvalid_labels = np.loadtxt('data/valid_labels.txt', delimiter=' ', comments=\n '# ', encoding=None)\nlearning_rate_dim = Real(low=1e-06, high=0.01, prior='log-uniform', name=\n 'learning_rate')\ndense_layers_dim = Integer(low=1, high=5, name='dense_layers')\nnodes_dim = Integer(low=5, high=512, name='nodes')\nactivation_dim = Categorical(categories=['relu', 'sigmoid'], name='activation')\ndimensions = [learning_rate_dim, dense_layers_dim, nodes_dim, activation_dim]\ndefault_parameters = [1e-05, 1, 16, 'relu']\n\n\ndef log_dir_name(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\tCreates a directory named after the set of hyperparameters that was recently selected. A helper function\n\tto log the results of training every constructed model.\n\t\"\"\"\n s = './2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/'\n log_dir = s.format(learning_rate, dense_layers, nodes, activation)\n return log_dir\n\n\ndef create_model(learning_rate, dense_layers, nodes, activation,\n dropout_rate=0.1):\n \"\"\"\n\tA helper function for the classifier to help construct a model after each run.\n\n\tlearing_rate:\tLearning-rate for the optimizer.\n\tdense_layer: \tNumber of dense layers for the sequentail model\n\tnodes:\t\t\tNumber of nodes in each inner dense layer.\n\tactivation:\t\tActivation function for all layers.\n\tAdditionally, we can improve on this function by adding a separate activation for\n\tthe output layer.\n\t\"\"\"\n model = Sequential()\n global train_samples\n input_shape = (1,) + train_samples.shape\n model.add(InputLayer(input_shape=(len(train_samples[0]),)))\n for i in range(dense_layers):\n name = 'layer_dense_{0}'.format(i + 1)\n model.add(Dense(nodes, activation=activation, name=name))\n model.add(Dropout(dropout_rate))\n model.add(Dense(1, activation='sigmoid'))\n optimizer = Adam(lr=learning_rate)\n model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=\n ['accuracy'])\n return model\n\n\nbest_model_path = '19_best_model.keras'\nbest_auc = 0.0\n\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\t\"\"\"\n print('learning rate: {0:.1f}'.format(learning_rate))\n print('num_dense_layers:', dense_layers)\n print('num_nodes:', nodes)\n print('activation:', activation)\n print('')\n model = create_model(learning_rate, dense_layers, nodes, activation)\n history = model.fit(x=train_samples, y=train_labels, epochs=10,\n batch_size=int(4010 / 4))\n predictions = model.predict(valid_samples)\n auc = roc_auc_score(valid_labels, predictions)\n print('')\n print('AUC = : {0:.2%}'.format(auc))\n print('')\n global best_auc\n if auc > best_auc:\n model.save(best_model_path)\n best_auc = auc\n del model\n clear_session()\n return -auc\n\n\nfitness(x=default_parameters)\nsearch_result = gp_minimize(func=fitness, dimensions=dimensions, acq_func=\n 'EI', n_calls=40, x0=default_parameters)\nprint('Best serach results:')\nprint(search_result.x)\nprint(search_result.space)\nprint('Lowest fitness value:')\nprint(search_result.fun)\nzipped = 
sorted(zip(search_result.func_vals, search_result.x_iters))\nprint(zipped)\nof = open('output_bayesian_optimization.csv', 'w')\nheader = 'Fit Value; Learning Rate; Dense Layers; Num. Neurons; Activation\\n'\nof.write(header)\nfor i in zipped:\n row = '{0}; {1}; {2}; {3}; {4};\\n'.format(i[0], i[1][0], i[1][1], i[1][\n 2], i[1][3])\n of.write(row)\nof.close()\ndim_names = ['learning_rate', 'dense_layers', 'nodes', 'activation']\nplot_objective(search_result, dimensions=dim_names)\nplot_evaluations(search_result)\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport math\nimport matplotlib\nmatplotlib.use('GTKAgg')\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential, load_model\nfrom keras.layers import InputLayer, Input, Dense, Dropout\nfrom keras.callbacks import TensorBoard\nfrom keras.optimizers import Adam\nfrom keras.backend import clear_session\nimport skopt\nfrom skopt import gp_minimize, forest_minimize\nfrom skopt.space import Real, Categorical, Integer\nmatplotlib.use('GTKAgg')\nfrom skopt.plots import plot_convergence\nmatplotlib.use('GTKAgg')\nfrom skopt.plots import plot_objective, plot_evaluations\nmatplotlib.use('GTKAgg')\nimport csv\nfrom timeit import default_timer as timer\nfrom skopt.utils import use_named_args\nfrom sklearn.metrics import roc_auc_score\nfrom datetime import datetime\ntrain_samples = np.loadtxt('data/train_samples.txt', delimiter=' ',\n comments='# ', encoding=None)\ntrain_labels = np.loadtxt('data/train_labels.txt', delimiter=' ', comments=\n '# ', encoding=None)\nvalid_samples = np.loadtxt('data/valid_samples.txt', delimiter=' ',\n comments='# ', encoding=None)\nvalid_labels = np.loadtxt('data/valid_labels.txt', delimiter=' ', comments=\n '# ', encoding=None)\nlearning_rate_dim = Real(low=1e-06, high=0.01, prior='log-uniform', name=\n 'learning_rate')\ndense_layers_dim = Integer(low=1, high=5, name='dense_layers')\nnodes_dim = Integer(low=5, high=512, name='nodes')\nactivation_dim = Categorical(categories=['relu', 'sigmoid'], name='activation')\ndimensions = [learning_rate_dim, dense_layers_dim, nodes_dim, activation_dim]\ndefault_parameters = [1e-05, 1, 16, 'relu']\n\n\ndef log_dir_name(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\tCreates a directory named after the set of hyperparameters that was recently selected. 
A helper function\n\tto log the results of training every constructed model.\n\t\"\"\"\n s = './2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/'\n log_dir = s.format(learning_rate, dense_layers, nodes, activation)\n return log_dir\n\n\ndef create_model(learning_rate, dense_layers, nodes, activation,\n dropout_rate=0.1):\n \"\"\"\n\tA helper function for the classifier to help construct a model after each run.\n\n\tlearing_rate:\tLearning-rate for the optimizer.\n\tdense_layer: \tNumber of dense layers for the sequentail model\n\tnodes:\t\t\tNumber of nodes in each inner dense layer.\n\tactivation:\t\tActivation function for all layers.\n\tAdditionally, we can improve on this function by adding a separate activation for\n\tthe output layer.\n\t\"\"\"\n model = Sequential()\n global train_samples\n input_shape = (1,) + train_samples.shape\n model.add(InputLayer(input_shape=(len(train_samples[0]),)))\n for i in range(dense_layers):\n name = 'layer_dense_{0}'.format(i + 1)\n model.add(Dense(nodes, activation=activation, name=name))\n model.add(Dropout(dropout_rate))\n model.add(Dense(1, activation='sigmoid'))\n optimizer = Adam(lr=learning_rate)\n model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=\n ['accuracy'])\n return model\n\n\nbest_model_path = '19_best_model.keras'\nbest_auc = 0.0\n\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\t\"\"\"\n print('learning rate: {0:.1f}'.format(learning_rate))\n print('num_dense_layers:', dense_layers)\n print('num_nodes:', nodes)\n print('activation:', activation)\n print('')\n model = create_model(learning_rate, dense_layers, nodes, activation)\n history = model.fit(x=train_samples, y=train_labels, epochs=10,\n batch_size=int(4010 / 4))\n predictions = model.predict(valid_samples)\n auc = roc_auc_score(valid_labels, predictions)\n print('')\n print('AUC = : {0:.2%}'.format(auc))\n print('')\n global best_auc\n if auc > best_auc:\n model.save(best_model_path)\n best_auc = auc\n del model\n clear_session()\n return -auc\n\n\nfitness(x=default_parameters)\nsearch_result = gp_minimize(func=fitness, dimensions=dimensions, acq_func=\n 'EI', n_calls=40, x0=default_parameters)\nprint('Best serach results:')\nprint(search_result.x)\nprint(search_result.space)\nprint('Lowest fitness value:')\nprint(search_result.fun)\nzipped = sorted(zip(search_result.func_vals, search_result.x_iters))\nprint(zipped)\nof = open('output_bayesian_optimization.csv', 'w')\nheader = 'Fit Value; Learning Rate; Dense Layers; Num. Neurons; Activation\\n'\nof.write(header)\nfor i in zipped:\n row = '{0}; {1}; {2}; {3}; {4};\\n'.format(i[0], i[1][0], i[1][1], i[1][\n 2], i[1][3])\n of.write(row)\nof.close()\ndim_names = ['learning_rate', 'dense_layers', 'nodes', 'activation']\nplot_objective(search_result, dimensions=dim_names)\nplot_evaluations(search_result)\n",
"step-5": "# %matplotlib inline\nimport tensorflow as tf\n#import tensorflow.keras as K\nimport numpy as np\nimport math\nimport matplotlib\nmatplotlib.use('GTKAgg')\nimport matplotlib.pyplot as plt\n\n# from keras import backend as K\nfrom keras.models import Sequential, load_model\n# from K.models import Sequential, load_model\nfrom keras.layers import InputLayer, Input, Dense, Dropout\nfrom keras.callbacks import TensorBoard\nfrom keras.optimizers import Adam\nfrom keras.backend import clear_session\n## pip install h5py scikit-optimize\n## once you have that installed, you can run the following code.\nimport skopt\nfrom skopt import gp_minimize, forest_minimize\nfrom skopt.space import Real, Categorical, Integer\nmatplotlib.use('GTKAgg')\nfrom skopt.plots import plot_convergence\nmatplotlib.use('GTKAgg')\nfrom skopt.plots import plot_objective, plot_evaluations\nmatplotlib.use('GTKAgg')\nimport csv\nfrom timeit import default_timer as timer\n\n#from skopt.plots import plot_histogram, plot_objective_2D\nfrom skopt.utils import use_named_args\nfrom sklearn.metrics import roc_auc_score ## Computer Area Under the Curve\nfrom datetime import datetime ## time the Optimization time\n\n## Load Datset\ntrain_samples = np.loadtxt(\"data/train_samples.txt\", delimiter=' ', comments='# ', encoding=None)\ntrain_labels = np.loadtxt(\"data/train_labels.txt\", delimiter=' ', comments='# ', encoding=None)\nvalid_samples = np.loadtxt(\"data/valid_samples.txt\", delimiter=' ', comments='# ', encoding=None)\nvalid_labels = np.loadtxt(\"data/valid_labels.txt\", delimiter=' ', comments='# ', encoding=None)\n\n## To set up this search space, I first need to define the search space dimension, what parameters are we gonna explore.\n## for each of the parameters, we define a dimension explicitly\n##\n## The learning rate is any real number between 0.000001 and 0.1. But the seraching is done not in bounds.\n## 'log-uniform' specifies how the trasformation(updates) of these values is \nlearning_rate_dim = Real(low=1e-6, high=1e-2, prior='log-uniform', name='learning_rate')\n## The number of alyers on the other hand is explored in bounds, increments are done using integers\ndense_layers_dim = Integer(low=1, high=5, name='dense_layers')\n## We'll also different number of nodes in a layer\nnodes_dim = Integer(low=5, high=512, name='nodes')\n## Finally we have a Categorical dimension, this needs to be specified explicitly, because scikit-learn\n## isn't gonna generate some randomly for you\nactivation_dim = Categorical(categories=['relu', 'sigmoid'], name='activation')\n## Combine all the parameters into a list, so that we can pass it to a function\ndimensions = [learning_rate_dim,\n\t\t\tdense_layers_dim,\n\t\t\tnodes_dim,\n\t\t\tactivation_dim]\n\n\n## To kick off, it's helpful to start the serach using a set of hyperparameters that we\n## intuitively know performes well\n## These default parameters aren't horrible, but they don't perform great either\ndefault_parameters = [1e-5, 1, 16, 'relu']\n\n\n## To log the performance of the model\ndef log_dir_name(learning_rate, dense_layers, nodes, activation):\n\t\"\"\"\n\tCreates a directory named after the set of hyperparameters that was recently selected. 
A helper function\n\tto log the results of training every constructed model.\n\t\"\"\"\t\n\t# the dir-name for the TensorBoard log-dir\n\ts = \"./2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/\"\n\tlog_dir = s.format(learning_rate, dense_layers, nodes, activation)\n\n\treturn log_dir\n\n\n## This funcion is copied from my previous solution on Grid SearchCV\ndef create_model(learning_rate, dense_layers, nodes, activation, dropout_rate=0.1):\n\t\"\"\"\n\tA helper function for the classifier to help construct a model after each run.\n\n\tlearing_rate:\tLearning-rate for the optimizer.\n\tdense_layer: \tNumber of dense layers for the sequentail model\n\tnodes:\t\t\tNumber of nodes in each inner dense layer.\n\tactivation:\t\tActivation function for all layers.\n\tAdditionally, we can improve on this function by adding a separate activation for\n\tthe output layer.\n\t\"\"\"\n\tmodel = Sequential()\n\tglobal train_samples\n\t## Input-shape must be a tuple without the batch size.\n\tinput_shape = (1,) + train_samples.shape\n\tmodel.add(InputLayer(input_shape=(len(train_samples[0]),)))\n\t## Needful only in case of convolutional layers.\n\t# model.add(Reshape(img_shape_full))\n\tfor i in range(dense_layers):\n\t\t## Name each layer, because Keras should give them unique names.\n\t\tname = 'layer_dense_{0}'.format(i+1)\n\t\t## Add these fully-connected layers to the model.\n\t\tmodel.add(Dense(nodes, activation=activation, name=name))\n\t\tmodel.add(Dropout(dropout_rate))\n\n\t## Last output layer with softmax-activation.\n\t## Used heavily for classification.\n\tmodel.add(Dense(1, activation='sigmoid'))\n\n\toptimizer = Adam(lr=learning_rate)\n\t## Compile the model\n\tmodel.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])\n\n\treturn model\n\n \n## Before we start training any model, let's first save the path where we'll store the best-performing model.\nbest_model_path = '19_best_model.keras'\n## A global variable to keep track of the best obtained accuracy.\nbest_auc = 0.0\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate, dense_layers, nodes, activation):\n\t\"\"\"\n\t\"\"\"\n\t# Print the selected hyperparameters.\n\tprint('learning rate: {0:.1f}'.format(learning_rate))\n\tprint('num_dense_layers:', dense_layers)\n\tprint('num_nodes:', nodes)\n\tprint('activation:', activation)\n\tprint(\"\")\n\t## Create the neural network with these hyperparameters.\n\tmodel = create_model(learning_rate, dense_layers, nodes, activation)\n\t## Create log files for the model.\n\t## Not important for now!\n\t# callback_log = TensorBoard(\n\t# \tlog_dir=log_dir,\n\t# \thistogram_freq=0,\n\t# \tbatch_size=32,\n\t# \twrite_graph=True,\n\t# \twrite_grads=False,\n\t# \twrite_images=False)\n\t## Use Keras to train the model.\n\thistory = model.fit(x=train_samples,\n\t\ty=train_labels,\n\t\tepochs=10,\n\t\tbatch_size=int(4010/4))\n\t\t#callbacks=[callback_log])\n\t## Get the classification accuracy on the validation set after the last training epoch.\n\t# accuracy = history.history['val_acc'][-1]\n\tpredictions = model.predict(valid_samples)\n\tauc = roc_auc_score(valid_labels, predictions)\n\t## Print the calssification accuracy.\n\tprint('')\n\tprint(\"AUC = : {0:.2%}\".format(auc))\n\tprint('')\n\n\t## Save the model if it improves on the best-found performance.\n\t## We use the global keyword so we update the variable outside of this function.\n\tglobal best_auc\n\tif auc > best_auc:\n\t\t## Save the new model to harddisk.\n\t\tmodel.save(best_model_path)\n\t\t## 
Update the classification accuracy.\n\t\tbest_auc = auc\n\n\t## Delete the Keras model with these heyper parameters from memory.\n\n\t## Also clear the session.\n\tdel model\n# tf.keras.clear_session()\n\tclear_session()\n\n\treturn -auc\n\n## Now we run our fitness function with the default hyperparameters that we set earlier.\n## That's the reason for the @ annotation \nfitness(x=default_parameters)\n\nsearch_result = gp_minimize(func=fitness,\n\tdimensions=dimensions,\n\tacq_func='EI', # Expected Improvement.\n\tn_calls=40,\n\tx0=default_parameters)\n\n## Report Result of the optimizer.\nprint(\"Best serach results:\")\nprint(search_result.x)\nprint(search_result.space)\nprint(\"Lowest fitness value:\")\nprint(search_result.fun)\nzipped = sorted(zip(search_result.func_vals, search_result.x_iters))\nprint(zipped)\n\n## Write sorted results to csv file for exporting\nof = open('output_bayesian_optimization.csv', 'w')\nheader=\"Fit Value; Learning Rate; Dense Layers; Num. Neurons; Activation\\n\"\nof.write(header)\nfor i in zipped:\n row = \"{0}; {1}; {2}; {3}; {4};\\n\".format(i[0], i[1][0], i[1][1], i[1][2], i[1][3])\n of.write(row)\nof.close()\n\n## Plot results of optimizer\ndim_names = ['learning_rate', 'dense_layers', 'nodes', 'activation']\nplot_objective(search_result, dimensions=dim_names)\nplot_evaluations(search_result)\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
{"filter":false,"title":"settings.py","tooltip":"/mysite/settings.py","undoManager":{"mark":53,"position":53,"stack":[[{"start":{"row":107,"column":13},"end":{"row":107,"column":16},"action":"remove","lines":["UTC"],"id":2},{"start":{"row":107,"column":13},"end":{"row":107,"column":23},"action":"insert","lines":["Asia/Tokyo"]}],[{"start":{"row":105,"column":17},"end":{"row":105,"column":22},"action":"remove","lines":["en-us"],"id":3},{"start":{"row":105,"column":17},"end":{"row":105,"column":18},"action":"insert","lines":["j"]},{"start":{"row":105,"column":18},"end":{"row":105,"column":19},"action":"insert","lines":["a"]}],[{"start":{"row":120,"column":0},"end":{"row":120,"column":46},"action":"insert","lines":["STATIC_ROOT = os.path.join(BASE_DIR, 'static')"],"id":4}],[{"start":{"row":27,"column":17},"end":{"row":27,"column":51},"action":"insert","lines":["'127.0.0.1', '.pythonanywhere.com'"],"id":5}],[{"start":{"row":27,"column":51},"end":{"row":27,"column":52},"action":"insert","lines":[","],"id":6}],[{"start":{"row":27,"column":52},"end":{"row":27,"column":53},"action":"insert","lines":[" "],"id":7}],[{"start":{"row":27,"column":53},"end":{"row":27,"column":55},"action":"insert","lines":["''"],"id":8}],[{"start":{"row":27,"column":54},"end":{"row":27,"column":55},"action":"insert","lines":["."],"id":9},{"start":{"row":27,"column":55},"end":{"row":27,"column":56},"action":"insert","lines":["a"]},{"start":{"row":27,"column":56},"end":{"row":27,"column":57},"action":"insert","lines":["m"]},{"start":{"row":27,"column":57},"end":{"row":27,"column":58},"action":"insert","lines":["a"]},{"start":{"row":27,"column":58},"end":{"row":27,"column":59},"action":"insert","lines":["z"]},{"start":{"row":27,"column":59},"end":{"row":27,"column":60},"action":"insert","lines":["o"]}],[{"start":{"row":27,"column":60},"end":{"row":27,"column":61},"action":"insert","lines":["n"],"id":10}],[{"start":{"row":27,"column":61},"end":{"row":27,"column":62},"action":"insert","lines":["a"],"id":11},{"start":{"row":27,"column":62},"end":{"row":27,"column":63},"action":"insert","lines":["w"]},{"start":{"row":27,"column":63},"end":{"row":27,"column":64},"action":"insert","lines":["s"]},{"start":{"row":27,"column":64},"end":{"row":27,"column":65},"action":"insert","lines":["."]},{"start":{"row":27,"column":65},"end":{"row":27,"column":66},"action":"insert","lines":["c"]},{"start":{"row":27,"column":66},"end":{"row":27,"column":67},"action":"insert","lines":["o"]}],[{"start":{"row":27,"column":67},"end":{"row":27,"column":68},"action":"insert","lines":["m"],"id":12}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":53},"action":"remove","lines":["'.pythonanywhere.com', "],"id":13}],[{"start":{"row":27,"column":46},"end":{"row":27,"column":47},"action":"insert","lines":[","],"id":14}],[{"start":{"row":27,"column":47},"end":{"row":27,"column":48},"action":"insert","lines":[" "],"id":15}],[{"start":{"row":27,"column":48},"end":{"row":27,"column":69},"action":"insert","lines":["'.pythonanywhere.com'"],"id":16}],[{"start":{"row":39,"column":0},"end":{"row":40,"column":0},"action":"insert","lines":["",""],"id":17}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":4},"action":"insert","lines":[" 
"],"id":18}],[{"start":{"row":39,"column":4},"end":{"row":39,"column":6},"action":"insert","lines":["''"],"id":19}],[{"start":{"row":39,"column":5},"end":{"row":39,"column":6},"action":"insert","lines":["b"],"id":20},{"start":{"row":39,"column":6},"end":{"row":39,"column":7},"action":"insert","lines":["l"]},{"start":{"row":39,"column":7},"end":{"row":39,"column":8},"action":"insert","lines":["o"]},{"start":{"row":39,"column":8},"end":{"row":39,"column":9},"action":"insert","lines":["g"]},{"start":{"row":39,"column":9},"end":{"row":39,"column":10},"action":"insert","lines":["."]},{"start":{"row":39,"column":10},"end":{"row":39,"column":11},"action":"insert","lines":["a"]},{"start":{"row":39,"column":11},"end":{"row":39,"column":12},"action":"insert","lines":["p"]}],[{"start":{"row":39,"column":12},"end":{"row":39,"column":13},"action":"insert","lines":["p"],"id":21},{"start":{"row":39,"column":13},"end":{"row":39,"column":14},"action":"insert","lines":["s"]},{"start":{"row":39,"column":14},"end":{"row":39,"column":15},"action":"insert","lines":["."]}],[{"start":{"row":39,"column":15},"end":{"row":39,"column":16},"action":"insert","lines":["B"],"id":22},{"start":{"row":39,"column":16},"end":{"row":39,"column":17},"action":"insert","lines":["l"]},{"start":{"row":39,"column":17},"end":{"row":39,"column":18},"action":"insert","lines":["o"]},{"start":{"row":39,"column":18},"end":{"row":39,"column":19},"action":"insert","lines":["g"]}],[{"start":{"row":39,"column":19},"end":{"row":39,"column":20},"action":"insert","lines":["C"],"id":23},{"start":{"row":39,"column":20},"end":{"row":39,"column":21},"action":"insert","lines":["o"]},{"start":{"row":39,"column":21},"end":{"row":39,"column":22},"action":"insert","lines":["n"]},{"start":{"row":39,"column":22},"end":{"row":39,"column":23},"action":"insert","lines":["f"]},{"start":{"row":39,"column":23},"end":{"row":39,"column":24},"action":"insert","lines":["i"]},{"start":{"row":39,"column":24},"end":{"row":39,"column":25},"action":"insert","lines":["g"]}],[{"start":{"row":39,"column":26},"end":{"row":39,"column":27},"action":"insert","lines":[","],"id":24}],[{"start":{"row":27,"column":47},"end":{"row":27,"column":69},"action":"remove","lines":[" '.pythonanywhere.com'"],"id":25}],[{"start":{"row":121,"column":14},"end":{"row":121,"column":46},"action":"remove","lines":["os.path.join(BASE_DIR, 'static')"],"id":26},{"start":{"row":121,"column":14},"end":{"row":121,"column":15},"action":"insert","lines":["'"]}],[{"start":{"row":121,"column":15},"end":{"row":121,"column":17},"action":"insert","lines":["''"],"id":27}],[{"start":{"row":121,"column":15},"end":{"row":121,"column":17},"action":"remove","lines":["''"],"id":28}],[{"start":{"row":121,"column":14},"end":{"row":121,"column":15},"action":"remove","lines":["'"],"id":29}],[{"start":{"row":121,"column":14},"end":{"row":121,"column":16},"action":"insert","lines":["''"],"id":30}],[{"start":{"row":121,"column":15},"end":{"row":121,"column":16},"action":"remove","lines":["'"],"id":31},{"start":{"row":121,"column":14},"end":{"row":121,"column":15},"action":"remove","lines":["'"]}],[{"start":{"row":121,"column":14},"end":{"row":121,"column":47},"action":"insert","lines":["os.path.join(BASE_DIR, 
\"static/\")"],"id":32}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":33}],[{"start":{"row":27,"column":31},"end":{"row":27,"column":33},"action":"remove","lines":["''"],"id":34},{"start":{"row":27,"column":30},"end":{"row":27,"column":31},"action":"remove","lines":["'"]}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":35}],[{"start":{"row":27,"column":31},"end":{"row":27,"column":32},"action":"insert","lines":[","],"id":36}],[{"start":{"row":27,"column":31},"end":{"row":27,"column":32},"action":"remove","lines":[","],"id":37}],[{"start":{"row":27,"column":31},"end":{"row":27,"column":32},"action":"remove","lines":["'"],"id":38},{"start":{"row":27,"column":30},"end":{"row":27,"column":31},"action":"remove","lines":["'"]}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":39}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"remove","lines":["''"],"id":40}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":41}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":42}],[{"start":{"row":27,"column":32},"end":{"row":27,"column":34},"action":"remove","lines":["''"],"id":43}],[{"start":{"row":27,"column":32},"end":{"row":27,"column":33},"action":"insert","lines":[" "],"id":44}],[{"start":{"row":27,"column":33},"end":{"row":27,"column":35},"action":"insert","lines":["''"],"id":45}],[{"start":{"row":27,"column":33},"end":{"row":27,"column":35},"action":"remove","lines":["''"],"id":46}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":33},"action":"remove","lines":["'' "],"id":47}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":48}],[{"start":{"row":27,"column":32},"end":{"row":27,"column":33},"action":"insert","lines":[" "],"id":49}],[{"start":{"row":27,"column":31},"end":{"row":27,"column":32},"action":"remove","lines":["'"],"id":50}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":51},"action":"insert","lines":["'.pythonanywhere.com'"],"id":51}],[{"start":{"row":27,"column":51},"end":{"row":27,"column":52},"action":"insert","lines":[","],"id":52}],[{"start":{"row":27,"column":52},"end":{"row":27,"column":53},"action":"insert","lines":[" "],"id":53}],[{"start":{"row":27,"column":54},"end":{"row":27,"column":55},"action":"remove","lines":[" "],"id":54}],[{"start":{"row":27,"column":69},"end":{"row":27,"column":70},"action":"remove","lines":[","],"id":55}]]},"ace":{"folds":[],"scrolltop":1421.5,"scrollleft":0,"selection":{"start":{"row":121,"column":47},"end":{"row":121,"column":47},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1624160953179,"hash":"4d0060b102ea75450e9a622253c7edd2a29aa301"}
|
normal
|
{
"blob_id": "f6b38698dbed6c1a48faa86183b601f855a7f737",
"index": 5728,
"step-1": "<mask token>\n",
"step-2": "{'filter': false, 'title': 'settings.py', 'tooltip': '/mysite/settings.py',\n 'undoManager': {'mark': 53, 'position': 53, 'stack': [[{'start': {'row':\n 107, 'column': 13}, 'end': {'row': 107, 'column': 16}, 'action':\n 'remove', 'lines': ['UTC'], 'id': 2}, {'start': {'row': 107, 'column': \n 13}, 'end': {'row': 107, 'column': 23}, 'action': 'insert', 'lines': [\n 'Asia/Tokyo']}], [{'start': {'row': 105, 'column': 17}, 'end': {'row': \n 105, 'column': 22}, 'action': 'remove', 'lines': ['en-us'], 'id': 3}, {\n 'start': {'row': 105, 'column': 17}, 'end': {'row': 105, 'column': 18},\n 'action': 'insert', 'lines': ['j']}, {'start': {'row': 105, 'column': \n 18}, 'end': {'row': 105, 'column': 19}, 'action': 'insert', 'lines': [\n 'a']}], [{'start': {'row': 120, 'column': 0}, 'end': {'row': 120,\n 'column': 46}, 'action': 'insert', 'lines': [\n \"STATIC_ROOT = os.path.join(BASE_DIR, 'static')\"], 'id': 4}], [{'start':\n {'row': 27, 'column': 17}, 'end': {'row': 27, 'column': 51}, 'action':\n 'insert', 'lines': [\"'127.0.0.1', '.pythonanywhere.com'\"], 'id': 5}], [\n {'start': {'row': 27, 'column': 51}, 'end': {'row': 27, 'column': 52},\n 'action': 'insert', 'lines': [','], 'id': 6}], [{'start': {'row': 27,\n 'column': 52}, 'end': {'row': 27, 'column': 53}, 'action': 'insert',\n 'lines': [' '], 'id': 7}], [{'start': {'row': 27, 'column': 53}, 'end':\n {'row': 27, 'column': 55}, 'action': 'insert', 'lines': [\"''\"], 'id': 8\n }], [{'start': {'row': 27, 'column': 54}, 'end': {'row': 27, 'column': \n 55}, 'action': 'insert', 'lines': ['.'], 'id': 9}, {'start': {'row': 27,\n 'column': 55}, 'end': {'row': 27, 'column': 56}, 'action': 'insert',\n 'lines': ['a']}, {'start': {'row': 27, 'column': 56}, 'end': {'row': 27,\n 'column': 57}, 'action': 'insert', 'lines': ['m']}, {'start': {'row': \n 27, 'column': 57}, 'end': {'row': 27, 'column': 58}, 'action': 'insert',\n 'lines': ['a']}, {'start': {'row': 27, 'column': 58}, 'end': {'row': 27,\n 'column': 59}, 'action': 'insert', 'lines': ['z']}, {'start': {'row': \n 27, 'column': 59}, 'end': {'row': 27, 'column': 60}, 'action': 'insert',\n 'lines': ['o']}], [{'start': {'row': 27, 'column': 60}, 'end': {'row': \n 27, 'column': 61}, 'action': 'insert', 'lines': ['n'], 'id': 10}], [{\n 'start': {'row': 27, 'column': 61}, 'end': {'row': 27, 'column': 62},\n 'action': 'insert', 'lines': ['a'], 'id': 11}, {'start': {'row': 27,\n 'column': 62}, 'end': {'row': 27, 'column': 63}, 'action': 'insert',\n 'lines': ['w']}, {'start': {'row': 27, 'column': 63}, 'end': {'row': 27,\n 'column': 64}, 'action': 'insert', 'lines': ['s']}, {'start': {'row': \n 27, 'column': 64}, 'end': {'row': 27, 'column': 65}, 'action': 'insert',\n 'lines': ['.']}, {'start': {'row': 27, 'column': 65}, 'end': {'row': 27,\n 'column': 66}, 'action': 'insert', 'lines': ['c']}, {'start': {'row': \n 27, 'column': 66}, 'end': {'row': 27, 'column': 67}, 'action': 'insert',\n 'lines': ['o']}], [{'start': {'row': 27, 'column': 67}, 'end': {'row': \n 27, 'column': 68}, 'action': 'insert', 'lines': ['m'], 'id': 12}], [{\n 'start': {'row': 27, 'column': 30}, 'end': {'row': 27, 'column': 53},\n 'action': 'remove', 'lines': [\"'.pythonanywhere.com', \"], 'id': 13}], [\n {'start': {'row': 27, 'column': 46}, 'end': {'row': 27, 'column': 47},\n 'action': 'insert', 'lines': [','], 'id': 14}], [{'start': {'row': 27,\n 'column': 47}, 'end': {'row': 27, 'column': 48}, 'action': 'insert',\n 'lines': [' '], 'id': 15}], [{'start': {'row': 27, 'column': 48}, 'end':\n {'row': 27, 'column': 69}, 'action': 
'insert', 'lines': [\n \"'.pythonanywhere.com'\"], 'id': 16}], [{'start': {'row': 39, 'column': \n 0}, 'end': {'row': 40, 'column': 0}, 'action': 'insert', 'lines': ['',\n ''], 'id': 17}], [{'start': {'row': 39, 'column': 0}, 'end': {'row': 39,\n 'column': 4}, 'action': 'insert', 'lines': [' '], 'id': 18}], [{\n 'start': {'row': 39, 'column': 4}, 'end': {'row': 39, 'column': 6},\n 'action': 'insert', 'lines': [\"''\"], 'id': 19}], [{'start': {'row': 39,\n 'column': 5}, 'end': {'row': 39, 'column': 6}, 'action': 'insert',\n 'lines': ['b'], 'id': 20}, {'start': {'row': 39, 'column': 6}, 'end': {\n 'row': 39, 'column': 7}, 'action': 'insert', 'lines': ['l']}, {'start':\n {'row': 39, 'column': 7}, 'end': {'row': 39, 'column': 8}, 'action':\n 'insert', 'lines': ['o']}, {'start': {'row': 39, 'column': 8}, 'end': {\n 'row': 39, 'column': 9}, 'action': 'insert', 'lines': ['g']}, {'start':\n {'row': 39, 'column': 9}, 'end': {'row': 39, 'column': 10}, 'action':\n 'insert', 'lines': ['.']}, {'start': {'row': 39, 'column': 10}, 'end':\n {'row': 39, 'column': 11}, 'action': 'insert', 'lines': ['a']}, {\n 'start': {'row': 39, 'column': 11}, 'end': {'row': 39, 'column': 12},\n 'action': 'insert', 'lines': ['p']}], [{'start': {'row': 39, 'column': \n 12}, 'end': {'row': 39, 'column': 13}, 'action': 'insert', 'lines': [\n 'p'], 'id': 21}, {'start': {'row': 39, 'column': 13}, 'end': {'row': 39,\n 'column': 14}, 'action': 'insert', 'lines': ['s']}, {'start': {'row': \n 39, 'column': 14}, 'end': {'row': 39, 'column': 15}, 'action': 'insert',\n 'lines': ['.']}], [{'start': {'row': 39, 'column': 15}, 'end': {'row': \n 39, 'column': 16}, 'action': 'insert', 'lines': ['B'], 'id': 22}, {\n 'start': {'row': 39, 'column': 16}, 'end': {'row': 39, 'column': 17},\n 'action': 'insert', 'lines': ['l']}, {'start': {'row': 39, 'column': 17\n }, 'end': {'row': 39, 'column': 18}, 'action': 'insert', 'lines': ['o']\n }, {'start': {'row': 39, 'column': 18}, 'end': {'row': 39, 'column': 19\n }, 'action': 'insert', 'lines': ['g']}], [{'start': {'row': 39,\n 'column': 19}, 'end': {'row': 39, 'column': 20}, 'action': 'insert',\n 'lines': ['C'], 'id': 23}, {'start': {'row': 39, 'column': 20}, 'end':\n {'row': 39, 'column': 21}, 'action': 'insert', 'lines': ['o']}, {\n 'start': {'row': 39, 'column': 21}, 'end': {'row': 39, 'column': 22},\n 'action': 'insert', 'lines': ['n']}, {'start': {'row': 39, 'column': 22\n }, 'end': {'row': 39, 'column': 23}, 'action': 'insert', 'lines': ['f']\n }, {'start': {'row': 39, 'column': 23}, 'end': {'row': 39, 'column': 24\n }, 'action': 'insert', 'lines': ['i']}, {'start': {'row': 39, 'column':\n 24}, 'end': {'row': 39, 'column': 25}, 'action': 'insert', 'lines': [\n 'g']}], [{'start': {'row': 39, 'column': 26}, 'end': {'row': 39,\n 'column': 27}, 'action': 'insert', 'lines': [','], 'id': 24}], [{\n 'start': {'row': 27, 'column': 47}, 'end': {'row': 27, 'column': 69},\n 'action': 'remove', 'lines': [\" '.pythonanywhere.com'\"], 'id': 25}], [{\n 'start': {'row': 121, 'column': 14}, 'end': {'row': 121, 'column': 46},\n 'action': 'remove', 'lines': [\"os.path.join(BASE_DIR, 'static')\"], 'id':\n 26}, {'start': {'row': 121, 'column': 14}, 'end': {'row': 121, 'column':\n 15}, 'action': 'insert', 'lines': [\"'\"]}], [{'start': {'row': 121,\n 'column': 15}, 'end': {'row': 121, 'column': 17}, 'action': 'insert',\n 'lines': [\"''\"], 'id': 27}], [{'start': {'row': 121, 'column': 15},\n 'end': {'row': 121, 'column': 17}, 'action': 'remove', 'lines': [\"''\"],\n 'id': 28}], [{'start': {'row': 121, 'column': 
14}, 'end': {'row': 121,\n 'column': 15}, 'action': 'remove', 'lines': [\"'\"], 'id': 29}], [{\n 'start': {'row': 121, 'column': 14}, 'end': {'row': 121, 'column': 16},\n 'action': 'insert', 'lines': [\"''\"], 'id': 30}], [{'start': {'row': 121,\n 'column': 15}, 'end': {'row': 121, 'column': 16}, 'action': 'remove',\n 'lines': [\"'\"], 'id': 31}, {'start': {'row': 121, 'column': 14}, 'end':\n {'row': 121, 'column': 15}, 'action': 'remove', 'lines': [\"'\"]}], [{\n 'start': {'row': 121, 'column': 14}, 'end': {'row': 121, 'column': 47},\n 'action': 'insert', 'lines': ['os.path.join(BASE_DIR, \"static/\")'],\n 'id': 32}], [{'start': {'row': 27, 'column': 30}, 'end': {'row': 27,\n 'column': 32}, 'action': 'insert', 'lines': [\"''\"], 'id': 33}], [{\n 'start': {'row': 27, 'column': 31}, 'end': {'row': 27, 'column': 33},\n 'action': 'remove', 'lines': [\"''\"], 'id': 34}, {'start': {'row': 27,\n 'column': 30}, 'end': {'row': 27, 'column': 31}, 'action': 'remove',\n 'lines': [\"'\"]}], [{'start': {'row': 27, 'column': 30}, 'end': {'row': \n 27, 'column': 32}, 'action': 'insert', 'lines': [\"''\"], 'id': 35}], [{\n 'start': {'row': 27, 'column': 31}, 'end': {'row': 27, 'column': 32},\n 'action': 'insert', 'lines': [','], 'id': 36}], [{'start': {'row': 27,\n 'column': 31}, 'end': {'row': 27, 'column': 32}, 'action': 'remove',\n 'lines': [','], 'id': 37}], [{'start': {'row': 27, 'column': 31}, 'end':\n {'row': 27, 'column': 32}, 'action': 'remove', 'lines': [\"'\"], 'id': 38\n }, {'start': {'row': 27, 'column': 30}, 'end': {'row': 27, 'column': 31\n }, 'action': 'remove', 'lines': [\"'\"]}], [{'start': {'row': 27,\n 'column': 30}, 'end': {'row': 27, 'column': 32}, 'action': 'insert',\n 'lines': [\"''\"], 'id': 39}], [{'start': {'row': 27, 'column': 30},\n 'end': {'row': 27, 'column': 32}, 'action': 'remove', 'lines': [\"''\"],\n 'id': 40}], [{'start': {'row': 27, 'column': 30}, 'end': {'row': 27,\n 'column': 32}, 'action': 'insert', 'lines': [\"''\"], 'id': 41}], [{\n 'start': {'row': 27, 'column': 30}, 'end': {'row': 27, 'column': 32},\n 'action': 'insert', 'lines': [\"''\"], 'id': 42}], [{'start': {'row': 27,\n 'column': 32}, 'end': {'row': 27, 'column': 34}, 'action': 'remove',\n 'lines': [\"''\"], 'id': 43}], [{'start': {'row': 27, 'column': 32},\n 'end': {'row': 27, 'column': 33}, 'action': 'insert', 'lines': [' '],\n 'id': 44}], [{'start': {'row': 27, 'column': 33}, 'end': {'row': 27,\n 'column': 35}, 'action': 'insert', 'lines': [\"''\"], 'id': 45}], [{\n 'start': {'row': 27, 'column': 33}, 'end': {'row': 27, 'column': 35},\n 'action': 'remove', 'lines': [\"''\"], 'id': 46}], [{'start': {'row': 27,\n 'column': 30}, 'end': {'row': 27, 'column': 33}, 'action': 'remove',\n 'lines': [\"'' \"], 'id': 47}], [{'start': {'row': 27, 'column': 30},\n 'end': {'row': 27, 'column': 32}, 'action': 'insert', 'lines': [\"''\"],\n 'id': 48}], [{'start': {'row': 27, 'column': 32}, 'end': {'row': 27,\n 'column': 33}, 'action': 'insert', 'lines': [' '], 'id': 49}], [{\n 'start': {'row': 27, 'column': 31}, 'end': {'row': 27, 'column': 32},\n 'action': 'remove', 'lines': [\"'\"], 'id': 50}], [{'start': {'row': 27,\n 'column': 30}, 'end': {'row': 27, 'column': 51}, 'action': 'insert',\n 'lines': [\"'.pythonanywhere.com'\"], 'id': 51}], [{'start': {'row': 27,\n 'column': 51}, 'end': {'row': 27, 'column': 52}, 'action': 'insert',\n 'lines': [','], 'id': 52}], [{'start': {'row': 27, 'column': 52}, 'end':\n {'row': 27, 'column': 53}, 'action': 'insert', 'lines': [' '], 'id': 53\n }], [{'start': {'row': 27, 'column': 
54}, 'end': {'row': 27, 'column': \n 55}, 'action': 'remove', 'lines': [' '], 'id': 54}], [{'start': {'row':\n 27, 'column': 69}, 'end': {'row': 27, 'column': 70}, 'action': 'remove',\n 'lines': [','], 'id': 55}]]}, 'ace': {'folds': [], 'scrolltop': 1421.5,\n 'scrollleft': 0, 'selection': {'start': {'row': 121, 'column': 47},\n 'end': {'row': 121, 'column': 47}, 'isBackwards': false}, 'options': {\n 'guessTabSize': true, 'useWrapMode': false, 'wrapToView': true},\n 'firstLineState': 0}, 'timestamp': 1624160953179, 'hash':\n '4d0060b102ea75450e9a622253c7edd2a29aa301'}\n",
"step-3": "{\"filter\":false,\"title\":\"settings.py\",\"tooltip\":\"/mysite/settings.py\",\"undoManager\":{\"mark\":53,\"position\":53,\"stack\":[[{\"start\":{\"row\":107,\"column\":13},\"end\":{\"row\":107,\"column\":16},\"action\":\"remove\",\"lines\":[\"UTC\"],\"id\":2},{\"start\":{\"row\":107,\"column\":13},\"end\":{\"row\":107,\"column\":23},\"action\":\"insert\",\"lines\":[\"Asia/Tokyo\"]}],[{\"start\":{\"row\":105,\"column\":17},\"end\":{\"row\":105,\"column\":22},\"action\":\"remove\",\"lines\":[\"en-us\"],\"id\":3},{\"start\":{\"row\":105,\"column\":17},\"end\":{\"row\":105,\"column\":18},\"action\":\"insert\",\"lines\":[\"j\"]},{\"start\":{\"row\":105,\"column\":18},\"end\":{\"row\":105,\"column\":19},\"action\":\"insert\",\"lines\":[\"a\"]}],[{\"start\":{\"row\":120,\"column\":0},\"end\":{\"row\":120,\"column\":46},\"action\":\"insert\",\"lines\":[\"STATIC_ROOT = os.path.join(BASE_DIR, 'static')\"],\"id\":4}],[{\"start\":{\"row\":27,\"column\":17},\"end\":{\"row\":27,\"column\":51},\"action\":\"insert\",\"lines\":[\"'127.0.0.1', '.pythonanywhere.com'\"],\"id\":5}],[{\"start\":{\"row\":27,\"column\":51},\"end\":{\"row\":27,\"column\":52},\"action\":\"insert\",\"lines\":[\",\"],\"id\":6}],[{\"start\":{\"row\":27,\"column\":52},\"end\":{\"row\":27,\"column\":53},\"action\":\"insert\",\"lines\":[\" \"],\"id\":7}],[{\"start\":{\"row\":27,\"column\":53},\"end\":{\"row\":27,\"column\":55},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":8}],[{\"start\":{\"row\":27,\"column\":54},\"end\":{\"row\":27,\"column\":55},\"action\":\"insert\",\"lines\":[\".\"],\"id\":9},{\"start\":{\"row\":27,\"column\":55},\"end\":{\"row\":27,\"column\":56},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":27,\"column\":56},\"end\":{\"row\":27,\"column\":57},\"action\":\"insert\",\"lines\":[\"m\"]},{\"start\":{\"row\":27,\"column\":57},\"end\":{\"row\":27,\"column\":58},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":27,\"column\":58},\"end\":{\"row\":27,\"column\":59},\"action\":\"insert\",\"lines\":[\"z\"]},{\"start\":{\"row\":27,\"column\":59},\"end\":{\"row\":27,\"column\":60},\"action\":\"insert\",\"lines\":[\"o\"]}],[{\"start\":{\"row\":27,\"column\":60},\"end\":{\"row\":27,\"column\":61},\"action\":\"insert\",\"lines\":[\"n\"],\"id\":10}],[{\"start\":{\"row\":27,\"column\":61},\"end\":{\"row\":27,\"column\":62},\"action\":\"insert\",\"lines\":[\"a\"],\"id\":11},{\"start\":{\"row\":27,\"column\":62},\"end\":{\"row\":27,\"column\":63},\"action\":\"insert\",\"lines\":[\"w\"]},{\"start\":{\"row\":27,\"column\":63},\"end\":{\"row\":27,\"column\":64},\"action\":\"insert\",\"lines\":[\"s\"]},{\"start\":{\"row\":27,\"column\":64},\"end\":{\"row\":27,\"column\":65},\"action\":\"insert\",\"lines\":[\".\"]},{\"start\":{\"row\":27,\"column\":65},\"end\":{\"row\":27,\"column\":66},\"action\":\"insert\",\"lines\":[\"c\"]},{\"start\":{\"row\":27,\"column\":66},\"end\":{\"row\":27,\"column\":67},\"action\":\"insert\",\"lines\":[\"o\"]}],[{\"start\":{\"row\":27,\"column\":67},\"end\":{\"row\":27,\"column\":68},\"action\":\"insert\",\"lines\":[\"m\"],\"id\":12}],[{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":53},\"action\":\"remove\",\"lines\":[\"'.pythonanywhere.com', \"],\"id\":13}],[{\"start\":{\"row\":27,\"column\":46},\"end\":{\"row\":27,\"column\":47},\"action\":\"insert\",\"lines\":[\",\"],\"id\":14}],[{\"start\":{\"row\":27,\"column\":47},\"end\":{\"row\":27,\"column\":48},\"action\":\"insert\",\"lines\":[\" 
\"],\"id\":15}],[{\"start\":{\"row\":27,\"column\":48},\"end\":{\"row\":27,\"column\":69},\"action\":\"insert\",\"lines\":[\"'.pythonanywhere.com'\"],\"id\":16}],[{\"start\":{\"row\":39,\"column\":0},\"end\":{\"row\":40,\"column\":0},\"action\":\"insert\",\"lines\":[\"\",\"\"],\"id\":17}],[{\"start\":{\"row\":39,\"column\":0},\"end\":{\"row\":39,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":18}],[{\"start\":{\"row\":39,\"column\":4},\"end\":{\"row\":39,\"column\":6},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":19}],[{\"start\":{\"row\":39,\"column\":5},\"end\":{\"row\":39,\"column\":6},\"action\":\"insert\",\"lines\":[\"b\"],\"id\":20},{\"start\":{\"row\":39,\"column\":6},\"end\":{\"row\":39,\"column\":7},\"action\":\"insert\",\"lines\":[\"l\"]},{\"start\":{\"row\":39,\"column\":7},\"end\":{\"row\":39,\"column\":8},\"action\":\"insert\",\"lines\":[\"o\"]},{\"start\":{\"row\":39,\"column\":8},\"end\":{\"row\":39,\"column\":9},\"action\":\"insert\",\"lines\":[\"g\"]},{\"start\":{\"row\":39,\"column\":9},\"end\":{\"row\":39,\"column\":10},\"action\":\"insert\",\"lines\":[\".\"]},{\"start\":{\"row\":39,\"column\":10},\"end\":{\"row\":39,\"column\":11},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":39,\"column\":11},\"end\":{\"row\":39,\"column\":12},\"action\":\"insert\",\"lines\":[\"p\"]}],[{\"start\":{\"row\":39,\"column\":12},\"end\":{\"row\":39,\"column\":13},\"action\":\"insert\",\"lines\":[\"p\"],\"id\":21},{\"start\":{\"row\":39,\"column\":13},\"end\":{\"row\":39,\"column\":14},\"action\":\"insert\",\"lines\":[\"s\"]},{\"start\":{\"row\":39,\"column\":14},\"end\":{\"row\":39,\"column\":15},\"action\":\"insert\",\"lines\":[\".\"]}],[{\"start\":{\"row\":39,\"column\":15},\"end\":{\"row\":39,\"column\":16},\"action\":\"insert\",\"lines\":[\"B\"],\"id\":22},{\"start\":{\"row\":39,\"column\":16},\"end\":{\"row\":39,\"column\":17},\"action\":\"insert\",\"lines\":[\"l\"]},{\"start\":{\"row\":39,\"column\":17},\"end\":{\"row\":39,\"column\":18},\"action\":\"insert\",\"lines\":[\"o\"]},{\"start\":{\"row\":39,\"column\":18},\"end\":{\"row\":39,\"column\":19},\"action\":\"insert\",\"lines\":[\"g\"]}],[{\"start\":{\"row\":39,\"column\":19},\"end\":{\"row\":39,\"column\":20},\"action\":\"insert\",\"lines\":[\"C\"],\"id\":23},{\"start\":{\"row\":39,\"column\":20},\"end\":{\"row\":39,\"column\":21},\"action\":\"insert\",\"lines\":[\"o\"]},{\"start\":{\"row\":39,\"column\":21},\"end\":{\"row\":39,\"column\":22},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":39,\"column\":22},\"end\":{\"row\":39,\"column\":23},\"action\":\"insert\",\"lines\":[\"f\"]},{\"start\":{\"row\":39,\"column\":23},\"end\":{\"row\":39,\"column\":24},\"action\":\"insert\",\"lines\":[\"i\"]},{\"start\":{\"row\":39,\"column\":24},\"end\":{\"row\":39,\"column\":25},\"action\":\"insert\",\"lines\":[\"g\"]}],[{\"start\":{\"row\":39,\"column\":26},\"end\":{\"row\":39,\"column\":27},\"action\":\"insert\",\"lines\":[\",\"],\"id\":24}],[{\"start\":{\"row\":27,\"column\":47},\"end\":{\"row\":27,\"column\":69},\"action\":\"remove\",\"lines\":[\" '.pythonanywhere.com'\"],\"id\":25}],[{\"start\":{\"row\":121,\"column\":14},\"end\":{\"row\":121,\"column\":46},\"action\":\"remove\",\"lines\":[\"os.path.join(BASE_DIR, 
'static')\"],\"id\":26},{\"start\":{\"row\":121,\"column\":14},\"end\":{\"row\":121,\"column\":15},\"action\":\"insert\",\"lines\":[\"'\"]}],[{\"start\":{\"row\":121,\"column\":15},\"end\":{\"row\":121,\"column\":17},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":27}],[{\"start\":{\"row\":121,\"column\":15},\"end\":{\"row\":121,\"column\":17},\"action\":\"remove\",\"lines\":[\"''\"],\"id\":28}],[{\"start\":{\"row\":121,\"column\":14},\"end\":{\"row\":121,\"column\":15},\"action\":\"remove\",\"lines\":[\"'\"],\"id\":29}],[{\"start\":{\"row\":121,\"column\":14},\"end\":{\"row\":121,\"column\":16},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":30}],[{\"start\":{\"row\":121,\"column\":15},\"end\":{\"row\":121,\"column\":16},\"action\":\"remove\",\"lines\":[\"'\"],\"id\":31},{\"start\":{\"row\":121,\"column\":14},\"end\":{\"row\":121,\"column\":15},\"action\":\"remove\",\"lines\":[\"'\"]}],[{\"start\":{\"row\":121,\"column\":14},\"end\":{\"row\":121,\"column\":47},\"action\":\"insert\",\"lines\":[\"os.path.join(BASE_DIR, \\\"static/\\\")\"],\"id\":32}],[{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":32},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":33}],[{\"start\":{\"row\":27,\"column\":31},\"end\":{\"row\":27,\"column\":33},\"action\":\"remove\",\"lines\":[\"''\"],\"id\":34},{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":31},\"action\":\"remove\",\"lines\":[\"'\"]}],[{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":32},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":35}],[{\"start\":{\"row\":27,\"column\":31},\"end\":{\"row\":27,\"column\":32},\"action\":\"insert\",\"lines\":[\",\"],\"id\":36}],[{\"start\":{\"row\":27,\"column\":31},\"end\":{\"row\":27,\"column\":32},\"action\":\"remove\",\"lines\":[\",\"],\"id\":37}],[{\"start\":{\"row\":27,\"column\":31},\"end\":{\"row\":27,\"column\":32},\"action\":\"remove\",\"lines\":[\"'\"],\"id\":38},{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":31},\"action\":\"remove\",\"lines\":[\"'\"]}],[{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":32},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":39}],[{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":32},\"action\":\"remove\",\"lines\":[\"''\"],\"id\":40}],[{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":32},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":41}],[{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":32},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":42}],[{\"start\":{\"row\":27,\"column\":32},\"end\":{\"row\":27,\"column\":34},\"action\":\"remove\",\"lines\":[\"''\"],\"id\":43}],[{\"start\":{\"row\":27,\"column\":32},\"end\":{\"row\":27,\"column\":33},\"action\":\"insert\",\"lines\":[\" \"],\"id\":44}],[{\"start\":{\"row\":27,\"column\":33},\"end\":{\"row\":27,\"column\":35},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":45}],[{\"start\":{\"row\":27,\"column\":33},\"end\":{\"row\":27,\"column\":35},\"action\":\"remove\",\"lines\":[\"''\"],\"id\":46}],[{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":33},\"action\":\"remove\",\"lines\":[\"'' \"],\"id\":47}],[{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":32},\"action\":\"insert\",\"lines\":[\"''\"],\"id\":48}],[{\"start\":{\"row\":27,\"column\":32},\"end\":{\"row\":27,\"column\":33},\"action\":\"insert\",\"lines\":[\" 
\"],\"id\":49}],[{\"start\":{\"row\":27,\"column\":31},\"end\":{\"row\":27,\"column\":32},\"action\":\"remove\",\"lines\":[\"'\"],\"id\":50}],[{\"start\":{\"row\":27,\"column\":30},\"end\":{\"row\":27,\"column\":51},\"action\":\"insert\",\"lines\":[\"'.pythonanywhere.com'\"],\"id\":51}],[{\"start\":{\"row\":27,\"column\":51},\"end\":{\"row\":27,\"column\":52},\"action\":\"insert\",\"lines\":[\",\"],\"id\":52}],[{\"start\":{\"row\":27,\"column\":52},\"end\":{\"row\":27,\"column\":53},\"action\":\"insert\",\"lines\":[\" \"],\"id\":53}],[{\"start\":{\"row\":27,\"column\":54},\"end\":{\"row\":27,\"column\":55},\"action\":\"remove\",\"lines\":[\" \"],\"id\":54}],[{\"start\":{\"row\":27,\"column\":69},\"end\":{\"row\":27,\"column\":70},\"action\":\"remove\",\"lines\":[\",\"],\"id\":55}]]},\"ace\":{\"folds\":[],\"scrolltop\":1421.5,\"scrollleft\":0,\"selection\":{\"start\":{\"row\":121,\"column\":47},\"end\":{\"row\":121,\"column\":47},\"isBackwards\":false},\"options\":{\"guessTabSize\":true,\"useWrapMode\":false,\"wrapToView\":true},\"firstLineState\":0},\"timestamp\":1624160953179,\"hash\":\"4d0060b102ea75450e9a622253c7edd2a29aa301\"}",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Search_record(models.Model):
title = models.CharField(max_length=100)
url = models.CharField(max_length=100)
searchCount = models.IntegerField(blank=True)
zhihu_type = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return self.title
class DoubanTopic(models.Model):
group_id = models.CharField(max_length=10)
url = models.CharField(max_length=100)
title = models.CharField(max_length=100)
create_time = models.CharField(max_length=20)
author_name = models.CharField(max_length=50)
author_url = models.CharField(max_length=200)
user_img_small = models.CharField(max_length=200)
visible = models.BooleanField(default=True)
def __unicode__(self):
return self.title
class ZhihuSubject(models.Model):
title = models.CharField(max_length=200)
url = models.CharField(max_length=100)
zhihu_type = models.IntegerField()
def __unicode__(self):
return self.title
class ZhihuQuestion(models.Model):
subject = models.ForeignKey(ZhihuSubject, related_name='subject_question')
answer_url = models.CharField(max_length=200)
author = models.CharField(max_length=100)
author_url = models.CharField(max_length=200, null=True)
title = models.CharField(max_length=100, default='')
def __unicode__(self):
return self.title
class ZhihuImage(models.Model):
question = models.ForeignKey(ZhihuQuestion, related_name='question_image')
origin_url = models.CharField(max_length=200)
def __unicode__(self):
return self.origin_url
class DoubanImage(models.Model):
topic = models.ForeignKey(DoubanTopic, related_name='topic_image')
origin_url = models.CharField(max_length=200)
cos_url = models.CharField(max_length=200, default='', blank=True)
type = models.IntegerField(default=0)
def __unicode__(self):
return self.origin_url
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Comment(models.Model):
article = models.ForeignKey(Article, related_name='article_comment')
content = models.TextField(max_length=1000, default='')
create_time = models.DateTimeField(auto_now_add=True)
user_id = models.CharField(max_length=100, blank=False)
def __unicode__(self):
return self.content
class ZoneSubject(models.Model):
title = models.CharField(max_length=20, default='', blank=True)
image_url = models.CharField(max_length=500)
create_time = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return self.image_url
class Search_record(models.Model):
title = models.CharField(max_length=100)
url = models.CharField(max_length=100)
searchCount = models.IntegerField(blank=True)
zhihu_type = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return self.title
class DoubanTopic(models.Model):
group_id = models.CharField(max_length=10)
url = models.CharField(max_length=100)
title = models.CharField(max_length=100)
create_time = models.CharField(max_length=20)
author_name = models.CharField(max_length=50)
author_url = models.CharField(max_length=200)
user_img_small = models.CharField(max_length=200)
visible = models.BooleanField(default=True)
def __unicode__(self):
return self.title
class ZhihuSubject(models.Model):
title = models.CharField(max_length=200)
url = models.CharField(max_length=100)
zhihu_type = models.IntegerField()
def __unicode__(self):
return self.title
class ZhihuQuestion(models.Model):
subject = models.ForeignKey(ZhihuSubject, related_name='subject_question')
answer_url = models.CharField(max_length=200)
author = models.CharField(max_length=100)
author_url = models.CharField(max_length=200, null=True)
title = models.CharField(max_length=100, default='')
def __unicode__(self):
return self.title
class ZhihuImage(models.Model):
question = models.ForeignKey(ZhihuQuestion, related_name='question_image')
origin_url = models.CharField(max_length=200)
def __unicode__(self):
return self.origin_url
class DoubanImage(models.Model):
topic = models.ForeignKey(DoubanTopic, related_name='topic_image')
origin_url = models.CharField(max_length=200)
cos_url = models.CharField(max_length=200, default='', blank=True)
type = models.IntegerField(default=0)
def __unicode__(self):
return self.origin_url
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Article(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Comment(models.Model):
article = models.ForeignKey(Article, related_name='article_comment')
content = models.TextField(max_length=1000, default='')
create_time = models.DateTimeField(auto_now_add=True)
user_id = models.CharField(max_length=100, blank=False)
def __unicode__(self):
return self.content
class ZoneSubject(models.Model):
title = models.CharField(max_length=20, default='', blank=True)
image_url = models.CharField(max_length=500)
create_time = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return self.image_url
class Search_record(models.Model):
title = models.CharField(max_length=100)
url = models.CharField(max_length=100)
searchCount = models.IntegerField(blank=True)
zhihu_type = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return self.title
class DoubanTopic(models.Model):
group_id = models.CharField(max_length=10)
url = models.CharField(max_length=100)
title = models.CharField(max_length=100)
create_time = models.CharField(max_length=20)
author_name = models.CharField(max_length=50)
author_url = models.CharField(max_length=200)
user_img_small = models.CharField(max_length=200)
visible = models.BooleanField(default=True)
def __unicode__(self):
return self.title
class ZhihuSubject(models.Model):
title = models.CharField(max_length=200)
url = models.CharField(max_length=100)
zhihu_type = models.IntegerField()
def __unicode__(self):
return self.title
class ZhihuQuestion(models.Model):
subject = models.ForeignKey(ZhihuSubject, related_name='subject_question')
answer_url = models.CharField(max_length=200)
author = models.CharField(max_length=100)
author_url = models.CharField(max_length=200, null=True)
title = models.CharField(max_length=100, default='')
def __unicode__(self):
return self.title
class ZhihuImage(models.Model):
question = models.ForeignKey(ZhihuQuestion, related_name='question_image')
origin_url = models.CharField(max_length=200)
def __unicode__(self):
return self.origin_url
class DoubanImage(models.Model):
topic = models.ForeignKey(DoubanTopic, related_name='topic_image')
origin_url = models.CharField(max_length=200)
cos_url = models.CharField(max_length=200, default='', blank=True)
type = models.IntegerField(default=0)
def __unicode__(self):
return self.origin_url
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Article(models.Model):
title = models.CharField(max_length=200)
author = models.CharField(max_length=100, default='admin')
content = models.TextField(max_length=5000)
hide_content = models.TextField(max_length=1000, default='', blank=True)
create_time = models.DateTimeField(auto_now_add=True)
visible = models.BooleanField(default=True)
description = models.CharField(max_length=200, default='', blank=True)
cover = models.CharField(max_length=100, default='/')
def __unicode__(self):
return self.title
class Comment(models.Model):
article = models.ForeignKey(Article, related_name='article_comment')
content = models.TextField(max_length=1000, default='')
create_time = models.DateTimeField(auto_now_add=True)
user_id = models.CharField(max_length=100, blank=False)
def __unicode__(self):
return self.content
class ZoneSubject(models.Model):
title = models.CharField(max_length=20, default='', blank=True)
image_url = models.CharField(max_length=500)
create_time = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return self.image_url
class Search_record(models.Model):
title = models.CharField(max_length=100)
url = models.CharField(max_length=100)
searchCount = models.IntegerField(blank=True)
zhihu_type = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return self.title
class DoubanTopic(models.Model):
group_id = models.CharField(max_length=10)
url = models.CharField(max_length=100)
title = models.CharField(max_length=100)
create_time = models.CharField(max_length=20)
author_name = models.CharField(max_length=50)
author_url = models.CharField(max_length=200)
user_img_small = models.CharField(max_length=200)
visible = models.BooleanField(default=True)
def __unicode__(self):
return self.title
class ZhihuSubject(models.Model):
title = models.CharField(max_length=200)
url = models.CharField(max_length=100)
zhihu_type = models.IntegerField()
def __unicode__(self):
return self.title
class ZhihuQuestion(models.Model):
subject = models.ForeignKey(ZhihuSubject, related_name='subject_question')
answer_url = models.CharField(max_length=200)
author = models.CharField(max_length=100)
author_url = models.CharField(max_length=200, null=True)
title = models.CharField(max_length=100, default='')
def __unicode__(self):
return self.title
class ZhihuImage(models.Model):
question = models.ForeignKey(ZhihuQuestion, related_name='question_image')
origin_url = models.CharField(max_length=200)
def __unicode__(self):
return self.origin_url
class DoubanImage(models.Model):
topic = models.ForeignKey(DoubanTopic, related_name='topic_image')
origin_url = models.CharField(max_length=200)
cos_url = models.CharField(max_length=200, default='', blank=True)
type = models.IntegerField(default=0)
def __unicode__(self):
return self.origin_url
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
# Create your models here.
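# Models for the blog app: articles, comments, zone images, search records,
# and topics/questions/images gathered from Douban and Zhihu.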
class Article(models.Model):
title = models.CharField(max_length=200)
author = models.CharField(max_length=100, default='admin')
content = models.TextField(max_length=5000)
hide_content = models.TextField(max_length=1000, default='', blank=True)
create_time = models.DateTimeField(auto_now_add=True)
visible = models.BooleanField(default=True)
description = models.CharField(max_length=200, default='', blank=True)
# comment = models.ForeignKey(Comment, related_name='article_comment')
cover = models.CharField(max_length=100, default='/')
def __unicode__(self):
return self.title
class Comment(models.Model):
article = models.ForeignKey(Article, related_name='article_comment')
content = models.TextField(max_length=1000, default='')
create_time = models.DateTimeField(auto_now_add=True)
user_id = models.CharField(max_length=100, blank=False)
def __unicode__(self):
return self.content
class ZoneSubject(models.Model):
title = models.CharField(max_length=20, default='', blank=True)
image_url = models.CharField(max_length=500)
create_time = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return self.image_url
class Search_record(models.Model):
title = models.CharField(max_length=100)
url = models.CharField(max_length=100)
searchCount = models.IntegerField(blank=True)
zhihu_type = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return self.title
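# The models below hold content sourced from Douban groups and Zhihu subjects/questions.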
class DoubanTopic(models.Model):
group_id = models.CharField(max_length=10)
url = models.CharField(max_length=100)
title = models.CharField(max_length=100)
create_time = models.CharField(max_length=20)
author_name = models.CharField(max_length=50)
author_url = models.CharField(max_length=200)
user_img_small = models.CharField(max_length=200)
visible = models.BooleanField(default=True)
def __unicode__(self):
return self.title
class ZhihuSubject(models.Model):
title = models.CharField(max_length=200)
url = models.CharField(max_length=100)
zhihu_type = models.IntegerField()
def __unicode__(self):
return self.title
class ZhihuQuestion(models.Model):
subject = models.ForeignKey(ZhihuSubject, related_name='subject_question')
answer_url = models.CharField(max_length=200)
author = models.CharField(max_length=100)
    author_url = models.CharField(max_length=200, null=True)
title = models.CharField(max_length=100, default='')
def __unicode__(self):
return self.title
class ZhihuImage(models.Model):
question = models.ForeignKey(ZhihuQuestion, related_name='question_image')
origin_url = models.CharField(max_length=200)
def __unicode__(self):
return self.origin_url
class DoubanImage(models.Model):
topic = models.ForeignKey(DoubanTopic, related_name='topic_image')
origin_url = models.CharField(max_length=200)
    cos_url = models.CharField(max_length=200, default='', blank=True)
type = models.IntegerField(default=0)
def __unicode__(self):
return self.origin_url
|
flexible
|
{
"blob_id": "0bc53130a4248178f4c3fabbae7d2546f0d5b8fd",
"index": 5996,
"step-1": "<mask token>\n\n\nclass Search_record(models.Model):\n title = models.CharField(max_length=100)\n url = models.CharField(max_length=100)\n searchCount = models.IntegerField(blank=True)\n zhihu_type = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return self.title\n\n\nclass DoubanTopic(models.Model):\n group_id = models.CharField(max_length=10)\n url = models.CharField(max_length=100)\n title = models.CharField(max_length=100)\n create_time = models.CharField(max_length=20)\n author_name = models.CharField(max_length=50)\n author_url = models.CharField(max_length=200)\n user_img_small = models.CharField(max_length=200)\n visible = models.BooleanField(default=True)\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuSubject(models.Model):\n title = models.CharField(max_length=200)\n url = models.CharField(max_length=100)\n zhihu_type = models.IntegerField()\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuQuestion(models.Model):\n subject = models.ForeignKey(ZhihuSubject, related_name='subject_question')\n answer_url = models.CharField(max_length=200)\n author = models.CharField(max_length=100)\n author_url = models.CharField(max_length=200, null=True)\n title = models.CharField(max_length=100, default='')\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuImage(models.Model):\n question = models.ForeignKey(ZhihuQuestion, related_name='question_image')\n origin_url = models.CharField(max_length=200)\n\n def __unicode__(self):\n return self.origin_url\n\n\nclass DoubanImage(models.Model):\n topic = models.ForeignKey(DoubanTopic, related_name='topic_image')\n origin_url = models.CharField(max_length=200)\n cos_url = models.CharField(max_length=200, default='', blank=True)\n type = models.IntegerField(default=0)\n\n def __unicode__(self):\n return self.origin_url\n",
"step-2": "<mask token>\n\n\nclass Comment(models.Model):\n article = models.ForeignKey(Article, related_name='article_comment')\n content = models.TextField(max_length=1000, default='')\n create_time = models.DateTimeField(auto_now_add=True)\n user_id = models.CharField(max_length=100, blank=False)\n\n def __unicode__(self):\n return self.content\n\n\nclass ZoneSubject(models.Model):\n title = models.CharField(max_length=20, default='', blank=True)\n image_url = models.CharField(max_length=500)\n create_time = models.DateTimeField(default=timezone.now)\n\n def __unicode__(self):\n return self.image_url\n\n\nclass Search_record(models.Model):\n title = models.CharField(max_length=100)\n url = models.CharField(max_length=100)\n searchCount = models.IntegerField(blank=True)\n zhihu_type = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return self.title\n\n\nclass DoubanTopic(models.Model):\n group_id = models.CharField(max_length=10)\n url = models.CharField(max_length=100)\n title = models.CharField(max_length=100)\n create_time = models.CharField(max_length=20)\n author_name = models.CharField(max_length=50)\n author_url = models.CharField(max_length=200)\n user_img_small = models.CharField(max_length=200)\n visible = models.BooleanField(default=True)\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuSubject(models.Model):\n title = models.CharField(max_length=200)\n url = models.CharField(max_length=100)\n zhihu_type = models.IntegerField()\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuQuestion(models.Model):\n subject = models.ForeignKey(ZhihuSubject, related_name='subject_question')\n answer_url = models.CharField(max_length=200)\n author = models.CharField(max_length=100)\n author_url = models.CharField(max_length=200, null=True)\n title = models.CharField(max_length=100, default='')\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuImage(models.Model):\n question = models.ForeignKey(ZhihuQuestion, related_name='question_image')\n origin_url = models.CharField(max_length=200)\n\n def __unicode__(self):\n return self.origin_url\n\n\nclass DoubanImage(models.Model):\n topic = models.ForeignKey(DoubanTopic, related_name='topic_image')\n origin_url = models.CharField(max_length=200)\n cos_url = models.CharField(max_length=200, default='', blank=True)\n type = models.IntegerField(default=0)\n\n def __unicode__(self):\n return self.origin_url\n",
"step-3": "<mask token>\n\n\nclass Article(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Comment(models.Model):\n article = models.ForeignKey(Article, related_name='article_comment')\n content = models.TextField(max_length=1000, default='')\n create_time = models.DateTimeField(auto_now_add=True)\n user_id = models.CharField(max_length=100, blank=False)\n\n def __unicode__(self):\n return self.content\n\n\nclass ZoneSubject(models.Model):\n title = models.CharField(max_length=20, default='', blank=True)\n image_url = models.CharField(max_length=500)\n create_time = models.DateTimeField(default=timezone.now)\n\n def __unicode__(self):\n return self.image_url\n\n\nclass Search_record(models.Model):\n title = models.CharField(max_length=100)\n url = models.CharField(max_length=100)\n searchCount = models.IntegerField(blank=True)\n zhihu_type = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return self.title\n\n\nclass DoubanTopic(models.Model):\n group_id = models.CharField(max_length=10)\n url = models.CharField(max_length=100)\n title = models.CharField(max_length=100)\n create_time = models.CharField(max_length=20)\n author_name = models.CharField(max_length=50)\n author_url = models.CharField(max_length=200)\n user_img_small = models.CharField(max_length=200)\n visible = models.BooleanField(default=True)\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuSubject(models.Model):\n title = models.CharField(max_length=200)\n url = models.CharField(max_length=100)\n zhihu_type = models.IntegerField()\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuQuestion(models.Model):\n subject = models.ForeignKey(ZhihuSubject, related_name='subject_question')\n answer_url = models.CharField(max_length=200)\n author = models.CharField(max_length=100)\n author_url = models.CharField(max_length=200, null=True)\n title = models.CharField(max_length=100, default='')\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuImage(models.Model):\n question = models.ForeignKey(ZhihuQuestion, related_name='question_image')\n origin_url = models.CharField(max_length=200)\n\n def __unicode__(self):\n return self.origin_url\n\n\nclass DoubanImage(models.Model):\n topic = models.ForeignKey(DoubanTopic, related_name='topic_image')\n origin_url = models.CharField(max_length=200)\n cos_url = models.CharField(max_length=200, default='', blank=True)\n type = models.IntegerField(default=0)\n\n def __unicode__(self):\n return self.origin_url\n",
"step-4": "<mask token>\n\n\nclass Article(models.Model):\n title = models.CharField(max_length=200)\n author = models.CharField(max_length=100, default='admin')\n content = models.TextField(max_length=5000)\n hide_content = models.TextField(max_length=1000, default='', blank=True)\n create_time = models.DateTimeField(auto_now_add=True)\n visible = models.BooleanField(default=True)\n description = models.CharField(max_length=200, default='', blank=True)\n cover = models.CharField(max_length=100, default='/')\n\n def __unicode__(self):\n return self.title\n\n\nclass Comment(models.Model):\n article = models.ForeignKey(Article, related_name='article_comment')\n content = models.TextField(max_length=1000, default='')\n create_time = models.DateTimeField(auto_now_add=True)\n user_id = models.CharField(max_length=100, blank=False)\n\n def __unicode__(self):\n return self.content\n\n\nclass ZoneSubject(models.Model):\n title = models.CharField(max_length=20, default='', blank=True)\n image_url = models.CharField(max_length=500)\n create_time = models.DateTimeField(default=timezone.now)\n\n def __unicode__(self):\n return self.image_url\n\n\nclass Search_record(models.Model):\n title = models.CharField(max_length=100)\n url = models.CharField(max_length=100)\n searchCount = models.IntegerField(blank=True)\n zhihu_type = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return self.title\n\n\nclass DoubanTopic(models.Model):\n group_id = models.CharField(max_length=10)\n url = models.CharField(max_length=100)\n title = models.CharField(max_length=100)\n create_time = models.CharField(max_length=20)\n author_name = models.CharField(max_length=50)\n author_url = models.CharField(max_length=200)\n user_img_small = models.CharField(max_length=200)\n visible = models.BooleanField(default=True)\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuSubject(models.Model):\n title = models.CharField(max_length=200)\n url = models.CharField(max_length=100)\n zhihu_type = models.IntegerField()\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuQuestion(models.Model):\n subject = models.ForeignKey(ZhihuSubject, related_name='subject_question')\n answer_url = models.CharField(max_length=200)\n author = models.CharField(max_length=100)\n author_url = models.CharField(max_length=200, null=True)\n title = models.CharField(max_length=100, default='')\n\n def __unicode__(self):\n return self.title\n\n\nclass ZhihuImage(models.Model):\n question = models.ForeignKey(ZhihuQuestion, related_name='question_image')\n origin_url = models.CharField(max_length=200)\n\n def __unicode__(self):\n return self.origin_url\n\n\nclass DoubanImage(models.Model):\n topic = models.ForeignKey(DoubanTopic, related_name='topic_image')\n origin_url = models.CharField(max_length=200)\n cos_url = models.CharField(max_length=200, default='', blank=True)\n type = models.IntegerField(default=0)\n\n def __unicode__(self):\n return self.origin_url\n",
"step-5": "from __future__ import unicode_literals\nfrom django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\n\n\n\nclass Article(models.Model):\n title = models.CharField(max_length=200)\n author = models.CharField(max_length=100, default='admin')\n content = models.TextField(max_length=5000)\n hide_content = models.TextField(max_length=1000, default='', blank=True)\n create_time = models.DateTimeField(auto_now_add=True)\n visible = models.BooleanField(default=True)\n description = models.CharField(max_length=200, default='', blank=True)\n # comment = models.ForeignKey(Comment, related_name='article_comment')\n cover = models.CharField(max_length=100, default='/')\n def __unicode__(self):\n return self.title\n\nclass Comment(models.Model):\n article = models.ForeignKey(Article, related_name='article_comment')\n content = models.TextField(max_length=1000, default='')\n create_time = models.DateTimeField(auto_now_add=True)\n user_id = models.CharField(max_length=100, blank=False)\n def __unicode__(self):\n return self.content\n\n\nclass ZoneSubject(models.Model):\n title = models.CharField(max_length=20, default='', blank=True)\n image_url = models.CharField(max_length=500)\n create_time = models.DateTimeField(default=timezone.now)\n def __unicode__(self):\n return self.image_url\n\n\nclass Search_record(models.Model):\n title = models.CharField(max_length=100)\n url = models.CharField(max_length=100)\n searchCount = models.IntegerField(blank=True)\n zhihu_type = models.IntegerField(blank=True, null=True)\n def __unicode__(self):\n return self.title\n\nclass DoubanTopic(models.Model):\n group_id = models.CharField(max_length=10)\n url = models.CharField(max_length=100)\n title = models.CharField(max_length=100)\n create_time = models.CharField(max_length=20)\n author_name = models.CharField(max_length=50)\n author_url = models.CharField(max_length=200)\n user_img_small = models.CharField(max_length=200)\n visible = models.BooleanField(default=True)\n def __unicode__(self):\n return self.title\n\nclass ZhihuSubject(models.Model):\n title = models.CharField(max_length=200)\n url = models.CharField(max_length=100)\n zhihu_type = models.IntegerField()\n def __unicode__(self):\n return self.title\n\nclass ZhihuQuestion(models.Model):\n subject = models.ForeignKey(ZhihuSubject, related_name='subject_question')\n answer_url = models.CharField(max_length=200)\n author = models.CharField(max_length=100)\n author_url = models.CharField(max_length=200,null=True)\n title = models.CharField(max_length=100, default='')\n def __unicode__(self):\n return self.title\n\nclass ZhihuImage(models.Model):\n question = models.ForeignKey(ZhihuQuestion, related_name='question_image')\n origin_url = models.CharField(max_length=200)\n def __unicode__(self):\n return self.origin_url\n\n\nclass DoubanImage(models.Model):\n topic = models.ForeignKey(DoubanTopic, related_name='topic_image')\n origin_url = models.CharField(max_length=200)\n cos_url = models.CharField(max_length=200, default='',blank=True)\n type = models.IntegerField(default=0)\n def __unicode__(self):\n return self.origin_url\n\n\n\n\n",
"step-ids": [
18,
24,
25,
27,
29
]
}
|
[
18,
24,
25,
27,
29
] |
<|reserved_special_token_0|>
class Ui_MainWindow(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_MainWindow(object):
<|reserved_special_token_0|>
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))
self.pushButton_3.setText(_translate('MainWindow', 'Download'))
self.label_2.setText(_translate('MainWindow', 'Save location'))
self.pushButton_2.setText(_translate('MainWindow', 'Search'))
self.label_3.setText(_translate('MainWindow', 'Qualiti'))
self.pushButton.setText(_translate('MainWindow', 'Browse'))
self.label.setText(_translate('MainWindow', 'Video URL'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName('MainWindow')
MainWindow.resize(500, 251)
MainWindow.setStyleSheet(
"""/*
Neon Style Sheet for QT Applications (QpushButton)
Author: Jaime A. Quiroga P.
Company: GTRONICK
Last updated: 24/10/2020, 15:42.
Available at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss
*/
QPushButton{
border-style: solid;
border-color: #050a0e;
border-width: 1px;
border-radius: 5px;
color: #d3dae3;
padding: 2px;
background-color: #100E19;
}
QPushButton::default{
border-style: solid;
border-color: #050a0e;
border-width: 1px;
border-radius: 5px;
color: #FFFFFF;
padding: 2px;
background-color: #151a1e;
}
QPushButton:hover{
border-style: solid;
border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);
border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);
border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);
border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);
border-width: 2px;
border-radius: 1px;
color: #d3dae3;
padding: 2px;
}
QPushButton:pressed{
border-style: solid;
border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);
border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);
border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);
border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);
border-width: 2px;
border-radius: 1px;
color: #d3dae3;
padding: 2px;
}"""
)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName('centralwidget')
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))
self.pushButton_3.setStyleSheet('')
self.pushButton_3.setObjectName('pushButton_3')
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))
self.lineEdit.setObjectName('lineEdit')
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))
self.label_2.setObjectName('label_2')
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))
self.lineEdit_2.setObjectName('lineEdit_2')
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))
self.pushButton_2.setStyleSheet('')
self.pushButton_2.setObjectName('pushButton_2')
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))
self.label_3.setObjectName('label_3')
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))
self.pushButton.setStyleSheet('')
self.pushButton.setObjectName('pushButton')
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))
self.label.setObjectName('label')
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))
self.comboBox.setStyleSheet('background-color: rgb(101, 101, 101);')
self.comboBox.setObjectName('comboBox')
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))
self.menubar.setObjectName('menubar')
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName('statusbar')
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))
self.pushButton_3.setText(_translate('MainWindow', 'Download'))
self.label_2.setText(_translate('MainWindow', 'Save location'))
self.pushButton_2.setText(_translate('MainWindow', 'Search'))
self.label_3.setText(_translate('MainWindow', 'Qualiti'))
self.pushButton.setText(_translate('MainWindow', 'Browse'))
self.label.setText(_translate('MainWindow', 'Video URL'))
<|reserved_special_token_1|>
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName('MainWindow')
MainWindow.resize(500, 251)
MainWindow.setStyleSheet(
"""/*
Neon Style Sheet for QT Applications (QpushButton)
Author: Jaime A. Quiroga P.
Company: GTRONICK
Last updated: 24/10/2020, 15:42.
Available at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss
*/
QPushButton{
border-style: solid;
border-color: #050a0e;
border-width: 1px;
border-radius: 5px;
color: #d3dae3;
padding: 2px;
background-color: #100E19;
}
QPushButton::default{
border-style: solid;
border-color: #050a0e;
border-width: 1px;
border-radius: 5px;
color: #FFFFFF;
padding: 2px;
background-color: #151a1e;
}
QPushButton:hover{
border-style: solid;
border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);
border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);
border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);
border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);
border-width: 2px;
border-radius: 1px;
color: #d3dae3;
padding: 2px;
}
QPushButton:pressed{
border-style: solid;
border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);
border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);
border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);
border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);
border-width: 2px;
border-radius: 1px;
color: #d3dae3;
padding: 2px;
}"""
)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName('centralwidget')
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))
self.pushButton_3.setStyleSheet('')
self.pushButton_3.setObjectName('pushButton_3')
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))
self.lineEdit.setObjectName('lineEdit')
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))
self.label_2.setObjectName('label_2')
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))
self.lineEdit_2.setObjectName('lineEdit_2')
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))
self.pushButton_2.setStyleSheet('')
self.pushButton_2.setObjectName('pushButton_2')
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))
self.label_3.setObjectName('label_3')
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))
self.pushButton.setStyleSheet('')
self.pushButton.setObjectName('pushButton')
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))
self.label.setObjectName('label')
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))
self.comboBox.setStyleSheet('background-color: rgb(101, 101, 101);')
self.comboBox.setObjectName('comboBox')
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))
self.menubar.setObjectName('menubar')
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName('statusbar')
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))
self.pushButton_3.setText(_translate('MainWindow', 'Download'))
self.label_2.setText(_translate('MainWindow', 'Save location'))
self.pushButton_2.setText(_translate('MainWindow', 'Search'))
self.label_3.setText(_translate('MainWindow', 'Qualiti'))
self.pushButton.setText(_translate('MainWindow', 'Browse'))
self.label.setText(_translate('MainWindow', 'Video URL'))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(500, 251)
MainWindow.setStyleSheet("/*\n"
"Neon Style Sheet for QT Applications (QpushButton)\n"
"Author: Jaime A. Quiroga P.\n"
"Company: GTRONICK\n"
"Last updated: 24/10/2020, 15:42.\n"
"Available at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss\n"
"*/\n"
"QPushButton{\n"
" border-style: solid;\n"
" border-color: #050a0e;\n"
" border-width: 1px;\n"
" border-radius: 5px;\n"
" color: #d3dae3;\n"
" padding: 2px;\n"
" background-color: #100E19;\n"
"}\n"
"QPushButton::default{\n"
" border-style: solid;\n"
" border-color: #050a0e;\n"
" border-width: 1px;\n"
" border-radius: 5px;\n"
" color: #FFFFFF;\n"
" padding: 2px;\n"
" background-color: #151a1e;\n"
"}\n"
"QPushButton:hover{\n"
" border-style: solid;\n"
" border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);\n"
" border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);\n"
" border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n"
" border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n"
" border-width: 2px;\n"
" border-radius: 1px;\n"
" color: #d3dae3;\n"
" padding: 2px;\n"
"}\n"
"QPushButton:pressed{\n"
" border-style: solid;\n"
" border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);\n"
" border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);\n"
" border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n"
" border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n"
" border-width: 2px;\n"
" border-radius: 1px;\n"
" color: #d3dae3;\n"
" padding: 2px;\n"
"}")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))
self.pushButton_3.setStyleSheet("")
self.pushButton_3.setObjectName("pushButton_3")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))
self.lineEdit.setObjectName("lineEdit")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))
self.label_2.setObjectName("label_2")
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))
self.lineEdit_2.setObjectName("lineEdit_2")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))
self.pushButton_2.setStyleSheet("")
self.pushButton_2.setObjectName("pushButton_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))
self.label_3.setObjectName("label_3")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))
self.pushButton.setStyleSheet("")
self.pushButton.setObjectName("pushButton")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))
self.label.setObjectName("label")
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))
self.comboBox.setStyleSheet("background-color: rgb(101, 101, 101);")
self.comboBox.setObjectName("comboBox")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton_3.setText(_translate("MainWindow", "Download"))
self.label_2.setText(_translate("MainWindow", "Save location"))
self.pushButton_2.setText(_translate("MainWindow", "Search"))
self.label_3.setText(_translate("MainWindow", "Qualiti"))
self.pushButton.setText(_translate("MainWindow", "Browse"))
self.label.setText(_translate("MainWindow", "Video URL"))
|
flexible
|
{
"blob_id": "2d503c93160b6f44fba2495f0ae0cf9ba0eaf9d6",
"index": 8930,
"step-1": "<mask token>\n\n\nclass Ui_MainWindow(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_MainWindow(object):\n <mask token>\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))\n self.pushButton_3.setText(_translate('MainWindow', 'Download'))\n self.label_2.setText(_translate('MainWindow', 'Save location'))\n self.pushButton_2.setText(_translate('MainWindow', 'Search'))\n self.label_3.setText(_translate('MainWindow', 'Qualiti'))\n self.pushButton.setText(_translate('MainWindow', 'Browse'))\n self.label.setText(_translate('MainWindow', 'Video URL'))\n",
"step-3": "<mask token>\n\n\nclass Ui_MainWindow(object):\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(500, 251)\n MainWindow.setStyleSheet(\n \"\"\"/*\nNeon Style Sheet for QT Applications (QpushButton)\nAuthor: Jaime A. Quiroga P.\nCompany: GTRONICK\nLast updated: 24/10/2020, 15:42.\nAvailable at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss\n*/\nQPushButton{\n border-style: solid;\n border-color: #050a0e;\n border-width: 1px;\n border-radius: 5px;\n color: #d3dae3;\n padding: 2px;\n background-color: #100E19;\n}\nQPushButton::default{\n border-style: solid;\n border-color: #050a0e;\n border-width: 1px;\n border-radius: 5px;\n color: #FFFFFF;\n padding: 2px;\n background-color: #151a1e;\n}\nQPushButton:hover{\n border-style: solid;\n border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);\n border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);\n border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n border-width: 2px;\n border-radius: 1px;\n color: #d3dae3;\n padding: 2px;\n}\nQPushButton:pressed{\n border-style: solid;\n border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);\n border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);\n border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n border-width: 2px;\n border-radius: 1px;\n color: #d3dae3;\n padding: 2px;\n}\"\"\"\n )\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))\n self.pushButton_3.setStyleSheet('')\n self.pushButton_3.setObjectName('pushButton_3')\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))\n self.lineEdit.setObjectName('lineEdit')\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))\n self.label_2.setObjectName('label_2')\n self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))\n self.lineEdit_2.setObjectName('lineEdit_2')\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))\n self.pushButton_2.setStyleSheet('')\n self.pushButton_2.setObjectName('pushButton_2')\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))\n self.label_3.setObjectName('label_3')\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))\n self.pushButton.setStyleSheet('')\n self.pushButton.setObjectName('pushButton')\n self.label = 
QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))\n self.label.setObjectName('label')\n self.comboBox = QtWidgets.QComboBox(self.centralwidget)\n self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))\n self.comboBox.setStyleSheet('background-color: rgb(101, 101, 101);')\n self.comboBox.setObjectName('comboBox')\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))\n self.menubar.setObjectName('menubar')\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))\n self.pushButton_3.setText(_translate('MainWindow', 'Download'))\n self.label_2.setText(_translate('MainWindow', 'Save location'))\n self.pushButton_2.setText(_translate('MainWindow', 'Search'))\n self.label_3.setText(_translate('MainWindow', 'Qualiti'))\n self.pushButton.setText(_translate('MainWindow', 'Browse'))\n self.label.setText(_translate('MainWindow', 'Video URL'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(500, 251)\n MainWindow.setStyleSheet(\n \"\"\"/*\nNeon Style Sheet for QT Applications (QpushButton)\nAuthor: Jaime A. Quiroga P.\nCompany: GTRONICK\nLast updated: 24/10/2020, 15:42.\nAvailable at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss\n*/\nQPushButton{\n border-style: solid;\n border-color: #050a0e;\n border-width: 1px;\n border-radius: 5px;\n color: #d3dae3;\n padding: 2px;\n background-color: #100E19;\n}\nQPushButton::default{\n border-style: solid;\n border-color: #050a0e;\n border-width: 1px;\n border-radius: 5px;\n color: #FFFFFF;\n padding: 2px;\n background-color: #151a1e;\n}\nQPushButton:hover{\n border-style: solid;\n border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);\n border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);\n border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n border-width: 2px;\n border-radius: 1px;\n color: #d3dae3;\n padding: 2px;\n}\nQPushButton:pressed{\n border-style: solid;\n border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);\n border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);\n border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n border-width: 2px;\n border-radius: 1px;\n color: #d3dae3;\n padding: 2px;\n}\"\"\"\n )\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))\n self.pushButton_3.setStyleSheet('')\n self.pushButton_3.setObjectName('pushButton_3')\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))\n self.lineEdit.setObjectName('lineEdit')\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))\n self.label_2.setObjectName('label_2')\n self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))\n self.lineEdit_2.setObjectName('lineEdit_2')\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))\n self.pushButton_2.setStyleSheet('')\n self.pushButton_2.setObjectName('pushButton_2')\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))\n self.label_3.setObjectName('label_3')\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))\n self.pushButton.setStyleSheet('')\n 
self.pushButton.setObjectName('pushButton')\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))\n self.label.setObjectName('label')\n self.comboBox = QtWidgets.QComboBox(self.centralwidget)\n self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))\n self.comboBox.setStyleSheet('background-color: rgb(101, 101, 101);')\n self.comboBox.setObjectName('comboBox')\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))\n self.menubar.setObjectName('menubar')\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))\n self.pushButton_3.setText(_translate('MainWindow', 'Download'))\n self.label_2.setText(_translate('MainWindow', 'Save location'))\n self.pushButton_2.setText(_translate('MainWindow', 'Search'))\n self.label_3.setText(_translate('MainWindow', 'Qualiti'))\n self.pushButton.setText(_translate('MainWindow', 'Browse'))\n self.label.setText(_translate('MainWindow', 'Video URL'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'main.ui'\n#\n# Created by: PyQt5 UI code generator 5.14.1\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(500, 251)\n MainWindow.setStyleSheet(\"/*\\n\"\n\"Neon Style Sheet for QT Applications (QpushButton)\\n\"\n\"Author: Jaime A. Quiroga P.\\n\"\n\"Company: GTRONICK\\n\"\n\"Last updated: 24/10/2020, 15:42.\\n\"\n\"Available at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss\\n\"\n\"*/\\n\"\n\"QPushButton{\\n\"\n\" border-style: solid;\\n\"\n\" border-color: #050a0e;\\n\"\n\" border-width: 1px;\\n\"\n\" border-radius: 5px;\\n\"\n\" color: #d3dae3;\\n\"\n\" padding: 2px;\\n\"\n\" background-color: #100E19;\\n\"\n\"}\\n\"\n\"QPushButton::default{\\n\"\n\" border-style: solid;\\n\"\n\" border-color: #050a0e;\\n\"\n\" border-width: 1px;\\n\"\n\" border-radius: 5px;\\n\"\n\" color: #FFFFFF;\\n\"\n\" padding: 2px;\\n\"\n\" background-color: #151a1e;\\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\" border-style: solid;\\n\"\n\" border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);\\n\"\n\" border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);\\n\"\n\" border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\\n\"\n\" border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\\n\"\n\" border-width: 2px;\\n\"\n\" border-radius: 1px;\\n\"\n\" color: #d3dae3;\\n\"\n\" padding: 2px;\\n\"\n\"}\\n\"\n\"QPushButton:pressed{\\n\"\n\" border-style: solid;\\n\"\n\" border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);\\n\"\n\" border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);\\n\"\n\" border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\\n\"\n\" border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\\n\"\n\" border-width: 2px;\\n\"\n\" border-radius: 1px;\\n\"\n\" color: #d3dae3;\\n\"\n\" padding: 2px;\\n\"\n\"}\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))\n self.pushButton_3.setStyleSheet(\"\")\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))\n self.lineEdit.setObjectName(\"lineEdit\")\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))\n self.label_2.setObjectName(\"label_2\")\n self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\n self.pushButton_2 = 
QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))\n self.pushButton_2.setStyleSheet(\"\")\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))\n self.label_3.setObjectName(\"label_3\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))\n self.pushButton.setStyleSheet(\"\")\n self.pushButton.setObjectName(\"pushButton\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))\n self.label.setObjectName(\"label\")\n self.comboBox = QtWidgets.QComboBox(self.centralwidget)\n self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))\n self.comboBox.setStyleSheet(\"background-color: rgb(101, 101, 101);\")\n self.comboBox.setObjectName(\"comboBox\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.pushButton_3.setText(_translate(\"MainWindow\", \"Download\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Save location\"))\n self.pushButton_2.setText(_translate(\"MainWindow\", \"Search\"))\n self.label_3.setText(_translate(\"MainWindow\", \"Qualiti\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"Browse\"))\n self.label.setText(_translate(\"MainWindow\", \"Video URL\"))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Trie:
def __init__(self, me, parent=None):
self.me = me
self.parent = parent
self.children = {}
<|reserved_special_token_0|>
def main():
trie_dict = {}
for i in range(int(read())):
data = read().split()
if data[1] not in trie_dict:
trie_dict[data[1]] = Trie(data[1])
cur = trie_dict[data[1]]
for j in range(2, len(data)):
if data[j] not in cur.children:
cur.children[data[j]] = Trie(data[j])
cur = cur.children[data[j]]
trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))
for k in trie_dict.keys():
get_answer(trie_dict[k], 0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Trie:
def __init__(self, me, parent=None):
self.me = me
self.parent = parent
self.children = {}
def get_answer(trie, count):
print('--' * count + trie.me)
trie.children = dict(sorted(trie.children.items(), key=lambda x: x[0]))
for k in trie.children.keys():
get_answer(trie.children[k], count + 1)
def main():
trie_dict = {}
for i in range(int(read())):
data = read().split()
if data[1] not in trie_dict:
trie_dict[data[1]] = Trie(data[1])
cur = trie_dict[data[1]]
for j in range(2, len(data)):
if data[j] not in cur.children:
cur.children[data[j]] = Trie(data[j])
cur = cur.children[data[j]]
trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))
for k in trie_dict.keys():
get_answer(trie_dict[k], 0)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
read = lambda : stdin.readline().strip()
class Trie:
def __init__(self, me, parent=None):
self.me = me
self.parent = parent
self.children = {}
def get_answer(trie, count):
print('--' * count + trie.me)
trie.children = dict(sorted(trie.children.items(), key=lambda x: x[0]))
for k in trie.children.keys():
get_answer(trie.children[k], count + 1)
def main():
trie_dict = {}
for i in range(int(read())):
data = read().split()
if data[1] not in trie_dict:
trie_dict[data[1]] = Trie(data[1])
cur = trie_dict[data[1]]
for j in range(2, len(data)):
if data[j] not in cur.children:
cur.children[data[j]] = Trie(data[j])
cur = cur.children[data[j]]
trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))
for k in trie_dict.keys():
get_answer(trie_dict[k], 0)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from sys import stdin
read = lambda : stdin.readline().strip()
class Trie:
def __init__(self, me, parent=None):
self.me = me
self.parent = parent
self.children = {}
def get_answer(trie, count):
print('--' * count + trie.me)
trie.children = dict(sorted(trie.children.items(), key=lambda x: x[0]))
for k in trie.children.keys():
get_answer(trie.children[k], count + 1)
def main():
trie_dict = {}
for i in range(int(read())):
data = read().split()
if data[1] not in trie_dict:
trie_dict[data[1]] = Trie(data[1])
cur = trie_dict[data[1]]
for j in range(2, len(data)):
if data[j] not in cur.children:
cur.children[data[j]] = Trie(data[j])
cur = cur.children[data[j]]
trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))
for k in trie_dict.keys():
get_answer(trie_dict[k], 0)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from sys import stdin
read = lambda: stdin.readline().strip()
class Trie:
def __init__(self, me, parent=None):
self.me = me
self.parent = parent
self.children = {}
def get_answer(trie, count):
print(("--" * count) + trie.me)
trie.children = dict(sorted(trie.children.items(), key=lambda x: x[0]))
for k in trie.children.keys():
get_answer(trie.children[k], count + 1)
def main():
trie_dict = {}
for i in range(int(read())):
data = read().split()
if data[1] not in trie_dict:
trie_dict[data[1]] = Trie(data[1])
cur = trie_dict[data[1]]
for j in range(2, len(data)):
            # when the same data is not already in cur
if data[j] not in cur.children:
cur.children[data[j]] = Trie(data[j])
cur = cur.children[data[j]]
trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))
for k in trie_dict.keys():
get_answer(trie_dict[k], 0)
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "c5605f4770d61d435cc1817bad4d5cbe0aaf1d18",
"index": 8824,
"step-1": "<mask token>\n\n\nclass Trie:\n\n def __init__(self, me, parent=None):\n self.me = me\n self.parent = parent\n self.children = {}\n\n\n<mask token>\n\n\ndef main():\n trie_dict = {}\n for i in range(int(read())):\n data = read().split()\n if data[1] not in trie_dict:\n trie_dict[data[1]] = Trie(data[1])\n cur = trie_dict[data[1]]\n for j in range(2, len(data)):\n if data[j] not in cur.children:\n cur.children[data[j]] = Trie(data[j])\n cur = cur.children[data[j]]\n trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))\n for k in trie_dict.keys():\n get_answer(trie_dict[k], 0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Trie:\n\n def __init__(self, me, parent=None):\n self.me = me\n self.parent = parent\n self.children = {}\n\n\ndef get_answer(trie, count):\n print('--' * count + trie.me)\n trie.children = dict(sorted(trie.children.items(), key=lambda x: x[0]))\n for k in trie.children.keys():\n get_answer(trie.children[k], count + 1)\n\n\ndef main():\n trie_dict = {}\n for i in range(int(read())):\n data = read().split()\n if data[1] not in trie_dict:\n trie_dict[data[1]] = Trie(data[1])\n cur = trie_dict[data[1]]\n for j in range(2, len(data)):\n if data[j] not in cur.children:\n cur.children[data[j]] = Trie(data[j])\n cur = cur.children[data[j]]\n trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))\n for k in trie_dict.keys():\n get_answer(trie_dict[k], 0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nread = lambda : stdin.readline().strip()\n\n\nclass Trie:\n\n def __init__(self, me, parent=None):\n self.me = me\n self.parent = parent\n self.children = {}\n\n\ndef get_answer(trie, count):\n print('--' * count + trie.me)\n trie.children = dict(sorted(trie.children.items(), key=lambda x: x[0]))\n for k in trie.children.keys():\n get_answer(trie.children[k], count + 1)\n\n\ndef main():\n trie_dict = {}\n for i in range(int(read())):\n data = read().split()\n if data[1] not in trie_dict:\n trie_dict[data[1]] = Trie(data[1])\n cur = trie_dict[data[1]]\n for j in range(2, len(data)):\n if data[j] not in cur.children:\n cur.children[data[j]] = Trie(data[j])\n cur = cur.children[data[j]]\n trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))\n for k in trie_dict.keys():\n get_answer(trie_dict[k], 0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from sys import stdin\nread = lambda : stdin.readline().strip()\n\n\nclass Trie:\n\n def __init__(self, me, parent=None):\n self.me = me\n self.parent = parent\n self.children = {}\n\n\ndef get_answer(trie, count):\n print('--' * count + trie.me)\n trie.children = dict(sorted(trie.children.items(), key=lambda x: x[0]))\n for k in trie.children.keys():\n get_answer(trie.children[k], count + 1)\n\n\ndef main():\n trie_dict = {}\n for i in range(int(read())):\n data = read().split()\n if data[1] not in trie_dict:\n trie_dict[data[1]] = Trie(data[1])\n cur = trie_dict[data[1]]\n for j in range(2, len(data)):\n if data[j] not in cur.children:\n cur.children[data[j]] = Trie(data[j])\n cur = cur.children[data[j]]\n trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))\n for k in trie_dict.keys():\n get_answer(trie_dict[k], 0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from sys import stdin\nread = lambda: stdin.readline().strip()\n\n\nclass Trie:\n def __init__(self, me, parent=None):\n self.me = me\n self.parent = parent\n self.children = {}\n\n\ndef get_answer(trie, count):\n print((\"--\" * count) + trie.me)\n\n trie.children = dict(sorted(trie.children.items(), key=lambda x: x[0]))\n for k in trie.children.keys():\n get_answer(trie.children[k], count + 1)\n\n\ndef main():\n trie_dict = {}\n for i in range(int(read())):\n data = read().split()\n if data[1] not in trie_dict:\n trie_dict[data[1]] = Trie(data[1])\n\n cur = trie_dict[data[1]]\n for j in range(2, len(data)):\n # cur에 같은 데이터가 없을 경우\n if data[j] not in cur.children:\n cur.children[data[j]] = Trie(data[j])\n cur = cur.children[data[j]]\n\n trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))\n for k in trie_dict.keys():\n get_answer(trie_dict[k], 0)\n\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
#Copyright [2017] [Mauro Riva <[email protected]> <lemariva.com>]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
import math as m
import utime
from machine import ADC
from ws2812 import WS2812
class vu_meter:
ledsColors = []
def __init__(self, ledNumber=144, ledPower = 100, adcWindow = 1500, adcMax = 100, adcPin = 'P13', pinLEDs = 'P22'):
self.ledPower = ledPower
self.ledNumber = ledNumber
self.pinLeds = pinLEDs
self.adcPin = adcPin
self.adcWindow = adcWindow
self.ledsColors = []
self.adcIn = 0.0
self.adcMax = adcMax
self.adcMaxDynamic = False
        # initialize ADC
self.init_adc()
self.init_leds()
def init_adc(self):
self.adc = ADC(0)
self.adcUnit = self.adc.channel(pin=self.adcPin)
self.adcMean = 0
def init_leds(self):
self.ledsColors = []
for x in range(0, self.ledNumber):
color = self.color_vu_meter (x)
self.ledsColors.append(color)
self.ledChain = WS2812( ledNumber=self.ledNumber, brightness=self.ledPower, dataPin=self.pinLeds ) # dataPin is for LoPy board only
self.ledChain.show( self.ledsColors )
def test_leds(self):
testData = self.ledsColors
for x in range(0, self.ledNumber):
testData = testData[1:] + testData[0:1]
self.ledChain.show( testData )
self.ledChain.show([])
def lighter(self, color, percent):
percent = percent / 100
if(percent == 1):
return color
if(percent == 0):
return ([0, 0, 0])
#if(percent < 0.65): # driver not working ok with percent under 0.65
# percent = 0.65
rcolor = color[0] - color[0] * (1-percent)
gcolor = color[1] - color[1] * (1-percent)
bcolor = color[2] - color[2] * (1-percent)
newcolor = ([(rcolor), (gcolor), (bcolor)])
return newcolor
def color_vu_meter(self, position):
rcolor = (255 * position) / self.ledNumber
gcolor = (255 * (self.ledNumber - position)) / self.ledNumber
bcolor= 0
newcolor = self.lighter([(rcolor), (gcolor), (bcolor)], self.ledPower)
return newcolor
def adc_max_dynamic(self, state = True, adcMax = 100):
self.adcMaxDynamic = state
self.adcMax = adcMax
return self.adcMaxDynamic
def adc_max(self):
return self.adcMax
def zero_calibration(self):
self.adcMean = 0
for y in range(0, self.adcWindow):
self.adcMean = self.adcMean + self.adcUnit.value()
self.adcMean = self.adcMean / self.adcWindow
return self.adcMean
def update_rms(self):
t1 = utime.ticks_ms()
power = 0
self.audioPower = 0
for x in range(0, self.adcWindow):
adc_value = self.adcUnit.value() - self.adcMean
power = power + m.pow(adc_value, 2)
power = (m.sqrt(power / self.adcWindow))
self.audioPower = power
t2 = utime.ticks_ms()
time_elapsed = t2 - t1
if(self.adcMaxDynamic):
if(self.adcMax < power):
self.adcMax = power
self.normalizedPower = power / self.adcMax
#20 * log10(sqrt(sum / count))
if(self.normalizedPower > 1):
self.normalizedPower = 1
return [time_elapsed, power]
def update_leds(self):
leds_count = m.floor(self.normalizedPower * self.ledNumber)
self.ledChain.show( self.ledsColors[1:leds_count] )
|
normal
|
{
"blob_id": "894d8d00fd05bf8648f1b95ecf30b70e7b4e841b",
"index": 8640,
"step-1": "<mask token>\n\n\nclass vu_meter:\n <mask token>\n <mask token>\n\n def init_adc(self):\n self.adc = ADC(0)\n self.adcUnit = self.adc.channel(pin=self.adcPin)\n self.adcMean = 0\n\n def init_leds(self):\n self.ledsColors = []\n for x in range(0, self.ledNumber):\n color = self.color_vu_meter(x)\n self.ledsColors.append(color)\n self.ledChain = WS2812(ledNumber=self.ledNumber, brightness=self.\n ledPower, dataPin=self.pinLeds)\n self.ledChain.show(self.ledsColors)\n <mask token>\n\n def lighter(self, color, percent):\n percent = percent / 100\n if percent == 1:\n return color\n if percent == 0:\n return [0, 0, 0]\n rcolor = color[0] - color[0] * (1 - percent)\n gcolor = color[1] - color[1] * (1 - percent)\n bcolor = color[2] - color[2] * (1 - percent)\n newcolor = [rcolor, gcolor, bcolor]\n return newcolor\n <mask token>\n <mask token>\n <mask token>\n\n def zero_calibration(self):\n self.adcMean = 0\n for y in range(0, self.adcWindow):\n self.adcMean = self.adcMean + self.adcUnit.value()\n self.adcMean = self.adcMean / self.adcWindow\n return self.adcMean\n\n def update_rms(self):\n t1 = utime.ticks_ms()\n power = 0\n self.audioPower = 0\n for x in range(0, self.adcWindow):\n adc_value = self.adcUnit.value() - self.adcMean\n power = power + m.pow(adc_value, 2)\n power = m.sqrt(power / self.adcWindow)\n self.audioPower = power\n t2 = utime.ticks_ms()\n time_elapsed = t2 - t1\n if self.adcMaxDynamic:\n if self.adcMax < power:\n self.adcMax = power\n self.normalizedPower = power / self.adcMax\n if self.normalizedPower > 1:\n self.normalizedPower = 1\n return [time_elapsed, power]\n\n def update_leds(self):\n leds_count = m.floor(self.normalizedPower * self.ledNumber)\n self.ledChain.show(self.ledsColors[1:leds_count])\n",
"step-2": "<mask token>\n\n\nclass vu_meter:\n <mask token>\n\n def __init__(self, ledNumber=144, ledPower=100, adcWindow=1500, adcMax=\n 100, adcPin='P13', pinLEDs='P22'):\n self.ledPower = ledPower\n self.ledNumber = ledNumber\n self.pinLeds = pinLEDs\n self.adcPin = adcPin\n self.adcWindow = adcWindow\n self.ledsColors = []\n self.adcIn = 0.0\n self.adcMax = adcMax\n self.adcMaxDynamic = False\n self.init_adc()\n self.init_leds()\n\n def init_adc(self):\n self.adc = ADC(0)\n self.adcUnit = self.adc.channel(pin=self.adcPin)\n self.adcMean = 0\n\n def init_leds(self):\n self.ledsColors = []\n for x in range(0, self.ledNumber):\n color = self.color_vu_meter(x)\n self.ledsColors.append(color)\n self.ledChain = WS2812(ledNumber=self.ledNumber, brightness=self.\n ledPower, dataPin=self.pinLeds)\n self.ledChain.show(self.ledsColors)\n <mask token>\n\n def lighter(self, color, percent):\n percent = percent / 100\n if percent == 1:\n return color\n if percent == 0:\n return [0, 0, 0]\n rcolor = color[0] - color[0] * (1 - percent)\n gcolor = color[1] - color[1] * (1 - percent)\n bcolor = color[2] - color[2] * (1 - percent)\n newcolor = [rcolor, gcolor, bcolor]\n return newcolor\n\n def color_vu_meter(self, position):\n rcolor = 255 * position / self.ledNumber\n gcolor = 255 * (self.ledNumber - position) / self.ledNumber\n bcolor = 0\n newcolor = self.lighter([rcolor, gcolor, bcolor], self.ledPower)\n return newcolor\n\n def adc_max_dynamic(self, state=True, adcMax=100):\n self.adcMaxDynamic = state\n self.adcMax = adcMax\n return self.adcMaxDynamic\n\n def adc_max(self):\n return self.adcMax\n\n def zero_calibration(self):\n self.adcMean = 0\n for y in range(0, self.adcWindow):\n self.adcMean = self.adcMean + self.adcUnit.value()\n self.adcMean = self.adcMean / self.adcWindow\n return self.adcMean\n\n def update_rms(self):\n t1 = utime.ticks_ms()\n power = 0\n self.audioPower = 0\n for x in range(0, self.adcWindow):\n adc_value = self.adcUnit.value() - self.adcMean\n power = power + m.pow(adc_value, 2)\n power = m.sqrt(power / self.adcWindow)\n self.audioPower = power\n t2 = utime.ticks_ms()\n time_elapsed = t2 - t1\n if self.adcMaxDynamic:\n if self.adcMax < power:\n self.adcMax = power\n self.normalizedPower = power / self.adcMax\n if self.normalizedPower > 1:\n self.normalizedPower = 1\n return [time_elapsed, power]\n\n def update_leds(self):\n leds_count = m.floor(self.normalizedPower * self.ledNumber)\n self.ledChain.show(self.ledsColors[1:leds_count])\n",
"step-3": "<mask token>\n\n\nclass vu_meter:\n <mask token>\n\n def __init__(self, ledNumber=144, ledPower=100, adcWindow=1500, adcMax=\n 100, adcPin='P13', pinLEDs='P22'):\n self.ledPower = ledPower\n self.ledNumber = ledNumber\n self.pinLeds = pinLEDs\n self.adcPin = adcPin\n self.adcWindow = adcWindow\n self.ledsColors = []\n self.adcIn = 0.0\n self.adcMax = adcMax\n self.adcMaxDynamic = False\n self.init_adc()\n self.init_leds()\n\n def init_adc(self):\n self.adc = ADC(0)\n self.adcUnit = self.adc.channel(pin=self.adcPin)\n self.adcMean = 0\n\n def init_leds(self):\n self.ledsColors = []\n for x in range(0, self.ledNumber):\n color = self.color_vu_meter(x)\n self.ledsColors.append(color)\n self.ledChain = WS2812(ledNumber=self.ledNumber, brightness=self.\n ledPower, dataPin=self.pinLeds)\n self.ledChain.show(self.ledsColors)\n\n def test_leds(self):\n testData = self.ledsColors\n for x in range(0, self.ledNumber):\n testData = testData[1:] + testData[0:1]\n self.ledChain.show(testData)\n self.ledChain.show([])\n\n def lighter(self, color, percent):\n percent = percent / 100\n if percent == 1:\n return color\n if percent == 0:\n return [0, 0, 0]\n rcolor = color[0] - color[0] * (1 - percent)\n gcolor = color[1] - color[1] * (1 - percent)\n bcolor = color[2] - color[2] * (1 - percent)\n newcolor = [rcolor, gcolor, bcolor]\n return newcolor\n\n def color_vu_meter(self, position):\n rcolor = 255 * position / self.ledNumber\n gcolor = 255 * (self.ledNumber - position) / self.ledNumber\n bcolor = 0\n newcolor = self.lighter([rcolor, gcolor, bcolor], self.ledPower)\n return newcolor\n\n def adc_max_dynamic(self, state=True, adcMax=100):\n self.adcMaxDynamic = state\n self.adcMax = adcMax\n return self.adcMaxDynamic\n\n def adc_max(self):\n return self.adcMax\n\n def zero_calibration(self):\n self.adcMean = 0\n for y in range(0, self.adcWindow):\n self.adcMean = self.adcMean + self.adcUnit.value()\n self.adcMean = self.adcMean / self.adcWindow\n return self.adcMean\n\n def update_rms(self):\n t1 = utime.ticks_ms()\n power = 0\n self.audioPower = 0\n for x in range(0, self.adcWindow):\n adc_value = self.adcUnit.value() - self.adcMean\n power = power + m.pow(adc_value, 2)\n power = m.sqrt(power / self.adcWindow)\n self.audioPower = power\n t2 = utime.ticks_ms()\n time_elapsed = t2 - t1\n if self.adcMaxDynamic:\n if self.adcMax < power:\n self.adcMax = power\n self.normalizedPower = power / self.adcMax\n if self.normalizedPower > 1:\n self.normalizedPower = 1\n return [time_elapsed, power]\n\n def update_leds(self):\n leds_count = m.floor(self.normalizedPower * self.ledNumber)\n self.ledChain.show(self.ledsColors[1:leds_count])\n",
"step-4": "import math as m\nimport utime\nfrom machine import ADC\nfrom ws2812 import WS2812\n\n\nclass vu_meter:\n ledsColors = []\n\n def __init__(self, ledNumber=144, ledPower=100, adcWindow=1500, adcMax=\n 100, adcPin='P13', pinLEDs='P22'):\n self.ledPower = ledPower\n self.ledNumber = ledNumber\n self.pinLeds = pinLEDs\n self.adcPin = adcPin\n self.adcWindow = adcWindow\n self.ledsColors = []\n self.adcIn = 0.0\n self.adcMax = adcMax\n self.adcMaxDynamic = False\n self.init_adc()\n self.init_leds()\n\n def init_adc(self):\n self.adc = ADC(0)\n self.adcUnit = self.adc.channel(pin=self.adcPin)\n self.adcMean = 0\n\n def init_leds(self):\n self.ledsColors = []\n for x in range(0, self.ledNumber):\n color = self.color_vu_meter(x)\n self.ledsColors.append(color)\n self.ledChain = WS2812(ledNumber=self.ledNumber, brightness=self.\n ledPower, dataPin=self.pinLeds)\n self.ledChain.show(self.ledsColors)\n\n def test_leds(self):\n testData = self.ledsColors\n for x in range(0, self.ledNumber):\n testData = testData[1:] + testData[0:1]\n self.ledChain.show(testData)\n self.ledChain.show([])\n\n def lighter(self, color, percent):\n percent = percent / 100\n if percent == 1:\n return color\n if percent == 0:\n return [0, 0, 0]\n rcolor = color[0] - color[0] * (1 - percent)\n gcolor = color[1] - color[1] * (1 - percent)\n bcolor = color[2] - color[2] * (1 - percent)\n newcolor = [rcolor, gcolor, bcolor]\n return newcolor\n\n def color_vu_meter(self, position):\n rcolor = 255 * position / self.ledNumber\n gcolor = 255 * (self.ledNumber - position) / self.ledNumber\n bcolor = 0\n newcolor = self.lighter([rcolor, gcolor, bcolor], self.ledPower)\n return newcolor\n\n def adc_max_dynamic(self, state=True, adcMax=100):\n self.adcMaxDynamic = state\n self.adcMax = adcMax\n return self.adcMaxDynamic\n\n def adc_max(self):\n return self.adcMax\n\n def zero_calibration(self):\n self.adcMean = 0\n for y in range(0, self.adcWindow):\n self.adcMean = self.adcMean + self.adcUnit.value()\n self.adcMean = self.adcMean / self.adcWindow\n return self.adcMean\n\n def update_rms(self):\n t1 = utime.ticks_ms()\n power = 0\n self.audioPower = 0\n for x in range(0, self.adcWindow):\n adc_value = self.adcUnit.value() - self.adcMean\n power = power + m.pow(adc_value, 2)\n power = m.sqrt(power / self.adcWindow)\n self.audioPower = power\n t2 = utime.ticks_ms()\n time_elapsed = t2 - t1\n if self.adcMaxDynamic:\n if self.adcMax < power:\n self.adcMax = power\n self.normalizedPower = power / self.adcMax\n if self.normalizedPower > 1:\n self.normalizedPower = 1\n return [time_elapsed, power]\n\n def update_leds(self):\n leds_count = m.floor(self.normalizedPower * self.ledNumber)\n self.ledChain.show(self.ledsColors[1:leds_count])\n",
"step-5": "#Copyright [2017] [Mauro Riva <[email protected]> <lemariva.com>]\n\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n\n#http://www.apache.org/licenses/LICENSE-2.0\n\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\n#The above copyright notice and this permission notice shall be\n#included in all copies or substantial portions of the Software. \n\nimport math as m\nimport utime\n\nfrom machine import ADC\nfrom ws2812 import WS2812\n\nclass vu_meter: \n ledsColors = []\n \n def __init__(self, ledNumber=144, ledPower = 100, adcWindow = 1500, adcMax = 100, adcPin = 'P13', pinLEDs = 'P22'):\n self.ledPower = ledPower\n self.ledNumber = ledNumber \n self.pinLeds = pinLEDs\n self.adcPin = adcPin \n self.adcWindow = adcWindow\n self.ledsColors = [] \n self.adcIn = 0.0\n self.adcMax = adcMax\n self.adcMaxDynamic = False \n # inizialize ADC\n self.init_adc()\n self.init_leds()\n\n def init_adc(self):\n self.adc = ADC(0)\n self.adcUnit = self.adc.channel(pin=self.adcPin)\n self.adcMean = 0\n \n def init_leds(self):\n self.ledsColors = [] \n for x in range(0, self.ledNumber):\n color = self.color_vu_meter (x)\n self.ledsColors.append(color)\n \n self.ledChain = WS2812( ledNumber=self.ledNumber, brightness=self.ledPower, dataPin=self.pinLeds ) # dataPin is for LoPy board only \n self.ledChain.show( self.ledsColors ) \n \n def test_leds(self):\n testData = self.ledsColors\n for x in range(0, self.ledNumber):\n testData = testData[1:] + testData[0:1]\n self.ledChain.show( testData ) \n self.ledChain.show([]) \n \n def lighter(self, color, percent):\n percent = percent / 100\n if(percent == 1):\n return color\n if(percent == 0):\n return ([0, 0, 0])\t\n #if(percent < 0.65):\t\t# driver not working ok with percent under 0.65 \n # percent = 0.65\n\n rcolor = color[0] - color[0] * (1-percent)\n gcolor = color[1] - color[1] * (1-percent)\n bcolor = color[2] - color[2] * (1-percent)\n newcolor = ([(rcolor), (gcolor), (bcolor)])\n return newcolor\t\t\n\n def color_vu_meter(self, position):\n rcolor = (255 * position) / self.ledNumber\n gcolor = (255 * (self.ledNumber - position)) / self.ledNumber \n bcolor= 0\n newcolor = self.lighter([(rcolor), (gcolor), (bcolor)], self.ledPower)\n return newcolor\n \n def adc_max_dynamic(self, state = True, adcMax = 100): \n self.adcMaxDynamic = state\n self.adcMax = adcMax\n return self.adcMaxDynamic\n \n def adc_max(self):\n return self.adcMax\n \n def zero_calibration(self):\n self.adcMean = 0 \n for y in range(0, self.adcWindow): \n self.adcMean = self.adcMean + self.adcUnit.value()\n self.adcMean = self.adcMean / self.adcWindow \n return self.adcMean\n \n def update_rms(self):\n t1 = utime.ticks_ms() \n power = 0\n self.audioPower = 0\n for x in range(0, self.adcWindow): \n adc_value = self.adcUnit.value() - self.adcMean\n power = power + m.pow(adc_value, 2) \n \n power = (m.sqrt(power / self.adcWindow))\n self.audioPower = power \n \n t2 = utime.ticks_ms()\n time_elapsed = t2 - t1 \n \n if(self.adcMaxDynamic):\n if(self.adcMax < power):\n self.adcMax = power\n \n self.normalizedPower = power / self.adcMax\n #20 * log10(sqrt(sum / count))\n \n if(self.normalizedPower 
> 1):\n self.normalizedPower = 1 \n \n return [time_elapsed, power]\n \n def update_leds(self): \n leds_count = m.floor(self.normalizedPower * self.ledNumber) \n self.ledChain.show( self.ledsColors[1:leds_count] )\n",
"step-ids": [
7,
11,
12,
14,
15
]
}
|
[
7,
11,
12,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('element', '0011_suggestion_suggestion_type'), ('bot',
'0001_initial')]
operations = [migrations.AddField(model_name='discorduser', name=
'has_elements', field=models.ManyToManyField(to='element.Element'))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('element', '0011_suggestion_suggestion_type'), ('bot',
'0001_initial')]
operations = [migrations.AddField(model_name='discorduser', name=
'has_elements', field=models.ManyToManyField(to='element.Element'))]
<|reserved_special_token_1|>
# Generated by Django 3.1.1 on 2020-12-02 19:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('element', '0011_suggestion_suggestion_type'),
('bot', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='discorduser',
name='has_elements',
field=models.ManyToManyField(to='element.Element'),
),
]
|
flexible
|
{
"blob_id": "43ae01ffe35c6c4491f3f7e480dd6f5c1be86eb2",
"index": 2475,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('element', '0011_suggestion_suggestion_type'), ('bot',\n '0001_initial')]\n operations = [migrations.AddField(model_name='discorduser', name=\n 'has_elements', field=models.ManyToManyField(to='element.Element'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('element', '0011_suggestion_suggestion_type'), ('bot',\n '0001_initial')]\n operations = [migrations.AddField(model_name='discorduser', name=\n 'has_elements', field=models.ManyToManyField(to='element.Element'))]\n",
"step-5": "# Generated by Django 3.1.1 on 2020-12-02 19:50\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('element', '0011_suggestion_suggestion_type'),\n ('bot', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='discorduser',\n name='has_elements',\n field=models.ManyToManyField(to='element.Element'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def _create_connection(db_file):
""" Create a database connection to the SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn
=TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft
='TEXT'))
except Error as err:
print(err)
finally:
conn.commit()
conn.close()
<|reserved_special_token_0|>
def main(text):
"""This is the main function to run the CHATBOT, analyse
the responses with nltk and send OSC messages to Pure Data.
"""
bot_response = CHATBOT.get_response(text).text
print(bot_response)
analysis = VADER_ANALYZER.polarity_scores(text)
freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200
CLIENT.send_message('/filter', freq)
exchange = {text: bot_response}
_log_conversation('conversation.db', exchange)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _create_connection(db_file):
""" Create a database connection to the SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn
=TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft
='TEXT'))
except Error as err:
print(err)
finally:
conn.commit()
conn.close()
def _log_conversation(db_file, line):
""" Log conversation in SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute(
'INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES ("{v1}", "{v2}", "{now}")'
.format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time=
'time', v1=' '.join(line.keys()), v2=' '.join(line.values()),
now=str(datetime.datetime.now())))
conn.commit()
except Error as err:
print(err)
finally:
conn.close()
def main(text):
"""This is the main function to run the CHATBOT, analyse
the responses with nltk and send OSC messages to Pure Data.
"""
bot_response = CHATBOT.get_response(text).text
print(bot_response)
analysis = VADER_ANALYZER.polarity_scores(text)
freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200
CLIENT.send_message('/filter', freq)
exchange = {text: bot_response}
_log_conversation('conversation.db', exchange)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _create_connection(db_file):
""" Create a database connection to the SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn
=TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft
='TEXT'))
except Error as err:
print(err)
finally:
conn.commit()
conn.close()
def _log_conversation(db_file, line):
""" Log conversation in SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute(
'INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES ("{v1}", "{v2}", "{now}")'
.format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time=
'time', v1=' '.join(line.keys()), v2=' '.join(line.values()),
now=str(datetime.datetime.now())))
conn.commit()
except Error as err:
print(err)
finally:
conn.close()
def main(text):
"""This is the main function to run the CHATBOT, analyse
the responses with nltk and send OSC messages to Pure Data.
"""
bot_response = CHATBOT.get_response(text).text
print(bot_response)
analysis = VADER_ANALYZER.polarity_scores(text)
freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200
CLIENT.send_message('/filter', freq)
exchange = {text: bot_response}
_log_conversation('conversation.db', exchange)
if __name__ == '__main__':
TABLE_NAME = 'conversation_log'
INPUT_COLUMN = 'input_column'
OUTPUT_COLUMN = 'output_column'
CONVERSATION_DB = 'conversation.db'
_create_connection(CONVERSATION_DB)
CHATBOT = ChatBot('Sentiment Music Bot', trainer=
'chatterbot.trainers.ChatterBotCorpusTrainer')
CHATBOT.train('chatterbot.corpus.english')
nltk.download('vader_lexicon')
VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()
IP = 'localhost'
PORT = 9000
CLIENT = udp_client.SimpleUDPClient(IP, PORT)
while True:
USER_RESPONSE = input("Talk ('exit' to exit): ")
if USER_RESPONSE == 'exit':
break
else:
main(USER_RESPONSE)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import datetime
import sqlite3
from sqlite3 import Error
import nltk.sentiment
from chatterbot import ChatBot
from pythonosc import udp_client
def _create_connection(db_file):
""" Create a database connection to the SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn
=TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft
='TEXT'))
except Error as err:
print(err)
finally:
conn.commit()
conn.close()
def _log_conversation(db_file, line):
""" Log conversation in SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute(
'INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES ("{v1}", "{v2}", "{now}")'
.format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time=
'time', v1=' '.join(line.keys()), v2=' '.join(line.values()),
now=str(datetime.datetime.now())))
conn.commit()
except Error as err:
print(err)
finally:
conn.close()
def main(text):
"""This is the main function to run the CHATBOT, analyse
the responses with nltk and send OSC messages to Pure Data.
"""
bot_response = CHATBOT.get_response(text).text
print(bot_response)
analysis = VADER_ANALYZER.polarity_scores(text)
freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200
CLIENT.send_message('/filter', freq)
exchange = {text: bot_response}
_log_conversation('conversation.db', exchange)
if __name__ == '__main__':
TABLE_NAME = 'conversation_log'
INPUT_COLUMN = 'input_column'
OUTPUT_COLUMN = 'output_column'
CONVERSATION_DB = 'conversation.db'
_create_connection(CONVERSATION_DB)
CHATBOT = ChatBot('Sentiment Music Bot', trainer=
'chatterbot.trainers.ChatterBotCorpusTrainer')
CHATBOT.train('chatterbot.corpus.english')
nltk.download('vader_lexicon')
VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()
IP = 'localhost'
PORT = 9000
CLIENT = udp_client.SimpleUDPClient(IP, PORT)
while True:
USER_RESPONSE = input("Talk ('exit' to exit): ")
if USER_RESPONSE == 'exit':
break
else:
main(USER_RESPONSE)
<|reserved_special_token_1|>
"""
This is the main script
"""
import datetime
import sqlite3
from sqlite3 import Error
import nltk.sentiment
from chatterbot import ChatBot
from pythonosc import udp_client
def _create_connection(db_file):
""" Create a database connection to the SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
# Create a new SQLite table
cur.execute("CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})"
.format(tn=TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN,
time='time', ft='TEXT'))
except Error as err:
print(err)
finally:
conn.commit()
conn.close()
def _log_conversation(db_file, line):
""" Log conversation in SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute("""INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES ("{v1}", "{v2}", "{now}")""".
format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time='time',
v1=' '.join(line.keys()), v2=' '.join(line.values()),
now=str(datetime.datetime.now())))
conn.commit()
except Error as err:
print(err)
finally:
conn.close()
def main(text):
"""This is the main function to run the CHATBOT, analyse
the responses with nltk and send OSC messages to Pure Data.
"""
# Get CHATBOT response from the user input.
bot_response = CHATBOT.get_response(text).text
print(bot_response)
    # Get the polarity score of the user's text.
analysis = VADER_ANALYZER.polarity_scores(text)
    # Map the polarity score onto an audible frequency range.
freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200
# Send OSC message, to be listened to by pd.
CLIENT.send_message("/filter", freq)
# Log conversation.
exchange = {text: bot_response}
_log_conversation("conversation.db", exchange)
if __name__ == '__main__':
# Set up database
TABLE_NAME = 'conversation_log'
INPUT_COLUMN = 'input_column'
OUTPUT_COLUMN = 'output_column'
CONVERSATION_DB = "conversation.db"
_create_connection(CONVERSATION_DB)
# Set up chatbot.
CHATBOT = ChatBot(
'Sentiment Music Bot',
trainer='chatterbot.trainers.ChatterBotCorpusTrainer')
# Train based on the english corpus.
CHATBOT.train("chatterbot.corpus.english")
# Download lexicon for nltk.
nltk.download('vader_lexicon')
# Set up sentiment analyzer.
VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()
# Set up OSC client.
IP = 'localhost'
PORT = 9000
CLIENT = udp_client.SimpleUDPClient(IP, PORT)
# Run chatbot.
while True:
USER_RESPONSE = input("Talk ('exit' to exit): ")
if USER_RESPONSE == 'exit': # Exit on 'exit' string.
break
else:
main(USER_RESPONSE)
|
flexible
|
{
"blob_id": "2b8b5b893d61d11d2795f5be96fde759256a15e8",
"index": 9741,
"step-1": "<mask token>\n\n\ndef _create_connection(db_file):\n \"\"\" Create a database connection to the SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn\n =TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft\n ='TEXT'))\n except Error as err:\n print(err)\n finally:\n conn.commit()\n conn.close()\n\n\n<mask token>\n\n\ndef main(text):\n \"\"\"This is the main function to run the CHATBOT, analyse\n the responses with nltk and send OSC messages to Pure Data.\n \"\"\"\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n analysis = VADER_ANALYZER.polarity_scores(text)\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n CLIENT.send_message('/filter', freq)\n exchange = {text: bot_response}\n _log_conversation('conversation.db', exchange)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _create_connection(db_file):\n \"\"\" Create a database connection to the SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn\n =TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft\n ='TEXT'))\n except Error as err:\n print(err)\n finally:\n conn.commit()\n conn.close()\n\n\ndef _log_conversation(db_file, line):\n \"\"\" Log conversation in SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute(\n 'INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES (\"{v1}\", \"{v2}\", \"{now}\")'\n .format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time=\n 'time', v1=' '.join(line.keys()), v2=' '.join(line.values()),\n now=str(datetime.datetime.now())))\n conn.commit()\n except Error as err:\n print(err)\n finally:\n conn.close()\n\n\ndef main(text):\n \"\"\"This is the main function to run the CHATBOT, analyse\n the responses with nltk and send OSC messages to Pure Data.\n \"\"\"\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n analysis = VADER_ANALYZER.polarity_scores(text)\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n CLIENT.send_message('/filter', freq)\n exchange = {text: bot_response}\n _log_conversation('conversation.db', exchange)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _create_connection(db_file):\n \"\"\" Create a database connection to the SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn\n =TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft\n ='TEXT'))\n except Error as err:\n print(err)\n finally:\n conn.commit()\n conn.close()\n\n\ndef _log_conversation(db_file, line):\n \"\"\" Log conversation in SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute(\n 'INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES (\"{v1}\", \"{v2}\", \"{now}\")'\n .format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time=\n 'time', v1=' '.join(line.keys()), v2=' '.join(line.values()),\n now=str(datetime.datetime.now())))\n conn.commit()\n except Error as err:\n print(err)\n finally:\n conn.close()\n\n\ndef main(text):\n \"\"\"This is the main function to run the CHATBOT, analyse\n the responses with nltk and send OSC messages to Pure Data.\n \"\"\"\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n analysis = VADER_ANALYZER.polarity_scores(text)\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n CLIENT.send_message('/filter', freq)\n exchange = {text: bot_response}\n _log_conversation('conversation.db', exchange)\n\n\nif __name__ == '__main__':\n TABLE_NAME = 'conversation_log'\n INPUT_COLUMN = 'input_column'\n OUTPUT_COLUMN = 'output_column'\n CONVERSATION_DB = 'conversation.db'\n _create_connection(CONVERSATION_DB)\n CHATBOT = ChatBot('Sentiment Music Bot', trainer=\n 'chatterbot.trainers.ChatterBotCorpusTrainer')\n CHATBOT.train('chatterbot.corpus.english')\n nltk.download('vader_lexicon')\n VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()\n IP = 'localhost'\n PORT = 9000\n CLIENT = udp_client.SimpleUDPClient(IP, PORT)\n while True:\n USER_RESPONSE = input(\"Talk ('exit' to exit): \")\n if USER_RESPONSE == 'exit':\n break\n else:\n main(USER_RESPONSE)\n",
"step-4": "<mask token>\nimport datetime\nimport sqlite3\nfrom sqlite3 import Error\nimport nltk.sentiment\nfrom chatterbot import ChatBot\nfrom pythonosc import udp_client\n\n\ndef _create_connection(db_file):\n \"\"\" Create a database connection to the SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn\n =TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft\n ='TEXT'))\n except Error as err:\n print(err)\n finally:\n conn.commit()\n conn.close()\n\n\ndef _log_conversation(db_file, line):\n \"\"\" Log conversation in SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute(\n 'INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES (\"{v1}\", \"{v2}\", \"{now}\")'\n .format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time=\n 'time', v1=' '.join(line.keys()), v2=' '.join(line.values()),\n now=str(datetime.datetime.now())))\n conn.commit()\n except Error as err:\n print(err)\n finally:\n conn.close()\n\n\ndef main(text):\n \"\"\"This is the main function to run the CHATBOT, analyse\n the responses with nltk and send OSC messages to Pure Data.\n \"\"\"\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n analysis = VADER_ANALYZER.polarity_scores(text)\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n CLIENT.send_message('/filter', freq)\n exchange = {text: bot_response}\n _log_conversation('conversation.db', exchange)\n\n\nif __name__ == '__main__':\n TABLE_NAME = 'conversation_log'\n INPUT_COLUMN = 'input_column'\n OUTPUT_COLUMN = 'output_column'\n CONVERSATION_DB = 'conversation.db'\n _create_connection(CONVERSATION_DB)\n CHATBOT = ChatBot('Sentiment Music Bot', trainer=\n 'chatterbot.trainers.ChatterBotCorpusTrainer')\n CHATBOT.train('chatterbot.corpus.english')\n nltk.download('vader_lexicon')\n VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()\n IP = 'localhost'\n PORT = 9000\n CLIENT = udp_client.SimpleUDPClient(IP, PORT)\n while True:\n USER_RESPONSE = input(\"Talk ('exit' to exit): \")\n if USER_RESPONSE == 'exit':\n break\n else:\n main(USER_RESPONSE)\n",
"step-5": "\"\"\"\nThis is the main script\n\"\"\"\n\nimport datetime\nimport sqlite3\nfrom sqlite3 import Error\nimport nltk.sentiment\nfrom chatterbot import ChatBot\nfrom pythonosc import udp_client\n\n\ndef _create_connection(db_file):\n \"\"\" Create a database connection to the SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n\n # Create a new SQLite table\n cur.execute(\"CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})\"\n .format(tn=TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN,\n time='time', ft='TEXT'))\n\n except Error as err:\n print(err)\n\n finally:\n conn.commit()\n conn.close()\n\n\ndef _log_conversation(db_file, line):\n \"\"\" Log conversation in SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute(\"\"\"INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES (\"{v1}\", \"{v2}\", \"{now}\")\"\"\".\n format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time='time',\n v1=' '.join(line.keys()), v2=' '.join(line.values()),\n now=str(datetime.datetime.now())))\n conn.commit()\n\n except Error as err:\n print(err)\n\n finally:\n conn.close()\n\n\ndef main(text):\n \"\"\"This is the main function to run the CHATBOT, analyse\n the responses with nltk and send OSC messages to Pure Data.\n \"\"\"\n\n # Get CHATBOT response from the user input.\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n\n # Get polarity score from CHATBOT response.\n analysis = VADER_ANALYZER.polarity_scores(text)\n\n # Change polarity score relatively to a audible frequency.\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n\n # Send OSC message, to be listened to by pd.\n CLIENT.send_message(\"/filter\", freq)\n\n # Log conversation.\n exchange = {text: bot_response}\n _log_conversation(\"conversation.db\", exchange)\n\n\nif __name__ == '__main__':\n\n # Set up database\n TABLE_NAME = 'conversation_log'\n INPUT_COLUMN = 'input_column'\n OUTPUT_COLUMN = 'output_column'\n CONVERSATION_DB = \"conversation.db\"\n _create_connection(CONVERSATION_DB)\n\n # Set up chatbot.\n CHATBOT = ChatBot(\n 'Sentiment Music Bot',\n trainer='chatterbot.trainers.ChatterBotCorpusTrainer')\n\n # Train based on the english corpus.\n CHATBOT.train(\"chatterbot.corpus.english\")\n\n # Download lexicon for nltk.\n nltk.download('vader_lexicon')\n\n # Set up sentiment analyzer.\n VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()\n\n # Set up OSC client.\n IP = 'localhost'\n PORT = 9000\n CLIENT = udp_client.SimpleUDPClient(IP, PORT)\n\n # Run chatbot.\n while True:\n USER_RESPONSE = input(\"Talk ('exit' to exit): \")\n if USER_RESPONSE == 'exit': # Exit on 'exit' string.\n break\n else:\n main(USER_RESPONSE)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Queue:
def __init__(self):
self.items = deque()
def enQueue(self, i):
self.items.append(i)
def deQueue(self):
return self.items.popleft()
def isEmpty(self):
return len(self.items) == 0
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Queue:
def __init__(self):
self.items = deque()
def enQueue(self, i):
self.items.append(i)
def deQueue(self):
return self.items.popleft()
def isEmpty(self):
return len(self.items) == 0
def size(self):
return len(self.items)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Queue:
def __init__(self):
self.items = deque()
def enQueue(self, i):
self.items.append(i)
def deQueue(self):
return self.items.popleft()
def isEmpty(self):
return len(self.items) == 0
def size(self):
return len(self.items)
<|reserved_special_token_0|>
if __name__ == '__main__':
q = Queue()
print(q.items)
q.enQueue('A')
print(q.items)
q.deQueue()
print(q.items)
print(q.isEmpty())
<|reserved_special_token_1|>
from collections import deque
<|reserved_special_token_0|>
class Queue:
def __init__(self):
self.items = deque()
def enQueue(self, i):
self.items.append(i)
def deQueue(self):
return self.items.popleft()
def isEmpty(self):
return len(self.items) == 0
def size(self):
return len(self.items)
<|reserved_special_token_0|>
if __name__ == '__main__':
q = Queue()
print(q.items)
q.enQueue('A')
print(q.items)
q.deQueue()
print(q.items)
print(q.isEmpty())
<|reserved_special_token_1|>
from collections import deque
'''
Big O
A queue exposes two operations, enQueue and deQueue; how fast they run
depends on the implementation.
A good program: 1. works correctly 2. is responsive 3. uses few enough resources
to run on any machine (even low-spec hardware) 4. does what is required
5. is stable 6. has few bugs.

"Expensive" here means either memory expensive (uses a lot of memory)
or runtime expensive (takes a lot of time).
That trade-off is why a queue can also be backed by a linked list; each choice has costs:
- a linked list brings its own overhead
- deQueue is O(1): whether the queue holds 10 items or 1 million, it takes about the
  same time. This constant time is written O(1) ("oh one"), although that is the idealised case.
- a linked list stores a pointer next to each value, so it uses roughly twice the memory of a plain list.

Big O is the analysis of running-time complexity, used to compare two implementations
and decide which is more efficient. A better Big O is not automatically faster in
practice, because wall-clock speed also depends on the machine's spec.
n     T(n)
1     1 ms
10    10 ms
1M    1000 s
T(n) grows in proportion to n, so we call it O(n).
Another case:
n     T(n)
1     1
10    100
1M    1M s
T(n) that grows like n^2, n^3 or n! takes far longer.

For example, rank these from fastest to slowest:
1. O(1)      rank 1
2. O(n)      rank 3
3. O(n^2)    rank 4
4. O(log n)  rank 2

Linking a node into a linked list takes three steps: 1. create the new node
2. link the old data to the new node 3. update the front link.

Radix sort looks at the value one digit at a time:
1. read the input and keep it in a queue
2. take the first element out
3. find the maximum value and how many digits it has
4. run one pass per digit; in pass 1 each value goes into the bucket for its units
   digit, e.g. 64 has units digit 4, so it is kept in bucket 4.
(A sketch of this appears after the Queue class below.)
'''
class Queue:
def __init__(self):
self.items=deque()
def enQueue(self,i):
self.items.append(i)
def deQueue(self):
return self.items.popleft()
def isEmpty(self):
return len(self.items)==0
def size(self):
return len(self.items)
'''class Queue():
def __init__(self,list=None):
if list==None:
self.items=[]
else:
self.items=list
def enQueue(self,i):
self.items.append(i)
def deQueue(self):
self.items.pop(0)
def isQEmpty(self):
return len(self.items)==0
def size(self):
return len(self.items)
'''
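# Illustrative only, not part of the original file: a minimal radix sort sketch
# following the digit-bucket steps described in the docstring above. It assumes
# non-negative integers and reuses the Queue class defined earlier; the helper
# name radix_sort_sketch is made up for this example.
def radix_sort_sketch(numbers):
    main = Queue()
    for n in numbers:
        main.enQueue(n)
    if main.isEmpty():
        return []
    passes = len(str(max(numbers)))  # one pass per digit of the largest value
    for p in range(passes):
        buckets = [Queue() for _ in range(10)]  # one bucket queue per digit 0-9
        while not main.isEmpty():
            n = main.deQueue()
            buckets[(n // 10 ** p) % 10].enQueue(n)  # bucket by the digit for this pass
        for b in buckets:  # collect buckets back into the main queue in order 0..9
            while not b.isEmpty():
                main.enQueue(b.deQueue())
    return [main.deQueue() for _ in range(main.size())]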
if __name__== '__main__':
q=Queue()
print(q.items)
q.enQueue('A')
print(q.items)
q.deQueue()
print(q.items)
print(q.isEmpty())
|
flexible
|
{
"blob_id": "c96a64573fc6cc207ee09be4f4b183d065736ff6",
"index": 5442,
"step-1": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.items = deque()\n\n def enQueue(self, i):\n self.items.append(i)\n\n def deQueue(self):\n return self.items.popleft()\n\n def isEmpty(self):\n return len(self.items) == 0\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.items = deque()\n\n def enQueue(self, i):\n self.items.append(i)\n\n def deQueue(self):\n return self.items.popleft()\n\n def isEmpty(self):\n return len(self.items) == 0\n\n def size(self):\n return len(self.items)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.items = deque()\n\n def enQueue(self, i):\n self.items.append(i)\n\n def deQueue(self):\n return self.items.popleft()\n\n def isEmpty(self):\n return len(self.items) == 0\n\n def size(self):\n return len(self.items)\n\n\n<mask token>\nif __name__ == '__main__':\n q = Queue()\n print(q.items)\n q.enQueue('A')\n print(q.items)\n q.deQueue()\n print(q.items)\n print(q.isEmpty())\n",
"step-4": "from collections import deque\n<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.items = deque()\n\n def enQueue(self, i):\n self.items.append(i)\n\n def deQueue(self):\n return self.items.popleft()\n\n def isEmpty(self):\n return len(self.items) == 0\n\n def size(self):\n return len(self.items)\n\n\n<mask token>\nif __name__ == '__main__':\n q = Queue()\n print(q.items)\n q.enQueue('A')\n print(q.items)\n q.deQueue()\n print(q.items)\n print(q.isEmpty())\n",
"step-5": "from collections import deque\r\n'''\r\nBig O \r\nเวลาเรียก queue จะมี2operation 1deque 2enqueue เวลาเอาไปใช้\r\nอยู่ที่การimplementation\r\nโปรแกรมที่ดี 1.ทำงานถูกต้อง 2.ทันใจ 3.ทรัพยากรที่ใช้รันได้ทุกเครื่อง(specคอมกาก)\r\n4.ทำงานได้ตามต้องการ5.ความเสถียรของระบบ 6.Bugs\r\n\r\nแพง คือ memory expansive ใช้หน่วยความจำเยอะ\r\n\truntime expensive ใช้เวลาเยอะ\r\nเลยเกิด queue linklist\r\nโดยแต่ละอย่าง\r\n- linklist มีcost มาเกี่ยว \r\n- dequeue ใช้ O(1) มันขึ้นกับว่าจำหน่วยตัวข้างในมี10ตัวใช้ 1ms 1ล้านตัวก็ใช้ 1ms\r\nเรียกความเร็วคงที่ อีกชื่อ O(1) โอวัน โอหนึ่ง แต่มันในอุดมคติ\r\nเวลาใใช้ linklist เก็บตัวชี้และ ข้อมูล มันเลยใช้ หน่วยความจำเป็น2เท่าของ list\r\n\r\nBig O คือการวิเคราะห์ runing time complexityเปรียบเทียบสองตัวว่าตัวไหนมีประสิทธิภาพดีกว่า\r\nแต่Big O ที่ดีกว่าไม่ได้เร็วกว่า เพราะ ขึ้นอยุกับ ความเร็วspecเครื่อง\r\nn T(n)\r\n1 1ms\r\n10 10ms\r\n1M 1000s\r\nT(N)ผันตามn เรียก O(n)\r\nอีกเคส\r\nn T(n)\r\n1 1\r\n10 100\r\n1M 1Ms\r\nT(N) ผันตาม n^2,n^3,n! จะใช้เวลาเยอะมาก\r\n\r\nเช่น ให้ทาย อันไหนเร็วสุด\r\n1. O(1)\tอันดับ1\r\n2. O(n)\tอันดับ3\r\n3. O(n^2)\tอันดับ4\r\n4. O(logn)\tอันดับ2\r\n\r\n\r\n\r\nเวลาใช้ linklist จะมี3ขั้นตอนในการเชื่อม 1.สร้างnodeใหม่ 2.ลิ้งข้อมูลอันเก่ากะอันใหม่ 3.ลิ้งส่วนfront\r\n\r\nradix sort ดูค่าในแต่ละหลัก\r\n1.รับ input เก็บไว้ในqueue\r\n2.หยิบตัวแรกออกไป\r\n3.มันจะหาว่าตัวไหนmax และมีกี่หลัก\r\n4.จะมีการเทียบ3รอบ รอบที่1 เอาข้อมูลที่ดึงออกมา เก็บไว้ตามหลักในรอบนั้นๆเช่น 64 เลขหลักหน่วยตรงกับหลัก4 ก้เก่บไว้ที่4\r\n'''\r\nclass Queue:\r\n def __init__(self):\r\n self.items=deque()\r\n def enQueue(self,i):\r\n self.items.append(i)\r\n def deQueue(self):\r\n return self.items.popleft()\r\n def isEmpty(self):\r\n return len(self.items)==0\r\n def size(self):\r\n return len(self.items)\r\n'''class Queue(): \r\n def __init__(self,list=None):\r\n if list==None:\r\n self.items=[]\r\n else:\r\n self.items=list\r\n \r\n def enQueue(self,i):\r\n self.items.append(i)\r\n def deQueue(self):\r\n self.items.pop(0)\r\n def isQEmpty(self):\r\n return len(self.items)==0\r\n def size(self):\r\n return len(self.items)\r\n'''\r\nif __name__== '__main__':\r\n q=Queue()\r\n print(q.items)\r\n q.enQueue('A')\r\n print(q.items)\r\n q.deQueue()\r\n print(q.items)\r\n print(q.isEmpty())\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def print_pin_status(pin_number):
GPIO.setup(pin_number, GPIO.IN)
value = GPIO.input(pin_number)
print(f'Current Value of {pin_number} is {value}')
GPIO.setup(pin_number, GPIO.OUT)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
GPIO.setmode(GPIO.BCM)
<|reserved_special_token_0|>
GPIO.setup(ledPin, GPIO.OUT)
GPIO.output(ledPin, GPIO.LOW)
def print_pin_status(pin_number):
GPIO.setup(pin_number, GPIO.IN)
value = GPIO.input(pin_number)
print(f'Current Value of {pin_number} is {value}')
GPIO.setup(pin_number, GPIO.OUT)
while True:
print_pin_status(ledPin)
key = input('Action, press q to quit: ')
print(key)
if key == ' ':
print('space pushed')
if key == '1':
if pinOn:
print('turning led off')
GPIO.output(ledPin, GPIO.LOW)
pinOn = False
else:
print('turning led on')
GPIO.output(ledPin, GPIO.HIGH)
pinOn = True
if key == 'q':
print('Quiting. . .')
break
<|reserved_special_token_1|>
<|reserved_special_token_0|>
GPIO.setmode(GPIO.BCM)
ledPin = 4
pinOn = False
GPIO.setup(ledPin, GPIO.OUT)
GPIO.output(ledPin, GPIO.LOW)
def print_pin_status(pin_number):
GPIO.setup(pin_number, GPIO.IN)
value = GPIO.input(pin_number)
print(f'Current Value of {pin_number} is {value}')
GPIO.setup(pin_number, GPIO.OUT)
while True:
print_pin_status(ledPin)
key = input('Action, press q to quit: ')
print(key)
if key == ' ':
print('space pushed')
if key == '1':
if pinOn:
print('turning led off')
GPIO.output(ledPin, GPIO.LOW)
pinOn = False
else:
print('turning led on')
GPIO.output(ledPin, GPIO.HIGH)
pinOn = True
if key == 'q':
print('Quiting. . .')
break
<|reserved_special_token_1|>
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
ledPin = 4
pinOn = False
GPIO.setup(ledPin, GPIO.OUT)
GPIO.output(ledPin, GPIO.LOW)
def print_pin_status(pin_number):
GPIO.setup(pin_number, GPIO.IN)
value = GPIO.input(pin_number)
print(f'Current Value of {pin_number} is {value}')
GPIO.setup(pin_number, GPIO.OUT)
while True:
print_pin_status(ledPin)
key = input('Action, press q to quit: ')
print(key)
if key == ' ':
print('space pushed')
if key == '1':
if pinOn:
print('turning led off')
GPIO.output(ledPin, GPIO.LOW)
pinOn = False
else:
print('turning led on')
GPIO.output(ledPin, GPIO.HIGH)
pinOn = True
if key == 'q':
print('Quiting. . .')
break
<|reserved_special_token_1|>
#!/usr/bin/python
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
ledPin = 4
pinOn = False
GPIO.setup(ledPin, GPIO.OUT)
GPIO.output(ledPin, GPIO.LOW)
def print_pin_status(pin_number):
GPIO.setup(pin_number, GPIO.IN)
value = GPIO.input(pin_number)
print(f'Current Value of {pin_number} is {value}')
GPIO.setup(pin_number, GPIO.OUT)
while True:
print_pin_status(ledPin)
key = input("Action, press q to quit: ")
print(key)
if key == ' ':
print("space pushed")
if key == '1':
if pinOn:
print("turning led off")
GPIO.output(ledPin, GPIO.LOW)
pinOn = False
else:
print("turning led on")
GPIO.output(ledPin, GPIO.HIGH)
pinOn = True
if key == 'q':
print("Quiting. . .")
break
|
flexible
|
{
"blob_id": "492c416becc44deaafef519eae8c9a82ac00cc0e",
"index": 8632,
"step-1": "<mask token>\n\n\ndef print_pin_status(pin_number):\n GPIO.setup(pin_number, GPIO.IN)\n value = GPIO.input(pin_number)\n print(f'Current Value of {pin_number} is {value}')\n GPIO.setup(pin_number, GPIO.OUT)\n\n\n<mask token>\n",
"step-2": "<mask token>\nGPIO.setmode(GPIO.BCM)\n<mask token>\nGPIO.setup(ledPin, GPIO.OUT)\nGPIO.output(ledPin, GPIO.LOW)\n\n\ndef print_pin_status(pin_number):\n GPIO.setup(pin_number, GPIO.IN)\n value = GPIO.input(pin_number)\n print(f'Current Value of {pin_number} is {value}')\n GPIO.setup(pin_number, GPIO.OUT)\n\n\nwhile True:\n print_pin_status(ledPin)\n key = input('Action, press q to quit: ')\n print(key)\n if key == ' ':\n print('space pushed')\n if key == '1':\n if pinOn:\n print('turning led off')\n GPIO.output(ledPin, GPIO.LOW)\n pinOn = False\n else:\n print('turning led on')\n GPIO.output(ledPin, GPIO.HIGH)\n pinOn = True\n if key == 'q':\n print('Quiting. . .')\n break\n",
"step-3": "<mask token>\nGPIO.setmode(GPIO.BCM)\nledPin = 4\npinOn = False\nGPIO.setup(ledPin, GPIO.OUT)\nGPIO.output(ledPin, GPIO.LOW)\n\n\ndef print_pin_status(pin_number):\n GPIO.setup(pin_number, GPIO.IN)\n value = GPIO.input(pin_number)\n print(f'Current Value of {pin_number} is {value}')\n GPIO.setup(pin_number, GPIO.OUT)\n\n\nwhile True:\n print_pin_status(ledPin)\n key = input('Action, press q to quit: ')\n print(key)\n if key == ' ':\n print('space pushed')\n if key == '1':\n if pinOn:\n print('turning led off')\n GPIO.output(ledPin, GPIO.LOW)\n pinOn = False\n else:\n print('turning led on')\n GPIO.output(ledPin, GPIO.HIGH)\n pinOn = True\n if key == 'q':\n print('Quiting. . .')\n break\n",
"step-4": "import RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\nledPin = 4\npinOn = False\nGPIO.setup(ledPin, GPIO.OUT)\nGPIO.output(ledPin, GPIO.LOW)\n\n\ndef print_pin_status(pin_number):\n GPIO.setup(pin_number, GPIO.IN)\n value = GPIO.input(pin_number)\n print(f'Current Value of {pin_number} is {value}')\n GPIO.setup(pin_number, GPIO.OUT)\n\n\nwhile True:\n print_pin_status(ledPin)\n key = input('Action, press q to quit: ')\n print(key)\n if key == ' ':\n print('space pushed')\n if key == '1':\n if pinOn:\n print('turning led off')\n GPIO.output(ledPin, GPIO.LOW)\n pinOn = False\n else:\n print('turning led on')\n GPIO.output(ledPin, GPIO.HIGH)\n pinOn = True\n if key == 'q':\n print('Quiting. . .')\n break\n",
"step-5": "#!/usr/bin/python\n\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\n\nledPin = 4\npinOn = False\n\nGPIO.setup(ledPin, GPIO.OUT)\nGPIO.output(ledPin, GPIO.LOW)\n\n\ndef print_pin_status(pin_number):\n GPIO.setup(pin_number, GPIO.IN)\n value = GPIO.input(pin_number)\n print(f'Current Value of {pin_number} is {value}')\n GPIO.setup(pin_number, GPIO.OUT)\n\n\nwhile True:\n print_pin_status(ledPin)\n\n key = input(\"Action, press q to quit: \")\n\n print(key)\n\n if key == ' ':\n print(\"space pushed\")\n\n if key == '1':\n\n if pinOn:\n print(\"turning led off\")\n GPIO.output(ledPin, GPIO.LOW)\n pinOn = False\n else:\n print(\"turning led on\")\n GPIO.output(ledPin, GPIO.HIGH)\n pinOn = True\n\n if key == 'q':\n print(\"Quiting. . .\")\n break\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.contrib import admin
from .models import Advert, Category, ImageAd
@admin.register(Advert)
class AdminAdvert(admin.ModelAdmin):
filter_horizontal = "categories",
@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
pass
@admin.register(ImageAd)
class AdminImageAd(admin.ModelAdmin):
pass
|
normal
|
{
"blob_id": "fdcee5b3f6b3ec170c9ef3017e0cc6c4b28cf22d",
"index": 454,
"step-1": "<mask token>\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-2": "<mask token>\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n <mask token>\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-3": "<mask token>\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n filter_horizontal = 'categories',\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-4": "from django.contrib import admin\nfrom .models import Advert, Category, ImageAd\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n filter_horizontal = 'categories',\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-5": "from django.contrib import admin\nfrom .models import Advert, Category, ImageAd\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n filter_horizontal = \"categories\",\n\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
print("Convertidor de pies y pulgadas a centímetros")
pies = float(input("Escriba una cantidad de pies: "))
pulgadas = float(input("Escriba una cantidad de pulgadas: "))
cm = (pies * 12 + pulgadas) * 2.54;
print("{} pies y {} pulgadas son {} cm".format(pies, pulgadas, cm))
|
normal
|
{
"blob_id": "b0ab97f5c05cdeee4c01460109a76cef75ac72ce",
"index": 5342,
"step-1": "<mask token>\n",
"step-2": "print('Convertidor de pies y pulgadas a centímetros')\n<mask token>\nprint('{} pies y {} pulgadas son {} cm'.format(pies, pulgadas, cm))\n",
"step-3": "print('Convertidor de pies y pulgadas a centímetros')\npies = float(input('Escriba una cantidad de pies: '))\npulgadas = float(input('Escriba una cantidad de pulgadas: '))\ncm = (pies * 12 + pulgadas) * 2.54\nprint('{} pies y {} pulgadas son {} cm'.format(pies, pulgadas, cm))\n",
"step-4": "print(\"Convertidor de pies y pulgadas a centímetros\")\npies = float(input(\"Escriba una cantidad de pies: \"))\npulgadas = float(input(\"Escriba una cantidad de pulgadas: \"))\ncm = (pies * 12 + pulgadas) * 2.54;\nprint(\"{} pies y {} pulgadas son {} cm\".format(pies, pulgadas, cm))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
IS_TESTING = False
FOLDER_TO_ORGANIZE = ''
FOLDER_FOR_OTHERS = ''
FOLDER_TO_ORGANIZE_TEST = ''
LOG_FILE = ''
IGNORE_HIDDEN_FILES = True
FILES_DESTINATION = {
'images': ['.jpg', '.jpeg', '.png'],
'documents': ['.pdf', '.xlsx', '.docx', '.txt'],
'apps': ['.pkg', '.dmg', '.exe'],
'videos': ['.mp4', '.flv'],
'audios': ['.mp3'],
'compressions': ['.rar', '.zip'],
'scripts': ['.py', '.rb', '.js', '.html'],
}
|
normal
|
{
"blob_id": "83e2f9c56c45a288aabd777fb244089367649258",
"index": 1165,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nIS_TESTING = False\nFOLDER_TO_ORGANIZE = ''\nFOLDER_FOR_OTHERS = ''\nFOLDER_TO_ORGANIZE_TEST = ''\nLOG_FILE = ''\nIGNORE_HIDDEN_FILES = True\nFILES_DESTINATION = {'images': ['.jpg', '.jpeg', '.png'], 'documents': [\n '.pdf', '.xlsx', '.docx', '.txt'], 'apps': ['.pkg', '.dmg', '.exe'],\n 'videos': ['.mp4', '.flv'], 'audios': ['.mp3'], 'compressions': ['.rar',\n '.zip'], 'scripts': ['.py', '.rb', '.js', '.html']}\n",
"step-3": "import os\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nIS_TESTING = False\nFOLDER_TO_ORGANIZE = ''\nFOLDER_FOR_OTHERS = ''\nFOLDER_TO_ORGANIZE_TEST = ''\nLOG_FILE = ''\nIGNORE_HIDDEN_FILES = True\nFILES_DESTINATION = {'images': ['.jpg', '.jpeg', '.png'], 'documents': [\n '.pdf', '.xlsx', '.docx', '.txt'], 'apps': ['.pkg', '.dmg', '.exe'],\n 'videos': ['.mp4', '.flv'], 'audios': ['.mp3'], 'compressions': ['.rar',\n '.zip'], 'scripts': ['.py', '.rb', '.js', '.html']}\n",
"step-4": "import os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nIS_TESTING = False\nFOLDER_TO_ORGANIZE = ''\nFOLDER_FOR_OTHERS = ''\nFOLDER_TO_ORGANIZE_TEST = ''\nLOG_FILE = ''\nIGNORE_HIDDEN_FILES = True\n\nFILES_DESTINATION = {\n 'images': ['.jpg', '.jpeg', '.png'],\n 'documents': ['.pdf', '.xlsx', '.docx', '.txt'],\n 'apps': ['.pkg', '.dmg', '.exe'],\n 'videos': ['.mp4', '.flv'],\n 'audios': ['.mp3'],\n 'compressions': ['.rar', '.zip'],\n 'scripts': ['.py', '.rb', '.js', '.html'],\n}\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from flask_socketio import SocketIO
socket = SocketIO()
@socket.on('test')
def on_test(msg):
print 'got message'
|
normal
|
{
"blob_id": "7435aa6cd4eec5582be9f4a1dd75b0dfcadc4409",
"index": 5137,
"step-1": "from flask_socketio import SocketIO\n\nsocket = SocketIO()\n\[email protected]('test')\ndef on_test(msg):\n print 'got message'\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django import forms
class CommentForm(forms.Form):
name = forms.CharField(label='称呼')
email = forms.EmailField(label='邮箱')
content = forms.CharField(label='内容')
|
normal
|
{
"blob_id": "c2ff3c5e44fa361671a3fdb38060517bcc4bc82c",
"index": 2778,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CommentForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CommentForm(forms.Form):\n name = forms.CharField(label='称呼')\n email = forms.EmailField(label='邮箱')\n content = forms.CharField(label='内容')\n",
"step-4": "from django import forms\n\n\nclass CommentForm(forms.Form):\n name = forms.CharField(label='称呼')\n email = forms.EmailField(label='邮箱')\n content = forms.CharField(label='内容')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def profile_page_view(request, username):
current_user = request.user
user = CustomUser.objects.get(username=username)
profile = Profile.objects.get(user=user)
if current_user in profile.followers.all():
check = True
posts = user.post_set.all()
context = {'profile': profile, 'posts': posts, 'check': check}
return render(request, 'profile.html', context)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def profile_page_view(request, username):
current_user = request.user
user = CustomUser.objects.get(username=username)
profile = Profile.objects.get(user=user)
if current_user in profile.followers.all():
check = True
posts = user.post_set.all()
context = {'profile': profile, 'posts': posts, 'check': check}
return render(request, 'profile.html', context)
def follow_user_view(request, user1, user2):
follower = CustomUser.objects.get(username=user1)
to_follow = CustomUser.objects.get(username=user2)
follower_profile = Profile.objects.get(user=follower)
to_follow_profile = Profile.objects.get(user=to_follow)
if follower not in to_follow_profile.followers.all():
follower_profile.following.add(to_follow)
to_follow_profile.followers.add(follower)
follower_profile.following_count += 1
to_follow_profile.followers_count += 1
follower_profile.save()
to_follow_profile.save()
return redirect('profile', user2)
else:
return redirect('profile', user2)
<|reserved_special_token_1|>
from django.shortcuts import redirect, render
from users.models import CustomUser
from .models import Profile
def profile_page_view(request, username):
current_user = request.user
user = CustomUser.objects.get(username=username)
profile = Profile.objects.get(user=user)
if current_user in profile.followers.all():
check = True
posts = user.post_set.all()
context = {'profile': profile, 'posts': posts, 'check': check}
return render(request, 'profile.html', context)
def follow_user_view(request, user1, user2):
follower = CustomUser.objects.get(username=user1)
to_follow = CustomUser.objects.get(username=user2)
follower_profile = Profile.objects.get(user=follower)
to_follow_profile = Profile.objects.get(user=to_follow)
if follower not in to_follow_profile.followers.all():
follower_profile.following.add(to_follow)
to_follow_profile.followers.add(follower)
follower_profile.following_count += 1
to_follow_profile.followers_count += 1
follower_profile.save()
to_follow_profile.save()
return redirect('profile', user2)
else:
return redirect('profile', user2)
<|reserved_special_token_1|>
from django.shortcuts import redirect, render
from users.models import CustomUser
from .models import Profile
def profile_page_view(request, username):
current_user = request.user
user = CustomUser.objects.get(username=username)
profile = Profile.objects.get(user=user)
if current_user in profile.followers.all():
check = True
posts = user.post_set.all()
context = {
'profile' : profile,
'posts' : posts,
'check' : check,
}
return render(request, 'profile.html', context)
def follow_user_view(request, user1, user2):
follower = CustomUser.objects.get(username = user1)
to_follow = CustomUser.objects.get(username = user2)
follower_profile = Profile.objects.get(user = follower)
to_follow_profile = Profile.objects.get(user = to_follow)
if follower not in to_follow_profile.followers.all():
follower_profile.following.add(to_follow)
to_follow_profile.followers.add(follower)
follower_profile.following_count += 1
to_follow_profile.followers_count += 1
follower_profile.save()
to_follow_profile.save()
return redirect('profile', user2)
else:
return redirect('profile', user2)
|
flexible
|
{
"blob_id": "3caaa455cda0567b79ae063c777846157839d64f",
"index": 8548,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef profile_page_view(request, username):\n current_user = request.user\n user = CustomUser.objects.get(username=username)\n profile = Profile.objects.get(user=user)\n if current_user in profile.followers.all():\n check = True\n posts = user.post_set.all()\n context = {'profile': profile, 'posts': posts, 'check': check}\n return render(request, 'profile.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef profile_page_view(request, username):\n current_user = request.user\n user = CustomUser.objects.get(username=username)\n profile = Profile.objects.get(user=user)\n if current_user in profile.followers.all():\n check = True\n posts = user.post_set.all()\n context = {'profile': profile, 'posts': posts, 'check': check}\n return render(request, 'profile.html', context)\n\n\ndef follow_user_view(request, user1, user2):\n follower = CustomUser.objects.get(username=user1)\n to_follow = CustomUser.objects.get(username=user2)\n follower_profile = Profile.objects.get(user=follower)\n to_follow_profile = Profile.objects.get(user=to_follow)\n if follower not in to_follow_profile.followers.all():\n follower_profile.following.add(to_follow)\n to_follow_profile.followers.add(follower)\n follower_profile.following_count += 1\n to_follow_profile.followers_count += 1\n follower_profile.save()\n to_follow_profile.save()\n return redirect('profile', user2)\n else:\n return redirect('profile', user2)\n",
"step-4": "from django.shortcuts import redirect, render\nfrom users.models import CustomUser\nfrom .models import Profile\n\n\ndef profile_page_view(request, username):\n current_user = request.user\n user = CustomUser.objects.get(username=username)\n profile = Profile.objects.get(user=user)\n if current_user in profile.followers.all():\n check = True\n posts = user.post_set.all()\n context = {'profile': profile, 'posts': posts, 'check': check}\n return render(request, 'profile.html', context)\n\n\ndef follow_user_view(request, user1, user2):\n follower = CustomUser.objects.get(username=user1)\n to_follow = CustomUser.objects.get(username=user2)\n follower_profile = Profile.objects.get(user=follower)\n to_follow_profile = Profile.objects.get(user=to_follow)\n if follower not in to_follow_profile.followers.all():\n follower_profile.following.add(to_follow)\n to_follow_profile.followers.add(follower)\n follower_profile.following_count += 1\n to_follow_profile.followers_count += 1\n follower_profile.save()\n to_follow_profile.save()\n return redirect('profile', user2)\n else:\n return redirect('profile', user2)\n",
"step-5": "from django.shortcuts import redirect, render\nfrom users.models import CustomUser\nfrom .models import Profile\n\ndef profile_page_view(request, username):\n current_user = request.user\n user = CustomUser.objects.get(username=username)\n profile = Profile.objects.get(user=user)\n if current_user in profile.followers.all():\n check = True\n posts = user.post_set.all()\n context = {\n 'profile' : profile,\n 'posts' : posts,\n 'check' : check,\n }\n return render(request, 'profile.html', context)\n\ndef follow_user_view(request, user1, user2):\n follower = CustomUser.objects.get(username = user1)\n to_follow = CustomUser.objects.get(username = user2)\n follower_profile = Profile.objects.get(user = follower)\n to_follow_profile = Profile.objects.get(user = to_follow)\n if follower not in to_follow_profile.followers.all():\n follower_profile.following.add(to_follow)\n to_follow_profile.followers.add(follower)\n follower_profile.following_count += 1\n to_follow_profile.followers_count += 1\n follower_profile.save()\n to_follow_profile.save()\n return redirect('profile', user2)\n \n else:\n return redirect('profile', user2)\n\n\n \n\n\n\n\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class AsistenteDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de Asistente
"""
model = Asistente
template_name = 'carga_horaria/asistente/detalle_asistente.html'
class AsistenteCreateView(LoginRequiredMixin, CreateView):
model = Asistente
form_class = AsistenteForm
template_name = 'carga_horaria/asistente/nuevo_asistente.html'
success_url = reverse_lazy('carga-horaria:asistentes')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(AsistenteCreateView, self).get_form_kwargs(*args, **
kwargs)
colegio_pk = self.request.session.get('colegio__pk', None)
if colegio_pk:
kwargs.update({'user': self.request.user, 'colegio': colegio_pk,
'fundacion': Colegio.objects.get(pk=self.request.session.
get('colegio__pk', None)).fundacion.pk})
else:
kwargs.update({'user': self.request.user})
return kwargs
def form_valid(self, form):
asistente = form.save(commit=False)
asistente.persona, _ = Persona.objects.update_or_create(rut=form.
cleaned_data['rut'], defaults={'nombre': form.cleaned_data[
'nombre'], 'direccion': form.cleaned_data['direccion'],
'comuna': form.cleaned_data['comuna'], 'nacionalidad': form.
cleaned_data['nacionalidad'], 'telefono': form.cleaned_data[
'telefono'], 'email_personal': form.cleaned_data[
'email_personal'], 'email_institucional': form.cleaned_data[
'email_institucional'], 'estado_civil': form.cleaned_data[
'estado_civil'], 'discapacidad': form.cleaned_data[
'discapacidad'], 'recibe_pension': form.cleaned_data[
'recibe_pension'], 'adventista': form.cleaned_data['adventista'
], 'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})
asistente.save()
return redirect(reverse('carga-horaria:asistentes'))
class AsistenteUpdateView(LoginRequiredMixin, UpdateView):
model = Asistente
form_class = AsistenteForm
template_name = 'carga_horaria/asistente/editar_asistente.html'
def get_success_url(self):
return reverse('carga-horaria:asistente', kwargs={'pk': self.object.pk}
)
def form_valid(self, form):
asistente = form.save(commit=False)
asistente.persona, _ = Persona.objects.update_or_create(rut=form.
cleaned_data['rut'], defaults={'nombre': form.cleaned_data[
'nombre'], 'direccion': form.cleaned_data['direccion'],
'comuna': form.cleaned_data['comuna'], 'nacionalidad': form.
cleaned_data['nacionalidad'], 'telefono': form.cleaned_data[
'telefono'], 'email_personal': form.cleaned_data[
'email_personal'], 'email_institucional': form.cleaned_data[
'email_institucional'], 'estado_civil': form.cleaned_data[
'estado_civil'], 'discapacidad': form.cleaned_data[
'discapacidad'], 'recibe_pension': form.cleaned_data[
'recibe_pension'], 'adventista': form.cleaned_data['adventista'
], 'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})
asistente.save()
return redirect(self.get_success_url())
class AsistenteDeleteView(LoginRequiredMixin, DeleteView):
model = Asistente
success_url = reverse_lazy('carga-horaria:asistentes')
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
<|reserved_special_token_0|>
class AsignaturaBaseListView(LoginRequiredMixin, GetObjectsForUserMixin,
ListView):
"""
Listado de asignatura base
"""
model = AsignaturaBase
lookup = 'plan__colegio__pk'
template_name = 'carga_horaria/asignaturabase/listado_asignaturabase.html'
search_fields = ['nombre', 'plan']
paginate_by = 10
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]
ctx['nivel_actual'] = self.request.GET.get('nivel')
return ctx
def get_queryset(self):
qs = super().get_queryset()
nivel = self.request.GET.get('nivel')
if nivel:
qs = qs.filter(plan__nivel=nivel)
return qs
class AsignaturaBaseDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de asignatura base
"""
model = AsignaturaBase
template_name = 'carga_horaria/asignaturabase/detalle_asignaturabase.html'
class AsignaturaBaseCreateView(LoginRequiredMixin, CreateView):
model = AsignaturaBase
form_class = AsignaturaBaseForm
template_name = 'carga_horaria/asignaturabase/nuevo_asignaturabase.html'
success_url = reverse_lazy('carga-horaria:asignaturasbase')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(AsignaturaBaseCreateView, self).get_form_kwargs(*
args, **kwargs)
kwargs.update({'user': self.request.user, 'colegio': self.request.
session.get('colegio__pk', None)})
return kwargs
class AsignaturaBaseUpdateView(LoginRequiredMixin, UpdateView):
model = AsignaturaBase
form_class = AsignaturaBaseForm
template_name = 'carga_horaria/asignaturabase/editar_asignaturabase.html'
def get_success_url(self):
return reverse('carga-horaria:asignaturabase', kwargs={'pk': self.
object.pk})
class AsignaturaBaseDeleteView(LoginRequiredMixin, DeleteView):
model = AsignaturaBase
success_url = reverse_lazy('carga-horaria:asignaturasbase')
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
<|reserved_special_token_0|>
class AsignaturaListView(LoginRequiredMixin, ListView):
"""
Listado de asignatura
"""
model = Asignatura
template_name = 'carga_horaria/asignatura/listado_asignatura.html'
search_fields = ['base', 'periodo']
paginate_by = 10
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['levels'] = [(tag.name, tag.value) for tag in Nivel][::-1]
ctx['nivel_actual'] = self.request.GET.get('nivel')
return ctx
def get_queryset(self):
qs = super().get_queryset()
nivel = self.request.GET.get('nivel')
if nivel:
qs = qs.filter(base__plan__nivel=nivel)
periodo = self.request.GET.get('periodo')
if periodo:
qs = qs.filter(periodo__pk=periodo)
return qs
class AsignaturaDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de asignatura
"""
model = Asignatura
template_name = 'carga_horaria/asignatura/detalle_asignatura.html'
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['periodo'] = Periodo.objects.get(pk=self.kwargs['periodo_pk'])
return ctx
class AsignaturaCreateView(LoginRequiredMixin, CreateView):
model = Asignatura
form_class = AsignaturaCreateForm
template_name = 'carga_horaria/asignatura/nuevo_asignatura.html'
def form_valid(self, form):
periodo = Periodo.objects.get(pk=self.kwargs['pk'])
horas = form.cleaned_data['horas']
available = periodo.available
if horas > available:
form.add_error('horas',
'Horas superan el tiempo disponible ({})'.format(available))
return self.form_invalid(form)
else:
self.object = form.save()
self.object.periodos.add(periodo)
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[
'pk']})
class AsignaturaUpdateView(LoginRequiredMixin, UpdateView):
model = Asignatura
form_class = AsignaturaUpdateForm
template_name = 'carga_horaria/asignatura/editar_asignatura.html'
def get_success_url(self):
return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[
'periodo_pk']})
def form_valid(self, form):
periodo = Periodo.objects.get(pk=self.kwargs['periodo_pk'])
horas = form.cleaned_data['horas']
old_horas = Asignatura.objects.get(pk=self.object.pk).horas
delta = horas - old_horas
available = periodo.available
if delta > available:
form.add_error('horas',
'Horas superan el tiempo disponible ({})'.format(available +
old_horas))
return self.form_invalid(form)
elif self.object.base:
if periodo.colegio.jec:
horas_base = self.object.base.horas_jec
else:
horas_base = self.object.base.horas_nec
if horas < horas_base:
form.add_error('horas',
'Horas deben ser como mínimo las del plan de estudios original ({})'
.format(horas_base))
return self.form_invalid(form)
return super().form_valid(form)
class AsignaturaDeleteView(LoginRequiredMixin, DeleteView):
model = Asignatura
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
def get_success_url(self):
return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[
'periodo_pk']})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AsistenteListView(LoginRequiredMixin, SearchMixin,
GetObjectsForUserMixin, ListView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class AsistenteDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de Asistente
"""
model = Asistente
template_name = 'carga_horaria/asistente/detalle_asistente.html'
class AsistenteCreateView(LoginRequiredMixin, CreateView):
model = Asistente
form_class = AsistenteForm
template_name = 'carga_horaria/asistente/nuevo_asistente.html'
success_url = reverse_lazy('carga-horaria:asistentes')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(AsistenteCreateView, self).get_form_kwargs(*args, **
kwargs)
colegio_pk = self.request.session.get('colegio__pk', None)
if colegio_pk:
kwargs.update({'user': self.request.user, 'colegio': colegio_pk,
'fundacion': Colegio.objects.get(pk=self.request.session.
get('colegio__pk', None)).fundacion.pk})
else:
kwargs.update({'user': self.request.user})
return kwargs
def form_valid(self, form):
asistente = form.save(commit=False)
asistente.persona, _ = Persona.objects.update_or_create(rut=form.
cleaned_data['rut'], defaults={'nombre': form.cleaned_data[
'nombre'], 'direccion': form.cleaned_data['direccion'],
'comuna': form.cleaned_data['comuna'], 'nacionalidad': form.
cleaned_data['nacionalidad'], 'telefono': form.cleaned_data[
'telefono'], 'email_personal': form.cleaned_data[
'email_personal'], 'email_institucional': form.cleaned_data[
'email_institucional'], 'estado_civil': form.cleaned_data[
'estado_civil'], 'discapacidad': form.cleaned_data[
'discapacidad'], 'recibe_pension': form.cleaned_data[
'recibe_pension'], 'adventista': form.cleaned_data['adventista'
], 'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})
asistente.save()
return redirect(reverse('carga-horaria:asistentes'))
class AsistenteUpdateView(LoginRequiredMixin, UpdateView):
model = Asistente
form_class = AsistenteForm
template_name = 'carga_horaria/asistente/editar_asistente.html'
def get_success_url(self):
return reverse('carga-horaria:asistente', kwargs={'pk': self.object.pk}
)
def form_valid(self, form):
asistente = form.save(commit=False)
asistente.persona, _ = Persona.objects.update_or_create(rut=form.
cleaned_data['rut'], defaults={'nombre': form.cleaned_data[
'nombre'], 'direccion': form.cleaned_data['direccion'],
'comuna': form.cleaned_data['comuna'], 'nacionalidad': form.
cleaned_data['nacionalidad'], 'telefono': form.cleaned_data[
'telefono'], 'email_personal': form.cleaned_data[
'email_personal'], 'email_institucional': form.cleaned_data[
'email_institucional'], 'estado_civil': form.cleaned_data[
'estado_civil'], 'discapacidad': form.cleaned_data[
'discapacidad'], 'recibe_pension': form.cleaned_data[
'recibe_pension'], 'adventista': form.cleaned_data['adventista'
], 'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})
asistente.save()
return redirect(self.get_success_url())
class AsistenteDeleteView(LoginRequiredMixin, DeleteView):
model = Asistente
success_url = reverse_lazy('carga-horaria:asistentes')
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
<|reserved_special_token_0|>
class AsignaturaBaseListView(LoginRequiredMixin, GetObjectsForUserMixin,
ListView):
"""
Listado de asignatura base
"""
model = AsignaturaBase
lookup = 'plan__colegio__pk'
template_name = 'carga_horaria/asignaturabase/listado_asignaturabase.html'
search_fields = ['nombre', 'plan']
paginate_by = 10
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]
ctx['nivel_actual'] = self.request.GET.get('nivel')
return ctx
def get_queryset(self):
qs = super().get_queryset()
nivel = self.request.GET.get('nivel')
if nivel:
qs = qs.filter(plan__nivel=nivel)
return qs
class AsignaturaBaseDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de asignatura base
"""
model = AsignaturaBase
template_name = 'carga_horaria/asignaturabase/detalle_asignaturabase.html'
class AsignaturaBaseCreateView(LoginRequiredMixin, CreateView):
model = AsignaturaBase
form_class = AsignaturaBaseForm
template_name = 'carga_horaria/asignaturabase/nuevo_asignaturabase.html'
success_url = reverse_lazy('carga-horaria:asignaturasbase')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(AsignaturaBaseCreateView, self).get_form_kwargs(*
args, **kwargs)
kwargs.update({'user': self.request.user, 'colegio': self.request.
session.get('colegio__pk', None)})
return kwargs
class AsignaturaBaseUpdateView(LoginRequiredMixin, UpdateView):
model = AsignaturaBase
form_class = AsignaturaBaseForm
template_name = 'carga_horaria/asignaturabase/editar_asignaturabase.html'
def get_success_url(self):
return reverse('carga-horaria:asignaturabase', kwargs={'pk': self.
object.pk})
class AsignaturaBaseDeleteView(LoginRequiredMixin, DeleteView):
model = AsignaturaBase
success_url = reverse_lazy('carga-horaria:asignaturasbase')
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
<|reserved_special_token_0|>
class AsignaturaListView(LoginRequiredMixin, ListView):
"""
Listado de asignatura
"""
model = Asignatura
template_name = 'carga_horaria/asignatura/listado_asignatura.html'
search_fields = ['base', 'periodo']
paginate_by = 10
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['levels'] = [(tag.name, tag.value) for tag in Nivel][::-1]
ctx['nivel_actual'] = self.request.GET.get('nivel')
return ctx
def get_queryset(self):
qs = super().get_queryset()
nivel = self.request.GET.get('nivel')
if nivel:
qs = qs.filter(base__plan__nivel=nivel)
periodo = self.request.GET.get('periodo')
if periodo:
qs = qs.filter(periodo__pk=periodo)
return qs
class AsignaturaDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de asignatura
"""
model = Asignatura
template_name = 'carga_horaria/asignatura/detalle_asignatura.html'
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['periodo'] = Periodo.objects.get(pk=self.kwargs['periodo_pk'])
return ctx
class AsignaturaCreateView(LoginRequiredMixin, CreateView):
model = Asignatura
form_class = AsignaturaCreateForm
template_name = 'carga_horaria/asignatura/nuevo_asignatura.html'
def form_valid(self, form):
periodo = Periodo.objects.get(pk=self.kwargs['pk'])
horas = form.cleaned_data['horas']
available = periodo.available
if horas > available:
form.add_error('horas',
'Horas superan el tiempo disponible ({})'.format(available))
return self.form_invalid(form)
else:
self.object = form.save()
self.object.periodos.add(periodo)
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[
'pk']})
class AsignaturaUpdateView(LoginRequiredMixin, UpdateView):
model = Asignatura
form_class = AsignaturaUpdateForm
template_name = 'carga_horaria/asignatura/editar_asignatura.html'
def get_success_url(self):
return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[
'periodo_pk']})
def form_valid(self, form):
periodo = Periodo.objects.get(pk=self.kwargs['periodo_pk'])
horas = form.cleaned_data['horas']
old_horas = Asignatura.objects.get(pk=self.object.pk).horas
delta = horas - old_horas
available = periodo.available
if delta > available:
form.add_error('horas',
'Horas superan el tiempo disponible ({})'.format(available +
old_horas))
return self.form_invalid(form)
elif self.object.base:
if periodo.colegio.jec:
horas_base = self.object.base.horas_jec
else:
horas_base = self.object.base.horas_nec
if horas < horas_base:
form.add_error('horas',
'Horas deben ser como mínimo las del plan de estudios original ({})'
.format(horas_base))
return self.form_invalid(form)
return super().form_valid(form)
class AsignaturaDeleteView(LoginRequiredMixin, DeleteView):
model = Asignatura
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
def get_success_url(self):
return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[
'periodo_pk']})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProfesorDeleteView(LoginRequiredMixin, DeleteView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class AsistenteListView(LoginRequiredMixin, SearchMixin,
GetObjectsForUserMixin, ListView):
"""
Listado de asistentes
"""
model = Asistente
lookup = 'colegio__pk'
template_name = 'carga_horaria/asistente/listado_asistente.html'
search_fields = ['nombre', 'horas']
paginate_by = 6
class AsistenteDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de Asistente
"""
model = Asistente
template_name = 'carga_horaria/asistente/detalle_asistente.html'
class AsistenteCreateView(LoginRequiredMixin, CreateView):
model = Asistente
form_class = AsistenteForm
template_name = 'carga_horaria/asistente/nuevo_asistente.html'
success_url = reverse_lazy('carga-horaria:asistentes')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(AsistenteCreateView, self).get_form_kwargs(*args, **
kwargs)
colegio_pk = self.request.session.get('colegio__pk', None)
if colegio_pk:
kwargs.update({'user': self.request.user, 'colegio': colegio_pk,
'fundacion': Colegio.objects.get(pk=self.request.session.
get('colegio__pk', None)).fundacion.pk})
else:
kwargs.update({'user': self.request.user})
return kwargs
def form_valid(self, form):
asistente = form.save(commit=False)
asistente.persona, _ = Persona.objects.update_or_create(rut=form.
cleaned_data['rut'], defaults={'nombre': form.cleaned_data[
'nombre'], 'direccion': form.cleaned_data['direccion'],
'comuna': form.cleaned_data['comuna'], 'nacionalidad': form.
cleaned_data['nacionalidad'], 'telefono': form.cleaned_data[
'telefono'], 'email_personal': form.cleaned_data[
'email_personal'], 'email_institucional': form.cleaned_data[
'email_institucional'], 'estado_civil': form.cleaned_data[
'estado_civil'], 'discapacidad': form.cleaned_data[
'discapacidad'], 'recibe_pension': form.cleaned_data[
'recibe_pension'], 'adventista': form.cleaned_data['adventista'
], 'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})
asistente.save()
return redirect(reverse('carga-horaria:asistentes'))
class AsistenteUpdateView(LoginRequiredMixin, UpdateView):
model = Asistente
form_class = AsistenteForm
template_name = 'carga_horaria/asistente/editar_asistente.html'
def get_success_url(self):
return reverse('carga-horaria:asistente', kwargs={'pk': self.object.pk}
)
def form_valid(self, form):
asistente = form.save(commit=False)
asistente.persona, _ = Persona.objects.update_or_create(rut=form.
cleaned_data['rut'], defaults={'nombre': form.cleaned_data[
'nombre'], 'direccion': form.cleaned_data['direccion'],
'comuna': form.cleaned_data['comuna'], 'nacionalidad': form.
cleaned_data['nacionalidad'], 'telefono': form.cleaned_data[
'telefono'], 'email_personal': form.cleaned_data[
'email_personal'], 'email_institucional': form.cleaned_data[
'email_institucional'], 'estado_civil': form.cleaned_data[
'estado_civil'], 'discapacidad': form.cleaned_data[
'discapacidad'], 'recibe_pension': form.cleaned_data[
'recibe_pension'], 'adventista': form.cleaned_data['adventista'
], 'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})
asistente.save()
return redirect(self.get_success_url())
class AsistenteDeleteView(LoginRequiredMixin, DeleteView):
model = Asistente
success_url = reverse_lazy('carga-horaria:asistentes')
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
<|reserved_special_token_0|>
class AsignaturaBaseListView(LoginRequiredMixin, GetObjectsForUserMixin,
ListView):
"""
Listado de asignatura base
"""
model = AsignaturaBase
lookup = 'plan__colegio__pk'
template_name = 'carga_horaria/asignaturabase/listado_asignaturabase.html'
search_fields = ['nombre', 'plan']
paginate_by = 10
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]
ctx['nivel_actual'] = self.request.GET.get('nivel')
return ctx
def get_queryset(self):
qs = super().get_queryset()
nivel = self.request.GET.get('nivel')
if nivel:
qs = qs.filter(plan__nivel=nivel)
return qs
class AsignaturaBaseDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de asignatura base
"""
model = AsignaturaBase
template_name = 'carga_horaria/asignaturabase/detalle_asignaturabase.html'
class AsignaturaBaseCreateView(LoginRequiredMixin, CreateView):
model = AsignaturaBase
form_class = AsignaturaBaseForm
template_name = 'carga_horaria/asignaturabase/nuevo_asignaturabase.html'
success_url = reverse_lazy('carga-horaria:asignaturasbase')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(AsignaturaBaseCreateView, self).get_form_kwargs(*
args, **kwargs)
kwargs.update({'user': self.request.user, 'colegio': self.request.
session.get('colegio__pk', None)})
return kwargs
class AsignaturaBaseUpdateView(LoginRequiredMixin, UpdateView):
model = AsignaturaBase
form_class = AsignaturaBaseForm
template_name = 'carga_horaria/asignaturabase/editar_asignaturabase.html'
def get_success_url(self):
return reverse('carga-horaria:asignaturabase', kwargs={'pk': self.
object.pk})
class AsignaturaBaseDeleteView(LoginRequiredMixin, DeleteView):
model = AsignaturaBase
success_url = reverse_lazy('carga-horaria:asignaturasbase')
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
<|reserved_special_token_0|>
class AsignaturaListView(LoginRequiredMixin, ListView):
"""
Listado de asignatura
"""
model = Asignatura
template_name = 'carga_horaria/asignatura/listado_asignatura.html'
search_fields = ['base', 'periodo']
paginate_by = 10
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['levels'] = [(tag.name, tag.value) for tag in Nivel][::-1]
ctx['nivel_actual'] = self.request.GET.get('nivel')
return ctx
def get_queryset(self):
qs = super().get_queryset()
nivel = self.request.GET.get('nivel')
if nivel:
qs = qs.filter(base__plan__nivel=nivel)
periodo = self.request.GET.get('periodo')
if periodo:
qs = qs.filter(periodo__pk=periodo)
return qs
class AsignaturaDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de asignatura
"""
model = Asignatura
template_name = 'carga_horaria/asignatura/detalle_asignatura.html'
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['periodo'] = Periodo.objects.get(pk=self.kwargs['periodo_pk'])
return ctx
class AsignaturaCreateView(LoginRequiredMixin, CreateView):
model = Asignatura
form_class = AsignaturaCreateForm
template_name = 'carga_horaria/asignatura/nuevo_asignatura.html'
def form_valid(self, form):
periodo = Periodo.objects.get(pk=self.kwargs['pk'])
horas = form.cleaned_data['horas']
available = periodo.available
if horas > available:
form.add_error('horas',
'Horas superan el tiempo disponible ({})'.format(available))
return self.form_invalid(form)
else:
self.object = form.save()
self.object.periodos.add(periodo)
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[
'pk']})
class AsignaturaUpdateView(LoginRequiredMixin, UpdateView):
model = Asignatura
form_class = AsignaturaUpdateForm
template_name = 'carga_horaria/asignatura/editar_asignatura.html'
def get_success_url(self):
return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[
'periodo_pk']})
def form_valid(self, form):
periodo = Periodo.objects.get(pk=self.kwargs['periodo_pk'])
horas = form.cleaned_data['horas']
old_horas = Asignatura.objects.get(pk=self.object.pk).horas
delta = horas - old_horas
available = periodo.available
if delta > available:
form.add_error('horas',
'Horas superan el tiempo disponible ({})'.format(available +
old_horas))
return self.form_invalid(form)
elif self.object.base:
if periodo.colegio.jec:
horas_base = self.object.base.horas_jec
else:
horas_base = self.object.base.horas_nec
if horas < horas_base:
form.add_error('horas',
'Horas deben ser como mínimo las del plan de estudios original ({})'
.format(horas_base))
return self.form_invalid(form)
return super().form_valid(form)
class AsignaturaDeleteView(LoginRequiredMixin, DeleteView):
model = Asignatura
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
def get_success_url(self):
return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[
'periodo_pk']})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProfesorListView(LoginRequiredMixin, SearchMixin,
GetObjectsForUserMixin, ListView):
"""
Listado de profesores
"""
model = Profesor
lookup = 'colegio__pk'
template_name = 'carga_horaria/profesor/listado_profesor.html'
search_fields = ['nombre', 'horas']
paginate_by = 6
class ProfesorDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de Profesor
"""
model = Profesor
template_name = 'carga_horaria/profesor/detalle_profesor.html'
class ProfesorCreateView(LoginRequiredMixin, CreateView):
model = Profesor
form_class = ProfesorForm
template_name = 'carga_horaria/profesor/nuevo_profesor.html'
success_url = reverse_lazy('carga-horaria:profesores')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(ProfesorCreateView, self).get_form_kwargs(*args, **
kwargs)
colegio_pk = self.request.session.get('colegio__pk', None)
if colegio_pk:
from django.db.models import Q
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render, redirect
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from carga_horaria.models import Profesor, AsignaturaBase, Asignatura, Asistente
from carga_horaria.formsAlexis import ProfesorForm, AsignaturaBaseForm, AsignaturaCreateForm, AsignaturaUpdateForm, AsistenteForm
from django.core.urlresolvers import reverse_lazy, reverse
from guardian.shortcuts import get_objects_for_user
from .models import Persona
from .models import Fundacion
from .models import Colegio
from .models import Periodo
from .models import Nivel
class LevelFilterMixin(object):
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['levels'] = [(tag.name, tag.value) for tag in Nivel][::-1]
ctx['nivel_actual'] = self.request.GET.get('nivel')
return ctx
def get_queryset(self):
qs = super().get_queryset()
nivel = self.request.GET.get('nivel')
if nivel:
qs = qs.filter(plan__nivel=nivel)
return qs
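# Usage sketch (hypothetical view, not wired into urls.py): the list views
# further down re-implement this nivel filter inline, but any ListView over a
# model reachable through a 'plan' foreign key could simply mix this in.
class AsignaturaBasePorNivelListView(LoginRequiredMixin, LevelFilterMixin, ListView):
    model = AsignaturaBase
    template_name = 'carga_horaria/asignaturabase/listado_asignaturabase.html'
    paginate_by = 10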
# FIXME: leaving this as it is for now, but it is still possible for somebody
# to poke object ids in the URL and see objects they should not see. Fix this!
class SearchMixin(object):
def get_queryset(self):
qs = super(SearchMixin, self).get_queryset()
q = self.request.GET.get('q', None)
if q:
if qs.model == Profesor:
qs = qs.filter(Q(persona__nombre__unaccent__icontains=q) | Q(persona__rut__unaccent__icontains=q) | Q(asignacionextra__descripcion__unaccent__icontains=q) | Q(asignacionnoaula__descripcion__unaccent__icontains=q))
else:
qs = qs.filter(Q(persona__nombre__unaccent__icontains=q) | Q(persona__rut__unaccent__icontains=q) | Q(asignacionasistente__descripcion__unaccent__icontains=q) | Q(funcion__unaccent__icontains=q))
return qs
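# Note on the __unaccent lookups used above: they require PostgreSQL with
# django.contrib.postgres in INSTALLED_APPS and the unaccent extension
# enabled, typically through a one-off migration along these lines (sketch,
# not part of this app):
#
#     from django.db import migrations
#     from django.contrib.postgres.operations import UnaccentExtension
#
#     class Migration(migrations.Migration):
#         dependencies = []
#         operations = [UnaccentExtension()]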
def get_for_user(request, qs, lookup, user):
periodo = request.session.get('periodo', 2020)
if not user.is_superuser:
colegios = [c.pk for c in get_objects_for_user(user, "carga_horaria.change_colegio")]
# new logic for colegio switcher
selected = request.session.get('colegio__pk', None)
if selected:
colegios = [selected]
# end
kwargs = {"{}__in".format(lookup): colegios,
"{}periode".format(lookup[:-2]): periodo}
return qs.filter(**kwargs).distinct()
else:
colegios = [c.pk for c in Colegio.objects.all()]
# new logic for colegio switcher
selected = request.session.get('colegio__pk', None)
if selected:
colegios = [selected]
# end
kwargs = {"{}__in".format(lookup): colegios,
"{}periode".format(lookup[:-2]): periodo}
return qs.filter(**kwargs).distinct()
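# Convenience wrapper sketch (hypothetical helper; nothing in this module
# calls get_for_user directly, and GetObjectsForUserMixin below repeats the
# same logic for class-based views). It shows how a function-based view could
# scope a queryset to the colegios the current user is allowed to manage.
def profesores_for_request(request):
    """Return the Profesor queryset limited to the requesting user's colegios."""
    return get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)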
class GetObjectsForUserMixin(object):
def get_queryset(self):
qs = super(GetObjectsForUserMixin, self).get_queryset()
periodo = self.request.session.get('periodo', 2020)
if not self.request.user.is_superuser:
colegios = [c.pk for c in get_objects_for_user(self.request.user, "carga_horaria.change_colegio")]
# new logic for colegio switcher
selected = self.request.session.get('colegio__pk', None)
if selected:
colegios = [selected]
# end
kwargs = {"{}__in".format(self.lookup): colegios,
"{}periode".format(self.lookup[:-2]): periodo}
return qs.filter(**kwargs).distinct()
else:
colegios = [c.pk for c in Colegio.objects.all()]
# new logic for colegio switcher
selected = self.request.session.get('colegio__pk', None)
if selected:
colegios = [selected]
# end
kwargs = {"{}__in".format(self.lookup): colegios,
"{}periode".format(self.lookup[:-2]): periodo}
return qs.filter(**kwargs).distinct()
class ObjPermissionRequiredMixin(object):
def get_object(self, *args, **kwargs):
obj = super(ObjPermissionRequiredMixin, self).get_object(*args, **kwargs)
if self.request.user.has_perm(self.permission, obj):
return obj
else:
raise Http404
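# Sketch of one way to address the FIXME above (hypothetical view, not routed
# anywhere): guard a DetailView with a per-object permission so that guessing
# primary keys in the URL results in a 404. The permission codename is an
# assumption based on Django's default '<app_label>.change_<model>' pattern.
class ProfesorProtegidoDetailView(LoginRequiredMixin, ObjPermissionRequiredMixin, DetailView):
    model = Profesor
    permission = 'carga_horaria.change_profesor'
    template_name = 'carga_horaria/profesor/detalle_profesor.html'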
"""
Profesor CRUD
"""
class ProfesorListView(LoginRequiredMixin, SearchMixin, GetObjectsForUserMixin, ListView):
"""
    List of profesores
"""
model = Profesor
lookup = 'colegio__pk'
template_name = 'carga_horaria/profesor/listado_profesor.html'
search_fields = ['nombre', 'horas']
paginate_by = 6
class ProfesorDetailView(LoginRequiredMixin, DetailView):
"""
    Profesor detail
"""
model = Profesor
template_name = 'carga_horaria/profesor/detalle_profesor.html'
class ProfesorCreateView(LoginRequiredMixin, CreateView):
model = Profesor
form_class = ProfesorForm
template_name = 'carga_horaria/profesor/nuevo_profesor.html'
success_url = reverse_lazy('carga-horaria:profesores')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(ProfesorCreateView, self).get_form_kwargs(*args, **kwargs)
colegio_pk = self.request.session.get('colegio__pk', None)
if colegio_pk:
kwargs.update({'user': self.request.user,
'colegio': colegio_pk,
'fundacion': Colegio.objects.get(pk=self.request.session.get('colegio__pk', None)).fundacion.pk})
else:
kwargs.update({'user': self.request.user})
return kwargs
def form_valid(self, form):
profesor = form.save(commit=False)
profesor.persona, _ = Persona.objects.update_or_create(rut=form.cleaned_data['rut'],
defaults={'nombre': form.cleaned_data['nombre'],
'direccion': form.cleaned_data['direccion'],
'comuna': form.cleaned_data['comuna'],
'nacionalidad': form.cleaned_data['nacionalidad'],
'telefono': form.cleaned_data['telefono'],
'email_personal': form.cleaned_data['email_personal'],
'email_institucional': form.cleaned_data['email_institucional'],
'estado_civil': form.cleaned_data['estado_civil'],
'discapacidad': form.cleaned_data['discapacidad'],
'recibe_pension': form.cleaned_data['recibe_pension'],
'adventista': form.cleaned_data['adventista'],
'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})
profesor.save()
return redirect(reverse('carga-horaria:profesores'))
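# The extra 'user', 'colegio' and 'fundacion' kwargs injected by
# get_form_kwargs() above must be consumed on the form side. ProfesorForm
# lives in formsAlexis and is not shown here; an assumed (not verified)
# shape of its __init__ would be:
#
#     class ProfesorForm(forms.ModelForm):
#         def __init__(self, *args, **kwargs):
#             self.user = kwargs.pop('user', None)
#             self.colegio = kwargs.pop('colegio', None)
#             self.fundacion = kwargs.pop('fundacion', None)
#             super().__init__(*args, **kwargs)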
class ProfesorUpdateView(LoginRequiredMixin, UpdateView):
model = Profesor
form_class = ProfesorForm
template_name = 'carga_horaria/profesor/editar_profesor.html'
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(ProfesorUpdateView, self).get_form_kwargs(*args, **kwargs)
colegio_pk = self.request.session.get('colegio__pk', None)
if colegio_pk:
kwargs.update({'user': self.request.user,
'colegio': colegio_pk,
'fundacion': Colegio.objects.get(pk=self.request.session.get('colegio__pk', None)).fundacion.pk})
else:
kwargs.update({'user': self.request.user})
return kwargs
def form_valid(self, form):
profesor = form.save(commit=False)
profesor.persona, _ = Persona.objects.update_or_create(rut=form.cleaned_data['rut'],
defaults={'nombre': form.cleaned_data['nombre'],
'direccion': form.cleaned_data['direccion'],
'comuna': form.cleaned_data['comuna'],
'nacionalidad': form.cleaned_data['nacionalidad'],
'telefono': form.cleaned_data['telefono'],
'email_personal': form.cleaned_data['email_personal'],
'email_institucional': form.cleaned_data['email_institucional'],
'estado_civil': form.cleaned_data['estado_civil'],
'discapacidad': form.cleaned_data['discapacidad'],
'recibe_pension': form.cleaned_data['recibe_pension'],
'adventista': form.cleaned_data['adventista'],
'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})
profesor.save()
return redirect(self.get_success_url())
def get_success_url(self):
return reverse(
'carga-horaria:profesor',
kwargs={
'pk': self.object.pk,
}
)
class ProfesorDeleteView(LoginRequiredMixin, DeleteView):
model = Profesor
success_url = reverse_lazy('carga-horaria:profesores')
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
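# Refactor sketch (hypothetical mixin, not used by the views in this module):
# the Persona.objects.update_or_create(...) block is repeated verbatim in the
# Create/Update views for Profesor and Asistente; a shared mixin could keep
# the field list in one place. Field names are taken from the cleaned_data
# keys already used in those views.
class PersonaFormValidMixin(object):
    persona_fields = ['nombre', 'direccion', 'comuna', 'nacionalidad',
                      'telefono', 'email_personal', 'email_institucional',
                      'estado_civil', 'discapacidad', 'recibe_pension',
                      'adventista', 'fecha_nacimiento']

    def form_valid(self, form):
        obj = form.save(commit=False)
        defaults = {f: form.cleaned_data[f] for f in self.persona_fields}
        obj.persona, _ = Persona.objects.update_or_create(
            rut=form.cleaned_data['rut'], defaults=defaults)
        obj.save()
        return redirect(self.get_success_url())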
# """
# Comienzo Crud Curso
# """
# class CursoListView(ListView):
# """
# Listado de cursos
# """
# model = Curso
# template_name = 'carga_horaria/curso/listado_curso.html'
# search_fields = ['periodo', 'letra']
# paginate_by = 6
# class CursoDetailView(DetailView):
# """
# Detalle de curso
# """
# model = Curso
# template_name = 'carga_horaria/curso/detalle_curso.html'
# class CursoCreateView(CreateView):
# model = Curso
# form_class = CursoForm
# template_name = 'carga_horaria/curso/nuevo_curso.html'
# success_url = reverse_lazy('carga-horaria:cursos')
# class CursoUpdateView(UpdateView):
# model = Curso
# form_class = CursoForm
# template_name = 'carga_horaria/curso/editar_curso.html'
# def get_success_url(self):
# return reverse(
# 'carga-horaria:curso',
# kwargs={
# 'pk': self.object.pk,
# }
# )
# class CursoDeleteView(DeleteView):
# model = Curso
# success_url = reverse_lazy('carga-horaria:cursos')
# def get(self, request, *args, **kwargs):
# return self.post(request, *args, **kwargs)
"""
Asistente CRUD
"""
class AsistenteListView(LoginRequiredMixin, SearchMixin, GetObjectsForUserMixin, ListView):
"""
    List of asistentes
"""
model = Asistente
lookup = 'colegio__pk'
template_name = 'carga_horaria/asistente/listado_asistente.html'
search_fields = ['nombre', 'horas']
paginate_by = 6
class AsistenteDetailView(LoginRequiredMixin, DetailView):
"""
    Asistente detail
"""
model = Asistente
template_name = 'carga_horaria/asistente/detalle_asistente.html'
class AsistenteCreateView(LoginRequiredMixin, CreateView):
model = Asistente
form_class = AsistenteForm
template_name = 'carga_horaria/asistente/nuevo_asistente.html'
success_url = reverse_lazy('carga-horaria:asistentes')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(AsistenteCreateView, self).get_form_kwargs(*args, **kwargs)
colegio_pk = self.request.session.get('colegio__pk', None)
if colegio_pk:
kwargs.update({'user': self.request.user,
'colegio': colegio_pk,
'fundacion': Colegio.objects.get(pk=self.request.session.get('colegio__pk', None)).fundacion.pk})
else:
kwargs.update({'user': self.request.user})
return kwargs
def form_valid(self, form):
asistente = form.save(commit=False)
asistente.persona, _ = Persona.objects.update_or_create(rut=form.cleaned_data['rut'],
defaults={'nombre': form.cleaned_data['nombre'],
'direccion': form.cleaned_data['direccion'],
'comuna': form.cleaned_data['comuna'],
'nacionalidad': form.cleaned_data['nacionalidad'],
'telefono': form.cleaned_data['telefono'],
'email_personal': form.cleaned_data['email_personal'],
'email_institucional': form.cleaned_data['email_institucional'],
'estado_civil': form.cleaned_data['estado_civil'],
'discapacidad': form.cleaned_data['discapacidad'],
'recibe_pension': form.cleaned_data['recibe_pension'],
'adventista': form.cleaned_data['adventista'],
'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})
asistente.save()
return redirect(reverse('carga-horaria:asistentes'))
class AsistenteUpdateView(LoginRequiredMixin, UpdateView):
model = Asistente
form_class = AsistenteForm
template_name = 'carga_horaria/asistente/editar_asistente.html'
def get_success_url(self):
return reverse(
'carga-horaria:asistente',
kwargs={
'pk': self.object.pk,
}
)
def form_valid(self, form):
asistente = form.save(commit=False)
asistente.persona, _ = Persona.objects.update_or_create(rut=form.cleaned_data['rut'],
defaults={'nombre': form.cleaned_data['nombre'],
'direccion': form.cleaned_data['direccion'],
'comuna': form.cleaned_data['comuna'],
'nacionalidad': form.cleaned_data['nacionalidad'],
'telefono': form.cleaned_data['telefono'],
'email_personal': form.cleaned_data['email_personal'],
'email_institucional': form.cleaned_data['email_institucional'],
'estado_civil': form.cleaned_data['estado_civil'],
'discapacidad': form.cleaned_data['discapacidad'],
'recibe_pension': form.cleaned_data['recibe_pension'],
'adventista': form.cleaned_data['adventista'],
'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})
asistente.save()
return redirect(self.get_success_url())
class AsistenteDeleteView(LoginRequiredMixin, DeleteView):
model = Asistente
success_url = reverse_lazy('carga-horaria:asistentes')
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
"""
AsignaturaBase CRUD
"""
class AsignaturaBaseListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):
"""
    List of asignaturas base
"""
model = AsignaturaBase
lookup = 'plan__colegio__pk'
template_name = 'carga_horaria/asignaturabase/listado_asignaturabase.html'
search_fields = ['nombre', 'plan']
paginate_by = 10
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]
ctx['nivel_actual'] = self.request.GET.get('nivel')
return ctx
def get_queryset(self):
qs = super().get_queryset()
nivel = self.request.GET.get('nivel')
if nivel:
qs = qs.filter(plan__nivel=nivel)
return qs
class AsignaturaBaseDetailView(LoginRequiredMixin, DetailView):
"""
    AsignaturaBase detail
"""
model = AsignaturaBase
template_name = 'carga_horaria/asignaturabase/detalle_asignaturabase.html'
class AsignaturaBaseCreateView(LoginRequiredMixin, CreateView):
model = AsignaturaBase
form_class = AsignaturaBaseForm
template_name = 'carga_horaria/asignaturabase/nuevo_asignaturabase.html'
success_url = reverse_lazy('carga-horaria:asignaturasbase')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(AsignaturaBaseCreateView, self).get_form_kwargs(*args, **kwargs)
kwargs.update({'user': self.request.user,
'colegio': self.request.session.get('colegio__pk', None)})
return kwargs
class AsignaturaBaseUpdateView(LoginRequiredMixin, UpdateView):
model = AsignaturaBase
form_class = AsignaturaBaseForm
template_name = 'carga_horaria/asignaturabase/editar_asignaturabase.html'
def get_success_url(self):
return reverse(
'carga-horaria:asignaturabase',
kwargs={
'pk': self.object.pk,
}
)
class AsignaturaBaseDeleteView(LoginRequiredMixin, DeleteView):
model = AsignaturaBase
success_url = reverse_lazy('carga-horaria:asignaturasbase')
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
"""
Asignatura CRUD
"""
class AsignaturaListView(LoginRequiredMixin, ListView):
"""
    List of asignaturas
"""
model = Asignatura
template_name = 'carga_horaria/asignatura/listado_asignatura.html'
search_fields = ['base', 'periodo']
paginate_by = 10
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['levels'] = [(tag.name, tag.value) for tag in Nivel][::-1]
ctx['nivel_actual'] = self.request.GET.get('nivel')
return ctx
def get_queryset(self):
qs = super().get_queryset()
nivel = self.request.GET.get('nivel')
if nivel:
qs = qs.filter(base__plan__nivel=nivel)
periodo = self.request.GET.get('periodo')
if periodo:
qs = qs.filter(periodo__pk=periodo)
return qs
class AsignaturaDetailView(LoginRequiredMixin, DetailView):
"""
    Asignatura detail
"""
model = Asignatura
template_name = 'carga_horaria/asignatura/detalle_asignatura.html'
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['periodo'] = Periodo.objects.get(pk=self.kwargs['periodo_pk'])
return ctx
class AsignaturaCreateView(LoginRequiredMixin, CreateView):
model = Asignatura
form_class = AsignaturaCreateForm
template_name = 'carga_horaria/asignatura/nuevo_asignatura.html'
def form_valid(self, form):
# dirty validation
periodo = Periodo.objects.get(pk=self.kwargs['pk'])
horas = form.cleaned_data['horas']
available = periodo.available
if horas > available:
form.add_error('horas', "Horas superan el tiempo disponible ({})".format(available))
return self.form_invalid(form)
else:
self.object = form.save()
self.object.periodos.add(periodo)
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse(
'carga-horaria:periodo',
kwargs={
'pk': self.kwargs['pk'],
}
)
class AsignaturaUpdateView(LoginRequiredMixin, UpdateView):
model = Asignatura
form_class = AsignaturaUpdateForm
template_name = 'carga_horaria/asignatura/editar_asignatura.html'
def get_success_url(self):
return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs['periodo_pk']})
def form_valid(self, form):
# dirty validation
periodo = Periodo.objects.get(pk=self.kwargs['periodo_pk'])
horas = form.cleaned_data['horas']
old_horas = Asignatura.objects.get(pk=self.object.pk).horas
delta = horas - old_horas
available = periodo.available
if delta > available:
form.add_error('horas', "Horas superan el tiempo disponible ({})".format(available + old_horas))
return self.form_invalid(form)
elif self.object.base:
if periodo.colegio.jec:
horas_base = self.object.base.horas_jec
else:
horas_base = self.object.base.horas_nec
if horas < horas_base:
form.add_error('horas', "Horas deben ser como mínimo las del plan de estudios original ({})".format(horas_base))
return self.form_invalid(form)
return super().form_valid(form)
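# Worked example of the delta check above (illustrative numbers): if the
# periodo has available = 4 spare hours and the asignatura currently uses
# old_horas = 6, then raising horas to 12 gives delta = 6 > 4, so the form
# error reports the real ceiling of available + old_horas = 10 hours.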
class AsignaturaDeleteView(LoginRequiredMixin, DeleteView):
model = Asignatura
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
def get_success_url(self):
return reverse(
'carga-horaria:periodo',
kwargs={
'pk': self.kwargs['periodo_pk'],
}
)
template_name = 'carga_horaria/asignatura/nuevo_asignatura.html'\n\n def form_valid(self, form):\n periodo = Periodo.objects.get(pk=self.kwargs['pk'])\n horas = form.cleaned_data['horas']\n available = periodo.available\n if horas > available:\n form.add_error('horas',\n 'Horas superan el tiempo disponible ({})'.format(available))\n return self.form_invalid(form)\n else:\n self.object = form.save()\n self.object.periodos.add(periodo)\n return HttpResponseRedirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[\n 'pk']})\n\n\nclass AsignaturaUpdateView(LoginRequiredMixin, UpdateView):\n model = Asignatura\n form_class = AsignaturaUpdateForm\n template_name = 'carga_horaria/asignatura/editar_asignatura.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[\n 'periodo_pk']})\n\n def form_valid(self, form):\n periodo = Periodo.objects.get(pk=self.kwargs['periodo_pk'])\n horas = form.cleaned_data['horas']\n old_horas = Asignatura.objects.get(pk=self.object.pk).horas\n delta = horas - old_horas\n available = periodo.available\n if delta > available:\n form.add_error('horas',\n 'Horas superan el tiempo disponible ({})'.format(available +\n old_horas))\n return self.form_invalid(form)\n elif self.object.base:\n if periodo.colegio.jec:\n horas_base = self.object.base.horas_jec\n else:\n horas_base = self.object.base.horas_nec\n if horas < horas_base:\n form.add_error('horas',\n 'Horas deben ser como mínimo las del plan de estudios original ({})'\n .format(horas_base))\n return self.form_invalid(form)\n return super().form_valid(form)\n\n\nclass AsignaturaDeleteView(LoginRequiredMixin, DeleteView):\n model = Asignatura\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs[\n 'periodo_pk']})\n",
"step-5": "from django.db.models import Q\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom carga_horaria.models import Profesor, AsignaturaBase, Asignatura, Asistente\nfrom carga_horaria.formsAlexis import ProfesorForm, AsignaturaBaseForm, AsignaturaCreateForm, AsignaturaUpdateForm, AsistenteForm\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom guardian.shortcuts import get_objects_for_user\nfrom .models import Persona\nfrom .models import Fundacion\nfrom .models import Colegio\nfrom .models import Periodo\nfrom .models import Nivel\n\n\nclass LevelFilterMixin(object):\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['levels'] = [(tag.name, tag.value) for tag in Nivel][::-1]\n ctx['nivel_actual'] = self.request.GET.get('nivel')\n return ctx\n\n def get_queryset(self):\n qs = super().get_queryset()\n\n nivel = self.request.GET.get('nivel')\n if nivel:\n qs = qs.filter(plan__nivel=nivel)\n\n return qs\n\n\n\n# FIXME: I will leave it like this for now,\n# but it's still possible for somebody to poke object ids to see what shouldn't see\n# fix this!!1\n\n\nclass SearchMixin(object):\n def get_queryset(self):\n qs = super(SearchMixin, self).get_queryset()\n q = self.request.GET.get('q', None)\n if q:\n if qs.model == Profesor:\n qs = qs.filter(Q(persona__nombre__unaccent__icontains=q) | Q(persona__rut__unaccent__icontains=q) | Q(asignacionextra__descripcion__unaccent__icontains=q) | Q(asignacionnoaula__descripcion__unaccent__icontains=q))\n else:\n qs = qs.filter(Q(persona__nombre__unaccent__icontains=q) | Q(persona__rut__unaccent__icontains=q) | Q(asignacionasistente__descripcion__unaccent__icontains=q) | Q(funcion__unaccent__icontains=q))\n return qs\n\n\ndef get_for_user(request, qs, lookup, user):\n periodo = request.session.get('periodo', 2020)\n\n if not user.is_superuser:\n colegios = [c.pk for c in get_objects_for_user(user, \"carga_horaria.change_colegio\")]\n \n # new logic for colegio switcher\n selected = request.session.get('colegio__pk', None)\n if selected:\n colegios = [selected]\n # end\n \n kwargs = {\"{}__in\".format(lookup): colegios,\n \"{}periode\".format(lookup[:-2]): periodo}\n return qs.filter(**kwargs).distinct()\n else:\n colegios = [c.pk for c in Colegio.objects.all()]\n # new logic for colegio switcher\n selected = request.session.get('colegio__pk', None)\n if selected:\n colegios = [selected]\n # end\n \n kwargs = {\"{}__in\".format(lookup): colegios,\n \"{}periode\".format(lookup[:-2]): periodo}\n return qs.filter(**kwargs).distinct()\n \n \n\nclass GetObjectsForUserMixin(object):\n def get_queryset(self):\n qs = super(GetObjectsForUserMixin, self).get_queryset()\n periodo = self.request.session.get('periodo', 2020)\n\n if not self.request.user.is_superuser:\n colegios = [c.pk for c in get_objects_for_user(self.request.user, \"carga_horaria.change_colegio\")]\n\n # new logic for colegio switcher\n selected = self.request.session.get('colegio__pk', None)\n if selected:\n colegios = [selected]\n # end\n \n kwargs = {\"{}__in\".format(self.lookup): colegios,\n \"{}periode\".format(self.lookup[:-2]): periodo}\n return qs.filter(**kwargs).distinct()\n else:\n colegios = [c.pk for c in Colegio.objects.all()]\n # new logic for colegio switcher\n selected = self.request.session.get('colegio__pk', None)\n 
if selected:\n colegios = [selected]\n # end\n \n kwargs = {\"{}__in\".format(self.lookup): colegios,\n \"{}periode\".format(self.lookup[:-2]): periodo}\n return qs.filter(**kwargs).distinct()\n\n\nclass ObjPermissionRequiredMixin(object):\n def get_object(self, *args, **kwargs):\n obj = super(ObjPermissionRequiredMixin, self).get_object(*args, **kwargs)\n if self.request.user.has_perm(self.permission, obj):\n return obj\n else:\n raise Http404\n\n\n\"\"\"\n Comienzo Crud Profesor\n\"\"\"\nclass ProfesorListView(LoginRequiredMixin, SearchMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de profesores\n \"\"\"\n model = Profesor\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/profesor/listado_profesor.html'\n search_fields = ['nombre', 'horas']\n paginate_by = 6\n\n\n\nclass ProfesorDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Profesor\n \"\"\"\n model = Profesor\n template_name = 'carga_horaria/profesor/detalle_profesor.html'\n\n\nclass ProfesorCreateView(LoginRequiredMixin, CreateView):\n model = Profesor\n form_class = ProfesorForm\n template_name = 'carga_horaria/profesor/nuevo_profesor.html'\n success_url = reverse_lazy('carga-horaria:profesores')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(ProfesorCreateView, self).get_form_kwargs(*args, **kwargs)\n colegio_pk = self.request.session.get('colegio__pk', None)\n if colegio_pk:\n kwargs.update({'user': self.request.user,\n 'colegio': colegio_pk,\n 'fundacion': Colegio.objects.get(pk=self.request.session.get('colegio__pk', None)).fundacion.pk})\n else:\n kwargs.update({'user': self.request.user})\n\n return kwargs\n\n def form_valid(self, form):\n profesor = form.save(commit=False)\n profesor.persona, _ = Persona.objects.update_or_create(rut=form.cleaned_data['rut'],\n defaults={'nombre': form.cleaned_data['nombre'],\n 'direccion': form.cleaned_data['direccion'],\n 'comuna': form.cleaned_data['comuna'],\n 'nacionalidad': form.cleaned_data['nacionalidad'],\n 'telefono': form.cleaned_data['telefono'],\n 'email_personal': form.cleaned_data['email_personal'],\n 'email_institucional': form.cleaned_data['email_institucional'],\n 'estado_civil': form.cleaned_data['estado_civil'],\n 'discapacidad': form.cleaned_data['discapacidad'],\n 'recibe_pension': form.cleaned_data['recibe_pension'],\n 'adventista': form.cleaned_data['adventista'],\n 'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})\n profesor.save()\n return redirect(reverse('carga-horaria:profesores'))\n\n\nclass ProfesorUpdateView(LoginRequiredMixin, UpdateView):\n model = Profesor\n form_class = ProfesorForm\n template_name = 'carga_horaria/profesor/editar_profesor.html'\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(ProfesorUpdateView, self).get_form_kwargs(*args, **kwargs)\n colegio_pk = self.request.session.get('colegio__pk', None)\n if colegio_pk:\n kwargs.update({'user': self.request.user,\n 'colegio': colegio_pk,\n 'fundacion': Colegio.objects.get(pk=self.request.session.get('colegio__pk', None)).fundacion.pk})\n else:\n kwargs.update({'user': self.request.user})\n\n return kwargs\n\n def form_valid(self, form):\n profesor = form.save(commit=False)\n profesor.persona, _ = Persona.objects.update_or_create(rut=form.cleaned_data['rut'],\n defaults={'nombre': form.cleaned_data['nombre'],\n 'direccion': form.cleaned_data['direccion'],\n 'comuna': form.cleaned_data['comuna'],\n 'nacionalidad': form.cleaned_data['nacionalidad'],\n 'telefono': form.cleaned_data['telefono'],\n 'email_personal': 
form.cleaned_data['email_personal'],\n 'email_institucional': form.cleaned_data['email_institucional'],\n 'estado_civil': form.cleaned_data['estado_civil'],\n 'discapacidad': form.cleaned_data['discapacidad'],\n 'recibe_pension': form.cleaned_data['recibe_pension'],\n 'adventista': form.cleaned_data['adventista'],\n 'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})\n profesor.save()\n return redirect(self.get_success_url())\n\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:profesor',\n kwargs={\n 'pk': self.object.pk,\n }\n )\n\n\nclass ProfesorDeleteView(LoginRequiredMixin, DeleteView):\n model = Profesor\n success_url = reverse_lazy('carga-horaria:profesores')\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n# \"\"\"\n# Comienzo Crud Curso\n# \"\"\"\n# class CursoListView(ListView):\n# \"\"\"\n# Listado de cursos\n# \"\"\"\n# model = Curso\n# template_name = 'carga_horaria/curso/listado_curso.html'\n# search_fields = ['periodo', 'letra']\n# paginate_by = 6\n\n\n# class CursoDetailView(DetailView):\n# \"\"\"\n# Detalle de curso\n# \"\"\"\n# model = Curso\n# template_name = 'carga_horaria/curso/detalle_curso.html'\n\n\n# class CursoCreateView(CreateView):\n# model = Curso\n# form_class = CursoForm\n# template_name = 'carga_horaria/curso/nuevo_curso.html'\n# success_url = reverse_lazy('carga-horaria:cursos')\n\n\n# class CursoUpdateView(UpdateView):\n# model = Curso\n# form_class = CursoForm\n# template_name = 'carga_horaria/curso/editar_curso.html'\n\n# def get_success_url(self):\n# return reverse(\n# 'carga-horaria:curso',\n# kwargs={\n# 'pk': self.object.pk,\n# }\n# )\n\n\n# class CursoDeleteView(DeleteView):\n# model = Curso\n# success_url = reverse_lazy('carga-horaria:cursos')\n\n# def get(self, request, *args, **kwargs):\n# return self.post(request, *args, **kwargs)\n\n\n\"\"\"\n Comienzo Crud Asistente\n\"\"\"\nclass AsistenteListView(LoginRequiredMixin, SearchMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de asistentes\n \"\"\"\n model = Asistente\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/asistente/listado_asistente.html'\n search_fields = ['nombre', 'horas']\n paginate_by = 6\n\n\nclass AsistenteDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Asistente\n \"\"\"\n model = Asistente\n template_name = 'carga_horaria/asistente/detalle_asistente.html'\n\n\nclass AsistenteCreateView(LoginRequiredMixin, CreateView):\n model = Asistente\n form_class = AsistenteForm\n template_name = 'carga_horaria/asistente/nuevo_asistente.html'\n success_url = reverse_lazy('carga-horaria:asistentes')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(AsistenteCreateView, self).get_form_kwargs(*args, **kwargs)\n colegio_pk = self.request.session.get('colegio__pk', None)\n if colegio_pk:\n kwargs.update({'user': self.request.user,\n 'colegio': colegio_pk,\n 'fundacion': Colegio.objects.get(pk=self.request.session.get('colegio__pk', None)).fundacion.pk})\n else:\n kwargs.update({'user': self.request.user})\n\n return kwargs\n\n\n def form_valid(self, form):\n asistente = form.save(commit=False)\n asistente.persona, _ = Persona.objects.update_or_create(rut=form.cleaned_data['rut'],\n defaults={'nombre': form.cleaned_data['nombre'],\n 'direccion': form.cleaned_data['direccion'],\n 'comuna': form.cleaned_data['comuna'],\n 'nacionalidad': form.cleaned_data['nacionalidad'],\n 'telefono': form.cleaned_data['telefono'],\n 'email_personal': form.cleaned_data['email_personal'],\n 
'email_institucional': form.cleaned_data['email_institucional'],\n 'estado_civil': form.cleaned_data['estado_civil'],\n 'discapacidad': form.cleaned_data['discapacidad'],\n 'recibe_pension': form.cleaned_data['recibe_pension'],\n 'adventista': form.cleaned_data['adventista'],\n 'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})\n asistente.save()\n return redirect(reverse('carga-horaria:asistentes'))\n\n\nclass AsistenteUpdateView(LoginRequiredMixin, UpdateView):\n model = Asistente\n form_class = AsistenteForm\n template_name = 'carga_horaria/asistente/editar_asistente.html'\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:asistente',\n kwargs={\n 'pk': self.object.pk,\n }\n )\n\n def form_valid(self, form):\n asistente = form.save(commit=False)\n asistente.persona, _ = Persona.objects.update_or_create(rut=form.cleaned_data['rut'],\n defaults={'nombre': form.cleaned_data['nombre'],\n 'direccion': form.cleaned_data['direccion'],\n 'comuna': form.cleaned_data['comuna'],\n 'nacionalidad': form.cleaned_data['nacionalidad'],\n 'telefono': form.cleaned_data['telefono'],\n 'email_personal': form.cleaned_data['email_personal'],\n 'email_institucional': form.cleaned_data['email_institucional'],\n 'estado_civil': form.cleaned_data['estado_civil'],\n 'discapacidad': form.cleaned_data['discapacidad'],\n 'recibe_pension': form.cleaned_data['recibe_pension'],\n 'adventista': form.cleaned_data['adventista'],\n 'fecha_nacimiento': form.cleaned_data['fecha_nacimiento']})\n asistente.save()\n return redirect(self.get_success_url())\n\n\nclass AsistenteDeleteView(LoginRequiredMixin, DeleteView):\n model = Asistente\n success_url = reverse_lazy('carga-horaria:asistentes')\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n\n\n\"\"\"\n Comienzo Crud Asignatura Base\n\"\"\"\nclass AsignaturaBaseListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de asignatura base\n \"\"\"\n model = AsignaturaBase\n lookup = 'plan__colegio__pk'\n template_name = 'carga_horaria/asignaturabase/listado_asignaturabase.html'\n search_fields = ['nombre', 'plan']\n paginate_by = 10\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]\n ctx['nivel_actual'] = self.request.GET.get('nivel')\n return ctx\n\n def get_queryset(self):\n qs = super().get_queryset()\n\n nivel = self.request.GET.get('nivel')\n if nivel:\n qs = qs.filter(plan__nivel=nivel)\n\n return qs\n\n\nclass AsignaturaBaseDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de asignatura base\n \"\"\"\n model = AsignaturaBase\n template_name = 'carga_horaria/asignaturabase/detalle_asignaturabase.html'\n\n\nclass AsignaturaBaseCreateView(LoginRequiredMixin, CreateView):\n model = AsignaturaBase\n form_class = AsignaturaBaseForm\n template_name = 'carga_horaria/asignaturabase/nuevo_asignaturabase.html'\n success_url = reverse_lazy('carga-horaria:asignaturasbase')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(AsignaturaBaseCreateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user,\n 'colegio': self.request.session.get('colegio__pk', None)})\n return kwargs\n\n\nclass AsignaturaBaseUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignaturaBase\n form_class = AsignaturaBaseForm\n template_name = 'carga_horaria/asignaturabase/editar_asignaturabase.html'\n\n def get_success_url(self):\n return reverse(\n 
'carga-horaria:asignaturabase',\n kwargs={\n 'pk': self.object.pk,\n }\n )\n\n\nclass AsignaturaBaseDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignaturaBase\n success_url = reverse_lazy('carga-horaria:asignaturasbase')\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n\"\"\"\n Comienzo Crud Asignatura\n\"\"\"\nclass AsignaturaListView(LoginRequiredMixin, ListView):\n \"\"\"\n Listado de asignatura\n \"\"\"\n model = Asignatura\n template_name = 'carga_horaria/asignatura/listado_asignatura.html'\n search_fields = ['base', 'periodo']\n paginate_by = 10\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['levels'] = [(tag.name, tag.value) for tag in Nivel][::-1]\n ctx['nivel_actual'] = self.request.GET.get('nivel')\n return ctx\n\n def get_queryset(self):\n qs = super().get_queryset()\n\n nivel = self.request.GET.get('nivel')\n if nivel:\n qs = qs.filter(base__plan__nivel=nivel)\n\n periodo = self.request.GET.get('periodo')\n if periodo:\n qs = qs.filter(periodo__pk=periodo)\n return qs\n\n\nclass AsignaturaDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de asignatura\n \"\"\"\n model = Asignatura\n template_name = 'carga_horaria/asignatura/detalle_asignatura.html'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['periodo'] = Periodo.objects.get(pk=self.kwargs['periodo_pk'])\n return ctx\n\nclass AsignaturaCreateView(LoginRequiredMixin, CreateView):\n model = Asignatura\n form_class = AsignaturaCreateForm\n template_name = 'carga_horaria/asignatura/nuevo_asignatura.html'\n\n def form_valid(self, form):\n # dirty validation\n periodo = Periodo.objects.get(pk=self.kwargs['pk'])\n horas = form.cleaned_data['horas']\n available = periodo.available\n if horas > available:\n form.add_error('horas', \"Horas superan el tiempo disponible ({})\".format(available))\n return self.form_invalid(form)\n else:\n self.object = form.save()\n self.object.periodos.add(periodo)\n return HttpResponseRedirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:periodo',\n kwargs={\n 'pk': self.kwargs['pk'],\n }\n )\n\n\n\nclass AsignaturaUpdateView(LoginRequiredMixin, UpdateView):\n model = Asignatura\n form_class = AsignaturaUpdateForm\n template_name = 'carga_horaria/asignatura/editar_asignatura.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs['periodo_pk']})\n\n def form_valid(self, form):\n # dirty validation\n periodo = Periodo.objects.get(pk=self.kwargs['periodo_pk'])\n horas = form.cleaned_data['horas']\n old_horas = Asignatura.objects.get(pk=self.object.pk).horas\n delta = horas - old_horas\n available = periodo.available\n\n if delta > available:\n form.add_error('horas', \"Horas superan el tiempo disponible ({})\".format(available + old_horas))\n return self.form_invalid(form)\n elif self.object.base:\n if periodo.colegio.jec:\n horas_base = self.object.base.horas_jec\n else:\n horas_base = self.object.base.horas_nec\n\n if horas < horas_base:\n form.add_error('horas', \"Horas deben ser como mínimo las del plan de estudios original ({})\".format(horas_base))\n return self.form_invalid(form)\n\n return super().form_valid(form)\n\n\nclass AsignaturaDeleteView(LoginRequiredMixin, DeleteView):\n model = Asignatura\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n 
return reverse(\n 'carga-horaria:periodo',\n kwargs={\n 'pk': self.kwargs['periodo_pk'],\n }\n )\n",
"step-ids": [
52,
53,
56,
73,
85
]
}
|
[
52,
53,
56,
73,
85
] |
<|reserved_special_token_0|>
def inv_list(l, start=0):
d = {}
for i in range(len(l)):
d[l[i]] = i + start
return d
<|reserved_special_token_0|>
def read_dataset(d):
ts = []
pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=
'Reading time series set ' + d)
for f in pbar:
data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]
data = data.loc[data.Parameter.notna()]
if len(data) <= 5:
continue
data = data.loc[data.Value >= 0]
data['RecordID'] = f[:-4]
ts.append(data)
ts = pd.concat(ts)
return ts
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def inv_list(l, start=0):
d = {}
for i in range(len(l)):
d[l[i]] = i + start
return d
<|reserved_special_token_0|>
def read_dataset(d):
ts = []
pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=
'Reading time series set ' + d)
for f in pbar:
data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]
data = data.loc[data.Parameter.notna()]
if len(data) <= 5:
continue
data = data.loc[data.Value >= 0]
data['RecordID'] = f[:-4]
ts.append(data)
ts = pd.concat(ts)
return ts
<|reserved_special_token_0|>
ts.rename(columns={'Time': 'hour', 'Parameter': 'variable', 'Value':
'value'}, inplace=True)
<|reserved_special_token_0|>
oc.rename(columns={'Length_of_stay': 'length_of_stay', 'In-hospital_death':
'in_hospital_mortality'}, inplace=True)
<|reserved_special_token_0|>
ts.drop(columns='RecordID', inplace=True)
oc.drop(columns='RecordID', inplace=True)
<|reserved_special_token_0|>
for val in [4, 3, 2, 1]:
kk = ii & (ts.value == val)
ts.loc[kk, 'variable'] = 'ICUType_' + str(val)
<|reserved_special_token_0|>
np.random.seed(123)
np.random.shuffle(train_valid_ind)
<|reserved_special_token_0|>
oc.drop(columns='subset', inplace=True)
pickle.dump([ts, oc, train_ind, valid_ind, test_ind], open(
'physionet_2012_preprocessed.pkl', 'wb'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def inv_list(l, start=0):
d = {}
for i in range(len(l)):
d[l[i]] = i + start
return d
raw_data_path = '/home/reddy/sindhu/datasets/physionet_2012/'
def read_dataset(d):
ts = []
pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=
'Reading time series set ' + d)
for f in pbar:
data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]
data = data.loc[data.Parameter.notna()]
if len(data) <= 5:
continue
data = data.loc[data.Value >= 0]
data['RecordID'] = f[:-4]
ts.append(data)
ts = pd.concat(ts)
return ts
ts = pd.concat((read_dataset('a'), read_dataset('b'), read_dataset('c')))
ts.Time = ts.Time.apply(lambda x: int(x[:2]) + int(x[3:]) / 60)
ts.rename(columns={'Time': 'hour', 'Parameter': 'variable', 'Value':
'value'}, inplace=True)
oc_a = pd.read_csv(raw_data_path + '/Outcomes-a.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_a['subset'] = 'a'
oc_b = pd.read_csv(raw_data_path + '/Outcomes-b.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_b['subset'] = 'b'
oc_c = pd.read_csv(raw_data_path + '/Outcomes-c.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_c['subset'] = 'c'
oc = pd.concat((oc_a, oc_b, oc_c))
oc.RecordID = oc.RecordID.astype(str)
oc.rename(columns={'Length_of_stay': 'length_of_stay', 'In-hospital_death':
'in_hospital_mortality'}, inplace=True)
rec_ids = sorted(list(ts.RecordID.unique()))
rid_to_ind = inv_list(rec_ids)
oc = oc.loc[oc.RecordID.isin(rec_ids)]
ts['ts_ind'] = ts.RecordID.map(rid_to_ind)
oc['ts_ind'] = oc.RecordID.map(rid_to_ind)
ts.drop(columns='RecordID', inplace=True)
oc.drop(columns='RecordID', inplace=True)
ts = ts.drop_duplicates()
ii = ts.variable == 'ICUType'
for val in [4, 3, 2, 1]:
kk = ii & (ts.value == val)
ts.loc[kk, 'variable'] = 'ICUType_' + str(val)
ts.loc[ii, 'value'] = 1
means_stds = ts.groupby('variable').agg({'value': ['mean', 'std']})
means_stds.columns = [col[1] for col in means_stds.columns]
means_stds.loc[means_stds['std'] == 0, 'std'] = 1
ts = ts.merge(means_stds.reset_index(), on='variable', how='left')
ii = ts.variable.apply(lambda x: not x.startswith('ICUType')
) & ~ts.variable.isin(['Age', 'Gender', 'Height'])
ts.loc[ii, 'value'] = (ts.loc[ii, 'value'] - ts.loc[ii, 'mean']) / ts.loc[
ii, 'std']
train_valid_ind = np.array(oc.loc[oc.subset != 'a'].ts_ind)
np.random.seed(123)
np.random.shuffle(train_valid_ind)
bp = int(0.8 * len(train_valid_ind))
train_ind = train_valid_ind[:bp]
valid_ind = train_valid_ind[bp:]
test_ind = np.array(oc.loc[oc.subset == 'a'].ts_ind)
oc.drop(columns='subset', inplace=True)
pickle.dump([ts, oc, train_ind, valid_ind, test_ind], open(
'physionet_2012_preprocessed.pkl', 'wb'))
<|reserved_special_token_1|>
from tqdm import tqdm
import os
import pandas as pd
import pickle
import numpy as np
def inv_list(l, start=0):
d = {}
for i in range(len(l)):
d[l[i]] = i + start
return d
raw_data_path = '/home/reddy/sindhu/datasets/physionet_2012/'
def read_dataset(d):
ts = []
pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=
'Reading time series set ' + d)
for f in pbar:
data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]
data = data.loc[data.Parameter.notna()]
if len(data) <= 5:
continue
data = data.loc[data.Value >= 0]
data['RecordID'] = f[:-4]
ts.append(data)
ts = pd.concat(ts)
return ts
ts = pd.concat((read_dataset('a'), read_dataset('b'), read_dataset('c')))
ts.Time = ts.Time.apply(lambda x: int(x[:2]) + int(x[3:]) / 60)
ts.rename(columns={'Time': 'hour', 'Parameter': 'variable', 'Value':
'value'}, inplace=True)
oc_a = pd.read_csv(raw_data_path + '/Outcomes-a.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_a['subset'] = 'a'
oc_b = pd.read_csv(raw_data_path + '/Outcomes-b.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_b['subset'] = 'b'
oc_c = pd.read_csv(raw_data_path + '/Outcomes-c.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_c['subset'] = 'c'
oc = pd.concat((oc_a, oc_b, oc_c))
oc.RecordID = oc.RecordID.astype(str)
oc.rename(columns={'Length_of_stay': 'length_of_stay', 'In-hospital_death':
'in_hospital_mortality'}, inplace=True)
rec_ids = sorted(list(ts.RecordID.unique()))
rid_to_ind = inv_list(rec_ids)
oc = oc.loc[oc.RecordID.isin(rec_ids)]
ts['ts_ind'] = ts.RecordID.map(rid_to_ind)
oc['ts_ind'] = oc.RecordID.map(rid_to_ind)
ts.drop(columns='RecordID', inplace=True)
oc.drop(columns='RecordID', inplace=True)
ts = ts.drop_duplicates()
ii = ts.variable == 'ICUType'
for val in [4, 3, 2, 1]:
kk = ii & (ts.value == val)
ts.loc[kk, 'variable'] = 'ICUType_' + str(val)
ts.loc[ii, 'value'] = 1
means_stds = ts.groupby('variable').agg({'value': ['mean', 'std']})
means_stds.columns = [col[1] for col in means_stds.columns]
means_stds.loc[means_stds['std'] == 0, 'std'] = 1
ts = ts.merge(means_stds.reset_index(), on='variable', how='left')
ii = ts.variable.apply(lambda x: not x.startswith('ICUType')
) & ~ts.variable.isin(['Age', 'Gender', 'Height'])
ts.loc[ii, 'value'] = (ts.loc[ii, 'value'] - ts.loc[ii, 'mean']) / ts.loc[
ii, 'std']
train_valid_ind = np.array(oc.loc[oc.subset != 'a'].ts_ind)
np.random.seed(123)
np.random.shuffle(train_valid_ind)
bp = int(0.8 * len(train_valid_ind))
train_ind = train_valid_ind[:bp]
valid_ind = train_valid_ind[bp:]
test_ind = np.array(oc.loc[oc.subset == 'a'].ts_ind)
oc.drop(columns='subset', inplace=True)
pickle.dump([ts, oc, train_ind, valid_ind, test_ind], open(
'physionet_2012_preprocessed.pkl', 'wb'))
<|reserved_special_token_1|>
from tqdm import tqdm
import os
import pandas as pd
import pickle
import numpy as np
def inv_list(l, start=0):
d = {}
for i in range(len(l)):
d[l[i]] = i+start
return d
raw_data_path = '/home/reddy/sindhu/datasets/physionet_2012/'
def read_dataset(d):
ts = []
pbar = tqdm(os.listdir(raw_data_path+'/set-'+d), desc='Reading time series set '+d)
for f in pbar:
data = pd.read_csv(raw_data_path+'/set-'+d+'/'+f).iloc[1:]
data = data.loc[data.Parameter.notna()]
if len(data)<=5:
continue
data = data.loc[data.Value>=0] # neg Value indicates missingness.
data['RecordID'] = f[:-4]
ts.append(data)
ts = pd.concat(ts)
return ts
ts = pd.concat((read_dataset('a'), read_dataset('b'), read_dataset('c')))
ts.Time = ts.Time.apply(lambda x:int(x[:2])+int(x[3:])/60) # No. of hours since admission.
ts.rename(columns={'Time':'hour', 'Parameter':'variable', 'Value':'value'}, inplace=True)
oc_a = pd.read_csv(raw_data_path+'/Outcomes-a.txt', usecols=['RecordID', 'Length_of_stay', 'In-hospital_death'])
oc_a['subset'] = 'a'
oc_b = pd.read_csv(raw_data_path+'/Outcomes-b.txt', usecols=['RecordID', 'Length_of_stay', 'In-hospital_death'])
oc_b['subset'] = 'b'
oc_c = pd.read_csv(raw_data_path+'/Outcomes-c.txt', usecols=['RecordID', 'Length_of_stay', 'In-hospital_death'])
oc_c['subset'] = 'c'
oc = pd.concat((oc_a,oc_b,oc_c))
oc.RecordID = oc.RecordID.astype(str)
oc.rename(columns={'Length_of_stay':'length_of_stay', 'In-hospital_death':'in_hospital_mortality'}, inplace=True)
rec_ids = sorted(list(ts.RecordID.unique()))
rid_to_ind = inv_list(rec_ids)
oc = oc.loc[oc.RecordID.isin(rec_ids)]
ts['ts_ind'] = ts.RecordID.map(rid_to_ind)
oc['ts_ind'] = oc.RecordID.map(rid_to_ind)
ts.drop(columns='RecordID', inplace=True)
oc.drop(columns='RecordID', inplace=True)
# Drop duplicates.
ts = ts.drop_duplicates()
# Convert categorical to numeric.
ii = (ts.variable=='ICUType')
for val in [4,3,2,1]:
kk = ii&(ts.value==val)
ts.loc[kk, 'variable'] = 'ICUType_'+str(val)
ts.loc[ii, 'value'] = 1
# Normalize data except Age, Gender, Height, ICUType.
means_stds = ts.groupby('variable').agg({'value':['mean', 'std']})
means_stds.columns = [col[1] for col in means_stds.columns]
means_stds.loc[means_stds['std']==0, 'std'] = 1
ts = ts.merge(means_stds.reset_index(), on='variable', how='left')
ii = ts.variable.apply(lambda x:not(x.startswith('ICUType')))&(~ts.variable.isin(['Age', 'Gender', 'Height']))
ts.loc[ii, 'value'] = (ts.loc[ii, 'value']-ts.loc[ii, 'mean'])/ts.loc[ii, 'std']
# Generate split.
train_valid_ind = np.array(oc.loc[oc.subset!='a'].ts_ind)
np.random.seed(123)
np.random.shuffle(train_valid_ind)
bp = int(0.8*len(train_valid_ind))
train_ind = train_valid_ind[:bp]
valid_ind = train_valid_ind[bp:]
test_ind = np.array(oc.loc[oc.subset=='a'].ts_ind)
oc.drop(columns='subset', inplace=True)
# Store data.
pickle.dump([ts, oc, train_ind, valid_ind, test_ind], open('physionet_2012_preprocessed.pkl','wb'))
|
flexible
|
{
"blob_id": "3e07a2a2d0a810c016720fa41d71d0771cbccfef",
"index": 626,
"step-1": "<mask token>\n\n\ndef inv_list(l, start=0):\n d = {}\n for i in range(len(l)):\n d[l[i]] = i + start\n return d\n\n\n<mask token>\n\n\ndef read_dataset(d):\n ts = []\n pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=\n 'Reading time series set ' + d)\n for f in pbar:\n data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]\n data = data.loc[data.Parameter.notna()]\n if len(data) <= 5:\n continue\n data = data.loc[data.Value >= 0]\n data['RecordID'] = f[:-4]\n ts.append(data)\n ts = pd.concat(ts)\n return ts\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef inv_list(l, start=0):\n d = {}\n for i in range(len(l)):\n d[l[i]] = i + start\n return d\n\n\n<mask token>\n\n\ndef read_dataset(d):\n ts = []\n pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=\n 'Reading time series set ' + d)\n for f in pbar:\n data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]\n data = data.loc[data.Parameter.notna()]\n if len(data) <= 5:\n continue\n data = data.loc[data.Value >= 0]\n data['RecordID'] = f[:-4]\n ts.append(data)\n ts = pd.concat(ts)\n return ts\n\n\n<mask token>\nts.rename(columns={'Time': 'hour', 'Parameter': 'variable', 'Value':\n 'value'}, inplace=True)\n<mask token>\noc.rename(columns={'Length_of_stay': 'length_of_stay', 'In-hospital_death':\n 'in_hospital_mortality'}, inplace=True)\n<mask token>\nts.drop(columns='RecordID', inplace=True)\noc.drop(columns='RecordID', inplace=True)\n<mask token>\nfor val in [4, 3, 2, 1]:\n kk = ii & (ts.value == val)\n ts.loc[kk, 'variable'] = 'ICUType_' + str(val)\n<mask token>\nnp.random.seed(123)\nnp.random.shuffle(train_valid_ind)\n<mask token>\noc.drop(columns='subset', inplace=True)\npickle.dump([ts, oc, train_ind, valid_ind, test_ind], open(\n 'physionet_2012_preprocessed.pkl', 'wb'))\n",
"step-3": "<mask token>\n\n\ndef inv_list(l, start=0):\n d = {}\n for i in range(len(l)):\n d[l[i]] = i + start\n return d\n\n\nraw_data_path = '/home/reddy/sindhu/datasets/physionet_2012/'\n\n\ndef read_dataset(d):\n ts = []\n pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=\n 'Reading time series set ' + d)\n for f in pbar:\n data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]\n data = data.loc[data.Parameter.notna()]\n if len(data) <= 5:\n continue\n data = data.loc[data.Value >= 0]\n data['RecordID'] = f[:-4]\n ts.append(data)\n ts = pd.concat(ts)\n return ts\n\n\nts = pd.concat((read_dataset('a'), read_dataset('b'), read_dataset('c')))\nts.Time = ts.Time.apply(lambda x: int(x[:2]) + int(x[3:]) / 60)\nts.rename(columns={'Time': 'hour', 'Parameter': 'variable', 'Value':\n 'value'}, inplace=True)\noc_a = pd.read_csv(raw_data_path + '/Outcomes-a.txt', usecols=['RecordID',\n 'Length_of_stay', 'In-hospital_death'])\noc_a['subset'] = 'a'\noc_b = pd.read_csv(raw_data_path + '/Outcomes-b.txt', usecols=['RecordID',\n 'Length_of_stay', 'In-hospital_death'])\noc_b['subset'] = 'b'\noc_c = pd.read_csv(raw_data_path + '/Outcomes-c.txt', usecols=['RecordID',\n 'Length_of_stay', 'In-hospital_death'])\noc_c['subset'] = 'c'\noc = pd.concat((oc_a, oc_b, oc_c))\noc.RecordID = oc.RecordID.astype(str)\noc.rename(columns={'Length_of_stay': 'length_of_stay', 'In-hospital_death':\n 'in_hospital_mortality'}, inplace=True)\nrec_ids = sorted(list(ts.RecordID.unique()))\nrid_to_ind = inv_list(rec_ids)\noc = oc.loc[oc.RecordID.isin(rec_ids)]\nts['ts_ind'] = ts.RecordID.map(rid_to_ind)\noc['ts_ind'] = oc.RecordID.map(rid_to_ind)\nts.drop(columns='RecordID', inplace=True)\noc.drop(columns='RecordID', inplace=True)\nts = ts.drop_duplicates()\nii = ts.variable == 'ICUType'\nfor val in [4, 3, 2, 1]:\n kk = ii & (ts.value == val)\n ts.loc[kk, 'variable'] = 'ICUType_' + str(val)\nts.loc[ii, 'value'] = 1\nmeans_stds = ts.groupby('variable').agg({'value': ['mean', 'std']})\nmeans_stds.columns = [col[1] for col in means_stds.columns]\nmeans_stds.loc[means_stds['std'] == 0, 'std'] = 1\nts = ts.merge(means_stds.reset_index(), on='variable', how='left')\nii = ts.variable.apply(lambda x: not x.startswith('ICUType')\n ) & ~ts.variable.isin(['Age', 'Gender', 'Height'])\nts.loc[ii, 'value'] = (ts.loc[ii, 'value'] - ts.loc[ii, 'mean']) / ts.loc[\n ii, 'std']\ntrain_valid_ind = np.array(oc.loc[oc.subset != 'a'].ts_ind)\nnp.random.seed(123)\nnp.random.shuffle(train_valid_ind)\nbp = int(0.8 * len(train_valid_ind))\ntrain_ind = train_valid_ind[:bp]\nvalid_ind = train_valid_ind[bp:]\ntest_ind = np.array(oc.loc[oc.subset == 'a'].ts_ind)\noc.drop(columns='subset', inplace=True)\npickle.dump([ts, oc, train_ind, valid_ind, test_ind], open(\n 'physionet_2012_preprocessed.pkl', 'wb'))\n",
"step-4": "from tqdm import tqdm\nimport os\nimport pandas as pd\nimport pickle\nimport numpy as np\n\n\ndef inv_list(l, start=0):\n d = {}\n for i in range(len(l)):\n d[l[i]] = i + start\n return d\n\n\nraw_data_path = '/home/reddy/sindhu/datasets/physionet_2012/'\n\n\ndef read_dataset(d):\n ts = []\n pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=\n 'Reading time series set ' + d)\n for f in pbar:\n data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]\n data = data.loc[data.Parameter.notna()]\n if len(data) <= 5:\n continue\n data = data.loc[data.Value >= 0]\n data['RecordID'] = f[:-4]\n ts.append(data)\n ts = pd.concat(ts)\n return ts\n\n\nts = pd.concat((read_dataset('a'), read_dataset('b'), read_dataset('c')))\nts.Time = ts.Time.apply(lambda x: int(x[:2]) + int(x[3:]) / 60)\nts.rename(columns={'Time': 'hour', 'Parameter': 'variable', 'Value':\n 'value'}, inplace=True)\noc_a = pd.read_csv(raw_data_path + '/Outcomes-a.txt', usecols=['RecordID',\n 'Length_of_stay', 'In-hospital_death'])\noc_a['subset'] = 'a'\noc_b = pd.read_csv(raw_data_path + '/Outcomes-b.txt', usecols=['RecordID',\n 'Length_of_stay', 'In-hospital_death'])\noc_b['subset'] = 'b'\noc_c = pd.read_csv(raw_data_path + '/Outcomes-c.txt', usecols=['RecordID',\n 'Length_of_stay', 'In-hospital_death'])\noc_c['subset'] = 'c'\noc = pd.concat((oc_a, oc_b, oc_c))\noc.RecordID = oc.RecordID.astype(str)\noc.rename(columns={'Length_of_stay': 'length_of_stay', 'In-hospital_death':\n 'in_hospital_mortality'}, inplace=True)\nrec_ids = sorted(list(ts.RecordID.unique()))\nrid_to_ind = inv_list(rec_ids)\noc = oc.loc[oc.RecordID.isin(rec_ids)]\nts['ts_ind'] = ts.RecordID.map(rid_to_ind)\noc['ts_ind'] = oc.RecordID.map(rid_to_ind)\nts.drop(columns='RecordID', inplace=True)\noc.drop(columns='RecordID', inplace=True)\nts = ts.drop_duplicates()\nii = ts.variable == 'ICUType'\nfor val in [4, 3, 2, 1]:\n kk = ii & (ts.value == val)\n ts.loc[kk, 'variable'] = 'ICUType_' + str(val)\nts.loc[ii, 'value'] = 1\nmeans_stds = ts.groupby('variable').agg({'value': ['mean', 'std']})\nmeans_stds.columns = [col[1] for col in means_stds.columns]\nmeans_stds.loc[means_stds['std'] == 0, 'std'] = 1\nts = ts.merge(means_stds.reset_index(), on='variable', how='left')\nii = ts.variable.apply(lambda x: not x.startswith('ICUType')\n ) & ~ts.variable.isin(['Age', 'Gender', 'Height'])\nts.loc[ii, 'value'] = (ts.loc[ii, 'value'] - ts.loc[ii, 'mean']) / ts.loc[\n ii, 'std']\ntrain_valid_ind = np.array(oc.loc[oc.subset != 'a'].ts_ind)\nnp.random.seed(123)\nnp.random.shuffle(train_valid_ind)\nbp = int(0.8 * len(train_valid_ind))\ntrain_ind = train_valid_ind[:bp]\nvalid_ind = train_valid_ind[bp:]\ntest_ind = np.array(oc.loc[oc.subset == 'a'].ts_ind)\noc.drop(columns='subset', inplace=True)\npickle.dump([ts, oc, train_ind, valid_ind, test_ind], open(\n 'physionet_2012_preprocessed.pkl', 'wb'))\n",
"step-5": "from tqdm import tqdm\nimport os\nimport pandas as pd\nimport pickle\nimport numpy as np\n\ndef inv_list(l, start=0):\n d = {}\n for i in range(len(l)):\n d[l[i]] = i+start\n return d\n\nraw_data_path = '/home/reddy/sindhu/datasets/physionet_2012/'\ndef read_dataset(d):\n ts = []\n pbar = tqdm(os.listdir(raw_data_path+'/set-'+d), desc='Reading time series set '+d)\n for f in pbar:\n data = pd.read_csv(raw_data_path+'/set-'+d+'/'+f).iloc[1:]\n data = data.loc[data.Parameter.notna()]\n if len(data)<=5:\n continue\n data = data.loc[data.Value>=0] # neg Value indicates missingness.\n data['RecordID'] = f[:-4]\n ts.append(data)\n ts = pd.concat(ts)\n return ts\n\nts = pd.concat((read_dataset('a'), read_dataset('b'), read_dataset('c')))\nts.Time = ts.Time.apply(lambda x:int(x[:2])+int(x[3:])/60) # No. of hours since admission.\nts.rename(columns={'Time':'hour', 'Parameter':'variable', 'Value':'value'}, inplace=True)\noc_a = pd.read_csv(raw_data_path+'/Outcomes-a.txt', usecols=['RecordID', 'Length_of_stay', 'In-hospital_death'])\noc_a['subset'] = 'a'\noc_b = pd.read_csv(raw_data_path+'/Outcomes-b.txt', usecols=['RecordID', 'Length_of_stay', 'In-hospital_death'])\noc_b['subset'] = 'b'\noc_c = pd.read_csv(raw_data_path+'/Outcomes-c.txt', usecols=['RecordID', 'Length_of_stay', 'In-hospital_death'])\noc_c['subset'] = 'c'\noc = pd.concat((oc_a,oc_b,oc_c))\noc.RecordID = oc.RecordID.astype(str)\noc.rename(columns={'Length_of_stay':'length_of_stay', 'In-hospital_death':'in_hospital_mortality'}, inplace=True)\nrec_ids = sorted(list(ts.RecordID.unique()))\nrid_to_ind = inv_list(rec_ids)\noc = oc.loc[oc.RecordID.isin(rec_ids)]\nts['ts_ind'] = ts.RecordID.map(rid_to_ind)\noc['ts_ind'] = oc.RecordID.map(rid_to_ind)\nts.drop(columns='RecordID', inplace=True)\noc.drop(columns='RecordID', inplace=True)\n\n# Drop duplicates.\nts = ts.drop_duplicates()\n\n# Convert categorical to numeric.\nii = (ts.variable=='ICUType')\nfor val in [4,3,2,1]:\n kk = ii&(ts.value==val)\n ts.loc[kk, 'variable'] = 'ICUType_'+str(val)\nts.loc[ii, 'value'] = 1\n \n# Normalize data except Age, Gender, Height, ICUType.\nmeans_stds = ts.groupby('variable').agg({'value':['mean', 'std']})\nmeans_stds.columns = [col[1] for col in means_stds.columns]\nmeans_stds.loc[means_stds['std']==0, 'std'] = 1\nts = ts.merge(means_stds.reset_index(), on='variable', how='left')\nii = ts.variable.apply(lambda x:not(x.startswith('ICUType')))&(~ts.variable.isin(['Age', 'Gender', 'Height']))\nts.loc[ii, 'value'] = (ts.loc[ii, 'value']-ts.loc[ii, 'mean'])/ts.loc[ii, 'std']\n\n# Generate split.\ntrain_valid_ind = np.array(oc.loc[oc.subset!='a'].ts_ind)\nnp.random.seed(123)\nnp.random.shuffle(train_valid_ind)\nbp = int(0.8*len(train_valid_ind))\ntrain_ind = train_valid_ind[:bp]\nvalid_ind = train_valid_ind[bp:]\ntest_ind = np.array(oc.loc[oc.subset=='a'].ts_ind)\noc.drop(columns='subset', inplace=True)\n\n# Store data.\npickle.dump([ts, oc, train_ind, valid_ind, test_ind], open('physionet_2012_preprocessed.pkl','wb'))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Boundary:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Boundary:
<|reserved_special_token_0|>
def show(self):
self.py5.stroke(255)
self.py5.line(self.a.x, self.a.y, self.b.x, self.b.y)
<|reserved_special_token_1|>
class Boundary:
def __init__(self, py5_inst, x1, y1, x2, y2):
self.py5 = py5_inst
self.a = self.py5.create_vector(x1, y1)
self.b = self.py5.create_vector(x2, y2)
def show(self):
self.py5.stroke(255)
self.py5.line(self.a.x, self.a.y, self.b.x, self.b.y)
<|reserved_special_token_1|>
class Boundary():
def __init__(self, py5_inst, x1, y1, x2, y2):
self.py5 = py5_inst
self.a = self.py5.create_vector(x1, y1)
self.b = self.py5.create_vector(x2, y2)
def show(self):
self.py5.stroke(255)
self.py5.line(self.a.x, self.a.y, self.b.x, self.b.y)
|
flexible
|
{
"blob_id": "df00cc501b7b682cc1f4fbc9ae87a27984e6b5ef",
"index": 5424,
"step-1": "<mask token>\n",
"step-2": "class Boundary:\n <mask token>\n <mask token>\n",
"step-3": "class Boundary:\n <mask token>\n\n def show(self):\n self.py5.stroke(255)\n self.py5.line(self.a.x, self.a.y, self.b.x, self.b.y)\n",
"step-4": "class Boundary:\n\n def __init__(self, py5_inst, x1, y1, x2, y2):\n self.py5 = py5_inst\n self.a = self.py5.create_vector(x1, y1)\n self.b = self.py5.create_vector(x2, y2)\n\n def show(self):\n self.py5.stroke(255)\n self.py5.line(self.a.x, self.a.y, self.b.x, self.b.y)\n",
"step-5": "class Boundary():\n def __init__(self, py5_inst, x1, y1, x2, y2):\n self.py5 = py5_inst\n self.a = self.py5.create_vector(x1, y1)\n self.b = self.py5.create_vector(x2, y2)\n\n def show(self):\n self.py5.stroke(255)\n self.py5.line(self.a.x, self.a.y, self.b.x, self.b.y)\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def is_prime(n):
if n == 1:
return False
if n < 4:
return True
if n % 2 == 0:
return False
if n < 9:
return True
if n % 3 == 0:
return False
root = math.sqrt(n)
f = 5
while f <= root:
if n % f == 0:
return False
if n % (f + 2) == 0:
return False
f += 6
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def is_prime(n):
if n == 1:
return False
if n < 4:
return True
if n % 2 == 0:
return False
if n < 9:
return True
if n % 3 == 0:
return False
root = math.sqrt(n)
f = 5
while f <= root:
if n % f == 0:
return False
if n % (f + 2) == 0:
return False
f += 6
return True
def main():
limit = 10001
count = 1
candidate = 1
while count < limit:
candidate += 2
if is_prime(candidate):
count += 1
print(candidate)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def is_prime(n):
if n == 1:
return False
if n < 4:
return True
if n % 2 == 0:
return False
if n < 9:
return True
if n % 3 == 0:
return False
root = math.sqrt(n)
f = 5
while f <= root:
if n % f == 0:
return False
if n % (f + 2) == 0:
return False
f += 6
return True
def main():
limit = 10001
count = 1
candidate = 1
while count < limit:
candidate += 2
if is_prime(candidate):
count += 1
print(candidate)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import math
def is_prime(n):
if n == 1:
return False
if n < 4:
return True
if n % 2 == 0:
return False
if n < 9:
return True
if n % 3 == 0:
return False
root = math.sqrt(n)
f = 5
while f <= root:
if n % f == 0:
return False
if n % (f + 2) == 0:
return False
f += 6
return True
def main():
limit = 10001
count = 1
candidate = 1
while count < limit:
candidate += 2
if is_prime(candidate):
count += 1
print(candidate)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import math
def is_prime(n):
# Based on the Sieve of Eratosthenes
if n == 1:
return False
if n < 4:
# 2 and 3 are prime
return True
if n % 2 == 0:
return False
if n < 9:
# 5 and 7 are prime (we have already excluded 4, 6 and 8)
return True
if n % 3 == 0:
return False
root = math.sqrt(n)
f = 5
while f <= root:
if n % f == 0:
return False
if n % (f + 2) == 0:
return False
f += 6
return True
def main():
limit = 10001
# We know that 2 is prime
count = 1
candidate = 1
while count < limit:
candidate += 2
if is_prime(candidate):
count += 1
print(candidate)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "3970c7768e892ad217c193b1d967c1203b7e9a25",
"index": 6512,
"step-1": "<mask token>\n\n\ndef is_prime(n):\n if n == 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n root = math.sqrt(n)\n f = 5\n while f <= root:\n if n % f == 0:\n return False\n if n % (f + 2) == 0:\n return False\n f += 6\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_prime(n):\n if n == 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n root = math.sqrt(n)\n f = 5\n while f <= root:\n if n % f == 0:\n return False\n if n % (f + 2) == 0:\n return False\n f += 6\n return True\n\n\ndef main():\n limit = 10001\n count = 1\n candidate = 1\n while count < limit:\n candidate += 2\n if is_prime(candidate):\n count += 1\n print(candidate)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef is_prime(n):\n if n == 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n root = math.sqrt(n)\n f = 5\n while f <= root:\n if n % f == 0:\n return False\n if n % (f + 2) == 0:\n return False\n f += 6\n return True\n\n\ndef main():\n limit = 10001\n count = 1\n candidate = 1\n while count < limit:\n candidate += 2\n if is_prime(candidate):\n count += 1\n print(candidate)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import math\n\n\ndef is_prime(n):\n if n == 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n root = math.sqrt(n)\n f = 5\n while f <= root:\n if n % f == 0:\n return False\n if n % (f + 2) == 0:\n return False\n f += 6\n return True\n\n\ndef main():\n limit = 10001\n count = 1\n candidate = 1\n while count < limit:\n candidate += 2\n if is_prime(candidate):\n count += 1\n print(candidate)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import math\n\n\ndef is_prime(n):\n # Based on the Sieve of Eratosthenes\n if n == 1:\n return False\n if n < 4:\n # 2 and 3 are prime\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n # 5 and 7 are prime (we have already excluded 4, 6 and 8)\n return True\n if n % 3 == 0:\n return False\n\n root = math.sqrt(n)\n f = 5\n\n while f <= root:\n if n % f == 0:\n return False\n if n % (f + 2) == 0:\n return False\n f += 6\n\n return True\n\n\ndef main():\n limit = 10001\n # We know that 2 is prime\n count = 1\n candidate = 1\n\n while count < limit:\n candidate += 2\n if is_prime(candidate):\n count += 1\n\n print(candidate)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import socket
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 8886)
sock.connect(server_address)
data = "TCP"
length = len(data)
ret = bytearray([])
for byte in data.encode("utf-8"):
ret.append(byte)
sock.sendall(ret)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "c6fd848bb3d845a50b928c18a51f296a500e7746",
"index": 2922,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = 'localhost', 8886\n sock.connect(server_address)\n data = 'TCP'\n length = len(data)\n ret = bytearray([])\n for byte in data.encode('utf-8'):\n ret.append(byte)\n sock.sendall(ret)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = 'localhost', 8886\n sock.connect(server_address)\n data = 'TCP'\n length = len(data)\n ret = bytearray([])\n for byte in data.encode('utf-8'):\n ret.append(byte)\n sock.sendall(ret)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import socket\n\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = 'localhost', 8886\n sock.connect(server_address)\n data = 'TCP'\n length = len(data)\n ret = bytearray([])\n for byte in data.encode('utf-8'):\n ret.append(byte)\n sock.sendall(ret)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import socket\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = ('localhost', 8886)\n sock.connect(server_address)\n\n data = \"TCP\"\n length = len(data)\n ret = bytearray([])\n for byte in data.encode(\"utf-8\"):\n ret.append(byte)\n sock.sendall(ret)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def test_sample_input():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
logger.info(traverse_map(map_template))
def test_sample_input_custom_slope():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert traverse_map(map_template, slope=[1, 1]) == 2
assert traverse_map(map_template, slope=[1, 3]) == 7
assert traverse_map(map_template, slope=[2, 1]) == 2
def test_big_input():
with open(os.path.join(local_path, 'input'), 'r') as f:
found_trees = traverse_map(f.read())
assert found_trees == 237
def test_sample_input_with_multiple_slopes():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
tree_product = traverse_map_multiple_slopes(map_template, [[1, 1], [1,
3], [1, 5], [1, 7], [2, 1]])
assert tree_product == 336
def test_big_input_with_multiple_slopes():
with open(os.path.join(local_path, 'input'), 'r') as f:
tree_product = traverse_map_multiple_slopes(f.read(), [[1, 1], [1,
3], [1, 5], [1, 7], [2, 1]])
assert tree_product == 2106818610
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_get_map_cell():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert get_map_cell(map_template.split('\n'), 1, 10) == '.'
assert get_map_cell(map_template.split('\n'), 1, 10 + 11) == '.'
def test_sample_input():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
logger.info(traverse_map(map_template))
def test_sample_input_custom_slope():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert traverse_map(map_template, slope=[1, 1]) == 2
assert traverse_map(map_template, slope=[1, 3]) == 7
assert traverse_map(map_template, slope=[2, 1]) == 2
def test_big_input():
with open(os.path.join(local_path, 'input'), 'r') as f:
found_trees = traverse_map(f.read())
assert found_trees == 237
def test_sample_input_with_multiple_slopes():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
tree_product = traverse_map_multiple_slopes(map_template, [[1, 1], [1,
3], [1, 5], [1, 7], [2, 1]])
assert tree_product == 336
def test_big_input_with_multiple_slopes():
with open(os.path.join(local_path, 'input'), 'r') as f:
tree_product = traverse_map_multiple_slopes(f.read(), [[1, 1], [1,
3], [1, 5], [1, 7], [2, 1]])
assert tree_product == 2106818610
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger(__name__)
local_path = os.path.abspath(os.path.dirname(__file__))
def test_get_map_cell():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert get_map_cell(map_template.split('\n'), 1, 10) == '.'
assert get_map_cell(map_template.split('\n'), 1, 10 + 11) == '.'
def test_sample_input():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
logger.info(traverse_map(map_template))
def test_sample_input_custom_slope():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert traverse_map(map_template, slope=[1, 1]) == 2
assert traverse_map(map_template, slope=[1, 3]) == 7
assert traverse_map(map_template, slope=[2, 1]) == 2
def test_big_input():
with open(os.path.join(local_path, 'input'), 'r') as f:
found_trees = traverse_map(f.read())
assert found_trees == 237
def test_sample_input_with_multiple_slopes():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
tree_product = traverse_map_multiple_slopes(map_template, [[1, 1], [1,
3], [1, 5], [1, 7], [2, 1]])
assert tree_product == 336
def test_big_input_with_multiple_slopes():
with open(os.path.join(local_path, 'input'), 'r') as f:
tree_product = traverse_map_multiple_slopes(f.read(), [[1, 1], [1,
3], [1, 5], [1, 7], [2, 1]])
assert tree_product == 2106818610
<|reserved_special_token_1|>
import logging
import os.path
from day03.code.main import traverse_map, get_map_cell, traverse_map_multiple_slopes
logger = logging.getLogger(__name__)
local_path = os.path.abspath(os.path.dirname(__file__))
def test_get_map_cell():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert get_map_cell(map_template.split('\n'), 1, 10) == '.'
assert get_map_cell(map_template.split('\n'), 1, 10 + 11) == '.'
def test_sample_input():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
logger.info(traverse_map(map_template))
def test_sample_input_custom_slope():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert traverse_map(map_template, slope=[1, 1]) == 2
assert traverse_map(map_template, slope=[1, 3]) == 7
assert traverse_map(map_template, slope=[2, 1]) == 2
def test_big_input():
with open(os.path.join(local_path, 'input'), 'r') as f:
found_trees = traverse_map(f.read())
assert found_trees == 237
def test_sample_input_with_multiple_slopes():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
tree_product = traverse_map_multiple_slopes(map_template, [[1, 1], [1,
3], [1, 5], [1, 7], [2, 1]])
assert tree_product == 336
def test_big_input_with_multiple_slopes():
with open(os.path.join(local_path, 'input'), 'r') as f:
tree_product = traverse_map_multiple_slopes(f.read(), [[1, 1], [1,
3], [1, 5], [1, 7], [2, 1]])
assert tree_product == 2106818610
<|reserved_special_token_1|>
import logging
import os.path
from day03.code.main import traverse_map, get_map_cell, traverse_map_multiple_slopes
logger = logging.getLogger(__name__)
local_path = os.path.abspath(os.path.dirname(__file__))
def test_get_map_cell():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert get_map_cell(map_template.split("\n"), 1, 10) == "."
assert get_map_cell(map_template.split("\n"), 1, 10 + 11) == "."
def test_sample_input():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
logger.info(traverse_map(map_template))
def test_sample_input_custom_slope():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert traverse_map(map_template, slope=[1, 1]) == 2
assert traverse_map(map_template, slope=[1, 3]) == 7
assert traverse_map(map_template, slope=[2, 1]) == 2
def test_big_input():
with open(os.path.join(local_path, "input"), "r") as f:
found_trees = traverse_map(f.read())
assert found_trees == 237
def test_sample_input_with_multiple_slopes():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
tree_product = traverse_map_multiple_slopes(
map_template, [[1, 1], [1, 3], [1, 5], [1, 7], [2, 1]]
)
assert tree_product == 336
def test_big_input_with_multiple_slopes():
with open(os.path.join(local_path, "input"), "r") as f:
tree_product = traverse_map_multiple_slopes(
f.read(), [[1, 1], [1, 3], [1, 5], [1, 7], [2, 1]]
)
assert tree_product == 2106818610
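
# A minimal sketch (not the actual day03.code.main module) of implementations
# that would satisfy the tests above: the map repeats horizontally, so the
# column index wraps with modulo, and traverse_map counts '#' cells while
# stepping by a (rows_down, cols_right) slope. The names carry a _sketch_
# prefix so they do not shadow the imported functions.
def _sketch_get_map_cell(rows, row, col):
    return rows[row][col % len(rows[row])]


def _sketch_traverse_map(map_template, slope=(1, 3)):
    rows = map_template.split("\n")
    down, right = slope
    trees, row, col = 0, 0, 0
    while row < len(rows):
        if _sketch_get_map_cell(rows, row, col) == "#":
            trees += 1
        row += down
        col += right
    return trees


def _sketch_traverse_map_multiple_slopes(map_template, slopes):
    product = 1
    for slope in slopes:
        product *= _sketch_traverse_map(map_template, slope)
    return product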
|
flexible
|
{
"blob_id": "7599f13d1cabe73d876ff97722962f2fcf9a9940",
"index": 2269,
"step-1": "<mask token>\n\n\ndef test_sample_input():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n logger.info(traverse_map(map_template))\n\n\ndef test_sample_input_custom_slope():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n assert traverse_map(map_template, slope=[1, 1]) == 2\n assert traverse_map(map_template, slope=[1, 3]) == 7\n assert traverse_map(map_template, slope=[2, 1]) == 2\n\n\ndef test_big_input():\n with open(os.path.join(local_path, 'input'), 'r') as f:\n found_trees = traverse_map(f.read())\n assert found_trees == 237\n\n\ndef test_sample_input_with_multiple_slopes():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n tree_product = traverse_map_multiple_slopes(map_template, [[1, 1], [1, \n 3], [1, 5], [1, 7], [2, 1]])\n assert tree_product == 336\n\n\ndef test_big_input_with_multiple_slopes():\n with open(os.path.join(local_path, 'input'), 'r') as f:\n tree_product = traverse_map_multiple_slopes(f.read(), [[1, 1], [1, \n 3], [1, 5], [1, 7], [2, 1]])\n assert tree_product == 2106818610\n",
"step-2": "<mask token>\n\n\ndef test_get_map_cell():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n assert get_map_cell(map_template.split('\\n'), 1, 10) == '.'\n assert get_map_cell(map_template.split('\\n'), 1, 10 + 11) == '.'\n\n\ndef test_sample_input():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n logger.info(traverse_map(map_template))\n\n\ndef test_sample_input_custom_slope():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n assert traverse_map(map_template, slope=[1, 1]) == 2\n assert traverse_map(map_template, slope=[1, 3]) == 7\n assert traverse_map(map_template, slope=[2, 1]) == 2\n\n\ndef test_big_input():\n with open(os.path.join(local_path, 'input'), 'r') as f:\n found_trees = traverse_map(f.read())\n assert found_trees == 237\n\n\ndef test_sample_input_with_multiple_slopes():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n tree_product = traverse_map_multiple_slopes(map_template, [[1, 1], [1, \n 3], [1, 5], [1, 7], [2, 1]])\n assert tree_product == 336\n\n\ndef test_big_input_with_multiple_slopes():\n with open(os.path.join(local_path, 'input'), 'r') as f:\n tree_product = traverse_map_multiple_slopes(f.read(), [[1, 1], [1, \n 3], [1, 5], [1, 7], [2, 1]])\n assert tree_product == 2106818610\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\nlocal_path = os.path.abspath(os.path.dirname(__file__))\n\n\ndef test_get_map_cell():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n assert get_map_cell(map_template.split('\\n'), 1, 10) == '.'\n assert get_map_cell(map_template.split('\\n'), 1, 10 + 11) == '.'\n\n\ndef test_sample_input():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n logger.info(traverse_map(map_template))\n\n\ndef test_sample_input_custom_slope():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n assert traverse_map(map_template, slope=[1, 1]) == 2\n assert traverse_map(map_template, slope=[1, 3]) == 7\n assert traverse_map(map_template, slope=[2, 1]) == 2\n\n\ndef test_big_input():\n with open(os.path.join(local_path, 'input'), 'r') as f:\n found_trees = traverse_map(f.read())\n assert found_trees == 237\n\n\ndef test_sample_input_with_multiple_slopes():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n tree_product = traverse_map_multiple_slopes(map_template, [[1, 1], [1, \n 3], [1, 5], [1, 7], [2, 1]])\n assert tree_product == 336\n\n\ndef test_big_input_with_multiple_slopes():\n with open(os.path.join(local_path, 'input'), 'r') as f:\n tree_product = traverse_map_multiple_slopes(f.read(), [[1, 1], [1, \n 3], [1, 5], [1, 7], [2, 1]])\n assert tree_product == 2106818610\n",
"step-4": "import logging\nimport os.path\nfrom day03.code.main import traverse_map, get_map_cell, traverse_map_multiple_slopes\nlogger = logging.getLogger(__name__)\nlocal_path = os.path.abspath(os.path.dirname(__file__))\n\n\ndef test_get_map_cell():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n assert get_map_cell(map_template.split('\\n'), 1, 10) == '.'\n assert get_map_cell(map_template.split('\\n'), 1, 10 + 11) == '.'\n\n\ndef test_sample_input():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n logger.info(traverse_map(map_template))\n\n\ndef test_sample_input_custom_slope():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n assert traverse_map(map_template, slope=[1, 1]) == 2\n assert traverse_map(map_template, slope=[1, 3]) == 7\n assert traverse_map(map_template, slope=[2, 1]) == 2\n\n\ndef test_big_input():\n with open(os.path.join(local_path, 'input'), 'r') as f:\n found_trees = traverse_map(f.read())\n assert found_trees == 237\n\n\ndef test_sample_input_with_multiple_slopes():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n tree_product = traverse_map_multiple_slopes(map_template, [[1, 1], [1, \n 3], [1, 5], [1, 7], [2, 1]])\n assert tree_product == 336\n\n\ndef test_big_input_with_multiple_slopes():\n with open(os.path.join(local_path, 'input'), 'r') as f:\n tree_product = traverse_map_multiple_slopes(f.read(), [[1, 1], [1, \n 3], [1, 5], [1, 7], [2, 1]])\n assert tree_product == 2106818610\n",
"step-5": "import logging\nimport os.path\n\nfrom day03.code.main import traverse_map, get_map_cell, traverse_map_multiple_slopes\n\nlogger = logging.getLogger(__name__)\nlocal_path = os.path.abspath(os.path.dirname(__file__))\n\n\ndef test_get_map_cell():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n\n assert get_map_cell(map_template.split(\"\\n\"), 1, 10) == \".\"\n assert get_map_cell(map_template.split(\"\\n\"), 1, 10 + 11) == \".\"\n\n\ndef test_sample_input():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n\n logger.info(traverse_map(map_template))\n\n\ndef test_sample_input_custom_slope():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n\n assert traverse_map(map_template, slope=[1, 1]) == 2\n assert traverse_map(map_template, slope=[1, 3]) == 7\n assert traverse_map(map_template, slope=[2, 1]) == 2\n\n\ndef test_big_input():\n with open(os.path.join(local_path, \"input\"), \"r\") as f:\n found_trees = traverse_map(f.read())\n assert found_trees == 237\n\n\ndef test_sample_input_with_multiple_slopes():\n map_template = \"\"\"..##.......\n#...#...#..\n.#....#..#.\n..#.#...#.#\n.#...##..#.\n..#.##.....\n.#.#.#....#\n.#........#\n#.##...#...\n#...##....#\n.#..#...#.#\"\"\"\n\n tree_product = traverse_map_multiple_slopes(\n map_template, [[1, 1], [1, 3], [1, 5], [1, 7], [2, 1]]\n )\n assert tree_product == 336\n\n\ndef test_big_input_with_multiple_slopes():\n with open(os.path.join(local_path, \"input\"), \"r\") as f:\n tree_product = traverse_map_multiple_slopes(\n f.read(), [[1, 1], [1, 3], [1, 5], [1, 7], [2, 1]]\n )\n assert tree_product == 2106818610\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from selenium import webdriver
import math
import time
browser = webdriver.Chrome()
website = 'http://suninjuly.github.io/find_link_text'
link_text = str(math.ceil(math.pow(math.pi, math.e) * 10000))
browser.get(website)
find_link = browser.find_element_by_link_text(link_text)
find_link.click()
input_first_name = browser.find_element_by_tag_name('input')
input_first_name.send_keys('Timur')
input_last_name = browser.find_element_by_name('last_name')
input_last_name.send_keys('Atabaev')
input_city = browser.find_element_by_class_name('city')
input_city.send_keys('Tashkent')
input_country = browser.find_element_by_id('country')
input_country.send_keys('Uzbekistan')
button = browser.find_element_by_css_selector('button.btn')
button.click()
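# For reference: math.pow(math.pi, math.e) is roughly 22.45916, so
# math.ceil(... * 10000) evaluates to 224592 and the script clicks the link
# whose visible text is "224592".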
|
normal
|
{
"blob_id": "aa17e22bc13436333b1db4aee41eeced373119a8",
"index": 5704,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nbrowser.get(website)\n<mask token>\nfind_link.click()\n<mask token>\ninput_first_name.send_keys('Timur')\n<mask token>\ninput_last_name.send_keys('Atabaev')\n<mask token>\ninput_city.send_keys('Tashkent')\n<mask token>\ninput_country.send_keys('Uzbekistan')\n<mask token>\nbutton.click()\n",
"step-3": "<mask token>\nbrowser = webdriver.Chrome()\nwebsite = 'http://suninjuly.github.io/find_link_text'\nlink_text = str(math.ceil(math.pow(math.pi, math.e) * 10000))\nbrowser.get(website)\nfind_link = browser.find_element_by_link_text(link_text)\nfind_link.click()\ninput_first_name = browser.find_element_by_tag_name('input')\ninput_first_name.send_keys('Timur')\ninput_last_name = browser.find_element_by_name('last_name')\ninput_last_name.send_keys('Atabaev')\ninput_city = browser.find_element_by_class_name('city')\ninput_city.send_keys('Tashkent')\ninput_country = browser.find_element_by_id('country')\ninput_country.send_keys('Uzbekistan')\nbutton = browser.find_element_by_css_selector('button.btn')\nbutton.click()\n",
"step-4": "from selenium import webdriver\nimport math\nimport time\nbrowser = webdriver.Chrome()\nwebsite = 'http://suninjuly.github.io/find_link_text'\nlink_text = str(math.ceil(math.pow(math.pi, math.e) * 10000))\nbrowser.get(website)\nfind_link = browser.find_element_by_link_text(link_text)\nfind_link.click()\ninput_first_name = browser.find_element_by_tag_name('input')\ninput_first_name.send_keys('Timur')\ninput_last_name = browser.find_element_by_name('last_name')\ninput_last_name.send_keys('Atabaev')\ninput_city = browser.find_element_by_class_name('city')\ninput_city.send_keys('Tashkent')\ninput_country = browser.find_element_by_id('country')\ninput_country.send_keys('Uzbekistan')\nbutton = browser.find_element_by_css_selector('button.btn')\nbutton.click()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for k in range(grid):
for j in range(grid):
for i in range(grid):
x = (i / grid + 0.25) * tau
y = (j / grid + 0.25) * tau
z = (k / grid + 0.25) * tau
u = cos(x) * sin(y) * cos(z)
v = -sin(x) * cos(y) * cos(z)
w = 0.0
velocity.append(Velocity(u, v, w))
with open('taylor-green.vtk', 'w') as f:
f.write(
f"""# vtk DataFile Version 2.0
test
ASCII
DATASET STRUCTURED_POINTS
DIMENSIONS {grid} {grid} {grid}
ORIGIN 0.0 0.0 0.0
SPACING 1.0 1.0 1.0
POINT_DATA {points}
VECTORS velocity float
"""
)
for v in velocity:
f.write(f'{v.x} {v.y} {v.z}\n')
f.write('SCALARS angle float\n')
f.write('LOOKUP_TABLE default\n')
for v in velocity:
f.write('%f\n' % atan2(v.y, v.x))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
grid = 21
c = grid / 2
points = grid ** 3
Velocity = namedtuple('Velocity', ('x', 'y', 'z'))
velocity = []
for k in range(grid):
for j in range(grid):
for i in range(grid):
x = (i / grid + 0.25) * tau
y = (j / grid + 0.25) * tau
z = (k / grid + 0.25) * tau
u = cos(x) * sin(y) * cos(z)
v = -sin(x) * cos(y) * cos(z)
w = 0.0
velocity.append(Velocity(u, v, w))
with open('taylor-green.vtk', 'w') as f:
f.write(
f"""# vtk DataFile Version 2.0
test
ASCII
DATASET STRUCTURED_POINTS
DIMENSIONS {grid} {grid} {grid}
ORIGIN 0.0 0.0 0.0
SPACING 1.0 1.0 1.0
POINT_DATA {points}
VECTORS velocity float
"""
)
for v in velocity:
f.write(f'{v.x} {v.y} {v.z}\n')
f.write('SCALARS angle float\n')
f.write('LOOKUP_TABLE default\n')
for v in velocity:
f.write('%f\n' % atan2(v.y, v.x))
<|reserved_special_token_1|>
from collections import namedtuple
from math import tau, sin, cos, atan2
grid = 21
c = grid / 2
points = grid ** 3
Velocity = namedtuple('Velocity', ('x', 'y', 'z'))
velocity = []
for k in range(grid):
for j in range(grid):
for i in range(grid):
x = (i / grid + 0.25) * tau
y = (j / grid + 0.25) * tau
z = (k / grid + 0.25) * tau
u = cos(x) * sin(y) * cos(z)
v = -sin(x) * cos(y) * cos(z)
w = 0.0
velocity.append(Velocity(u, v, w))
with open('taylor-green.vtk', 'w') as f:
f.write(
f"""# vtk DataFile Version 2.0
test
ASCII
DATASET STRUCTURED_POINTS
DIMENSIONS {grid} {grid} {grid}
ORIGIN 0.0 0.0 0.0
SPACING 1.0 1.0 1.0
POINT_DATA {points}
VECTORS velocity float
"""
)
for v in velocity:
f.write(f'{v.x} {v.y} {v.z}\n')
f.write('SCALARS angle float\n')
f.write('LOOKUP_TABLE default\n')
for v in velocity:
f.write('%f\n' % atan2(v.y, v.x))
<|reserved_special_token_1|>
from collections import namedtuple
from math import tau, sin, cos, atan2
grid = 21
c = grid / 2
points = grid**3
Velocity = namedtuple('Velocity', ('x', 'y', 'z'))
velocity = []
for k in range(grid):
for j in range(grid):
for i in range(grid):
x = (i / grid + 0.25) * tau
y = (j / grid + 0.25) * tau
z = (k / grid + 0.25) * tau
u = cos(x) * sin(y) * cos(z)
v = -sin(x) * cos(y) * cos(z)
w = 0.0
velocity.append(Velocity(u, v, w))
with open('taylor-green.vtk', 'w') as f:
f.write(f"""\
# vtk DataFile Version 2.0
test
ASCII
DATASET STRUCTURED_POINTS
DIMENSIONS {grid} {grid} {grid}
ORIGIN 0.0 0.0 0.0
SPACING 1.0 1.0 1.0
POINT_DATA {points}
VECTORS velocity float
""")
for v in velocity:
f.write(f"{v.x} {v.y} {v.z}\n")
f.write("SCALARS angle float\n")
f.write("LOOKUP_TABLE default\n")
for v in velocity:
f.write("%f\n" % atan2(v.y, v.x))
|
flexible
|
{
"blob_id": "d70986b016e58877c39bfbb76c5bd622c44cbca9",
"index": 9273,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor k in range(grid):\n for j in range(grid):\n for i in range(grid):\n x = (i / grid + 0.25) * tau\n y = (j / grid + 0.25) * tau\n z = (k / grid + 0.25) * tau\n u = cos(x) * sin(y) * cos(z)\n v = -sin(x) * cos(y) * cos(z)\n w = 0.0\n velocity.append(Velocity(u, v, w))\nwith open('taylor-green.vtk', 'w') as f:\n f.write(\n f\"\"\"# vtk DataFile Version 2.0\ntest\nASCII\nDATASET STRUCTURED_POINTS\nDIMENSIONS {grid} {grid} {grid}\nORIGIN 0.0 0.0 0.0\nSPACING 1.0 1.0 1.0\n\nPOINT_DATA {points}\nVECTORS velocity float\n\"\"\"\n )\n for v in velocity:\n f.write(f'{v.x} {v.y} {v.z}\\n')\n f.write('SCALARS angle float\\n')\n f.write('LOOKUP_TABLE default\\n')\n for v in velocity:\n f.write('%f\\n' % atan2(v.y, v.x))\n",
"step-3": "<mask token>\ngrid = 21\nc = grid / 2\npoints = grid ** 3\nVelocity = namedtuple('Velocity', ('x', 'y', 'z'))\nvelocity = []\nfor k in range(grid):\n for j in range(grid):\n for i in range(grid):\n x = (i / grid + 0.25) * tau\n y = (j / grid + 0.25) * tau\n z = (k / grid + 0.25) * tau\n u = cos(x) * sin(y) * cos(z)\n v = -sin(x) * cos(y) * cos(z)\n w = 0.0\n velocity.append(Velocity(u, v, w))\nwith open('taylor-green.vtk', 'w') as f:\n f.write(\n f\"\"\"# vtk DataFile Version 2.0\ntest\nASCII\nDATASET STRUCTURED_POINTS\nDIMENSIONS {grid} {grid} {grid}\nORIGIN 0.0 0.0 0.0\nSPACING 1.0 1.0 1.0\n\nPOINT_DATA {points}\nVECTORS velocity float\n\"\"\"\n )\n for v in velocity:\n f.write(f'{v.x} {v.y} {v.z}\\n')\n f.write('SCALARS angle float\\n')\n f.write('LOOKUP_TABLE default\\n')\n for v in velocity:\n f.write('%f\\n' % atan2(v.y, v.x))\n",
"step-4": "from collections import namedtuple\nfrom math import tau, sin, cos, atan2\ngrid = 21\nc = grid / 2\npoints = grid ** 3\nVelocity = namedtuple('Velocity', ('x', 'y', 'z'))\nvelocity = []\nfor k in range(grid):\n for j in range(grid):\n for i in range(grid):\n x = (i / grid + 0.25) * tau\n y = (j / grid + 0.25) * tau\n z = (k / grid + 0.25) * tau\n u = cos(x) * sin(y) * cos(z)\n v = -sin(x) * cos(y) * cos(z)\n w = 0.0\n velocity.append(Velocity(u, v, w))\nwith open('taylor-green.vtk', 'w') as f:\n f.write(\n f\"\"\"# vtk DataFile Version 2.0\ntest\nASCII\nDATASET STRUCTURED_POINTS\nDIMENSIONS {grid} {grid} {grid}\nORIGIN 0.0 0.0 0.0\nSPACING 1.0 1.0 1.0\n\nPOINT_DATA {points}\nVECTORS velocity float\n\"\"\"\n )\n for v in velocity:\n f.write(f'{v.x} {v.y} {v.z}\\n')\n f.write('SCALARS angle float\\n')\n f.write('LOOKUP_TABLE default\\n')\n for v in velocity:\n f.write('%f\\n' % atan2(v.y, v.x))\n",
"step-5": "from collections import namedtuple\nfrom math import tau, sin, cos, atan2\n\ngrid = 21\nc = grid / 2\npoints = grid**3\n\nVelocity = namedtuple('Velocity', ('x', 'y', 'z'))\n\nvelocity = []\nfor k in range(grid):\n for j in range(grid):\n for i in range(grid):\n x = (i / grid + 0.25) * tau \n y = (j / grid + 0.25) * tau\n z = (k / grid + 0.25) * tau \n u = cos(x) * sin(y) * cos(z)\n v = -sin(x) * cos(y) * cos(z)\n w = 0.0\n velocity.append(Velocity(u, v, w))\n\nwith open('taylor-green.vtk', 'w') as f:\n f.write(f\"\"\"\\\n# vtk DataFile Version 2.0\ntest\nASCII\nDATASET STRUCTURED_POINTS\nDIMENSIONS {grid} {grid} {grid}\nORIGIN 0.0 0.0 0.0\nSPACING 1.0 1.0 1.0\n\nPOINT_DATA {points}\nVECTORS velocity float\n\"\"\")\n for v in velocity:\n f.write(f\"{v.x} {v.y} {v.z}\\n\")\n\n f.write(\"SCALARS angle float\\n\")\n f.write(\"LOOKUP_TABLE default\\n\")\n \n for v in velocity:\n f.write(\"%f\\n\" % atan2(v.y, v.x))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import numpy.random as nr
import math
import os
from datetime import datetime
from sklearn.linear_model import LinearRegression, SGDRegressor
import sys
import time
import imp
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor, plot_importance
from sklearn.model_selection import train_test_split
import lightgbm as lgb
def drop_duplicate(data, sub_set):
print('Before drop shape:', data.shape)
before = data.shape[0]
data.drop_duplicates(sub_set, keep='first', inplace=True)
data.reset_index(drop=True, inplace=True)
print('After drop shape:', data.shape)
after = data.shape[0]
print('Total Duplicate:', before - after)
def rmse(predictions, targets):
return np.sqrt(np.mean((predictions - targets) ** 2))
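# Worked example: rmse(np.array([2.0, 4.0]), np.array([1.0, 3.0])) == 1.0
# (both residuals are 1, so the root of their mean square is 1).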
class predict(object):
def __init__(self,trainfile,testfile):
self.trainfile = trainfile
self.testfile = testfile
self.__lr = LinearRegression()
# self.__dtree = DecisionTreeClassifier()
# self.__rforest = RandomForestClassifier()
# self.__svm = SVC(kernel='rbf')
self.lgb_params = {
'feature_fraction': 1,
'metric': 'rmse',
'min_data_in_leaf': 16,
'bagging_fraction': 0.85,
'learning_rate': 0.03,
'objective': 'mse',
'bagging_seed': 2 ** 7,
'num_leaves': 32,
'bagging_freq': 3,
'verbose': 0
}
self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=38,random_state=50)
        self.__xgb = XGBRegressor(max_depth=8, n_estimators=1000, min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=0.15, seed=42)
self.train_data = None
self.train_labels = None
self.train_data1 = None
self.train_labels1 = None
self.val_data = None
self.val_labels = None
self.test_data = None
self.predicted_labels = None
self.x_train_val = None
self.y_train_val = None
def trainingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.trainfile,parse_dates=['date'],date_parser=parser)
df = df.dropna()
df = df.loc[df['item_cnt_day']>0]
subset_train = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_cnt_day']
drop_duplicate(df, sub_set=subset_train)
median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.date_block_num == 4) & (df.item_price > 0)].item_price.median()
df.loc[df.item_price < 0, 'item_price'] = median
df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)
df['item_price'] = df['item_price'].clip(0, 300000)
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day','month','year','item_id', 'shop_id','item_price','item_cnt_day']]
df['item_id'] = np.log1p(df['item_id'])
self.train_labels1 = df['item_cnt_day']
self.train_data1 = df.drop(columns='item_cnt_day')
self.train_data,self.val_data,self.train_labels,self.val_labels=train_test_split(self.train_data1,self.train_labels1,test_size=0.3)
self.x_train_val = self.train_data[-100:]
self.y_train_val = self.train_labels[-100:]
def testingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.testfile,parse_dates=['date'],date_parser=parser)
subset_test = ['date', 'date_block_num', 'shop_id', 'item_id']
drop_duplicate(df, sub_set=subset_test)
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day','month','year','item_id', 'shop_id','item_price']]
df['item_id'] = np.log1p(df['item_id'])
        self.test_data = df
def data(self):
self.trainingdata()
self.testingdata()
def trainLinearRegression(self):
self.__lr.fit(self.train_data,self.train_labels)
def testLinearRegression(self):
self.predicted_labels = self.__lr.predict(self.val_data)
# print ("Linear Regression score " + str(self.__lr.score(self.val_data, self.val_labels)))
print ("Linear Regression score " + str(rmse(self.predicted_labels,self.val_labels)))
def trainExtraTreeRegressor(self):
self.__tree_reg.fit(self.train_data,self.train_labels)
def testExtraTreeRegressor(self):
self.predicted_labels = self.__tree_reg.predict(self.val_data)
print ("ExtraTreeRegressor score " + str(rmse(self.predicted_labels,self.val_labels)))
def trainLightGBM(self):
        train_set = lgb.Dataset(self.train_data, label=self.train_labels)
        self.__lgb_model = lgb.train(self.lgb_params, train_set, num_boost_round=300)
def testLightGBM(self):
        self.predicted_labels = self.__lgb_model.predict(self.val_data)
print ("LightGBM score " + str(rmse(self.predicted_labels,self.val_labels)))
def trainXGBoost(self):
self.__xgb.fit(self.train_data,self.train_labels,eval_metric="rmse",eval_set=[(self.train_data, self.train_labels), (self.x_train_val, self.y_train_val)],verbose=True,early_stopping_rounds=10)
def testXGBoost(self):
self.predicted_labels = self.__xgb.predict(self.val_data)
print ("XGBoost score " + str(rmse(self.predicted_labels,self.val_labels)))
if __name__ == "__main__":
train_data_name = sys.argv[1]
test_data_name = sys.argv[2]
model = predict(train_data_name,test_data_name)
model.data()
# model.trainLinearRegression()
# model.testLinearRegression()
# model.trainExtraTreeRegressor()
# model.testExtraTreeRegressor()
# model.trainLightGBM()
# model.testLightGBM()
# model.trainXGBoost()
# model.testXGBoost()
# plotConfusionMatrix(model.test_labels,model.predicted_labels)
# model.trainDecesionTree()
# model.testDecesionTree()
# model.trainRandomForrest()
# model.testRandomForrest()
# model.trainSVM()
# model.testSVM()
|
normal
|
{
"blob_id": "ee49ce63951721458cb98b370285d04231bb2c20",
"index": 7438,
"step-1": "<mask token>\n\n\nclass predict(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n <mask token>\n <mask token>\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass predict(object):\n\n def __init__(self, trainfile, testfile):\n self.trainfile = trainfile\n self.testfile = testfile\n self.__lr = LinearRegression()\n self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',\n 'min_data_in_leaf': 16, 'bagging_fraction': 0.85,\n 'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **\n 7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}\n self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=\n 38, random_state=50)\n self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,\n min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=\n 0.15, seed=42)\n self.train_data = None\n self.train_labels = None\n self.train_data1 = None\n self.train_labels1 = None\n self.val_data = None\n self.val_labels = None\n self.test_data = None\n self.predicted_labels = None\n self.x_train_val = None\n self.y_train_val = None\n <mask token>\n <mask token>\n <mask token>\n\n def trainLinearRegression(self):\n self.__lr.fit(self.train_data, self.train_labels)\n <mask token>\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n <mask token>\n\n def trainLightGBM(self):\n lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=\n train_labels), 300)\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n\n def testXGBoost(self):\n self.predicted_labels = self.__xgb.predict(self.val_data)\n print('XGBoost score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef drop_duplicate(data, sub_set):\n print('Before drop shape:', data.shape)\n before = data.shape[0]\n data.drop_duplicates(sub_set, keep='first', inplace=True)\n data.reset_index(drop=True, inplace=True)\n print('After drop shape:', data.shape)\n after = data.shape[0]\n print('Total Duplicate:', before - after)\n\n\n<mask token>\n\n\nclass predict(object):\n\n def __init__(self, trainfile, testfile):\n self.trainfile = trainfile\n self.testfile = testfile\n self.__lr = LinearRegression()\n self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',\n 'min_data_in_leaf': 16, 'bagging_fraction': 0.85,\n 'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **\n 7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}\n self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=\n 38, random_state=50)\n self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,\n min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=\n 0.15, seed=42)\n self.train_data = None\n self.train_labels = None\n self.train_data1 = None\n self.train_labels1 = None\n self.val_data = None\n self.val_labels = None\n self.test_data = None\n self.predicted_labels = None\n self.x_train_val = None\n self.y_train_val = None\n\n def trainingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.trainfile, parse_dates=['date'], date_parser=\n parser)\n df = df.dropna()\n df = df.loc[df['item_cnt_day'] > 0]\n subset_train = ['date', 'date_block_num', 'shop_id', 'item_id',\n 'item_cnt_day']\n drop_duplicate(df, sub_set=subset_train)\n median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.\n date_block_num == 4) & (df.item_price > 0)].item_price.median()\n df.loc[df.item_price < 0, 'item_price'] = median\n df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)\n df['item_price'] = df['item_price'].clip(0, 300000)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price',\n 'item_cnt_day']]\n df['item_id'] = np.log1p(df['item_id'])\n self.train_labels1 = df['item_cnt_day']\n self.train_data1 = df.drop(columns='item_cnt_day')\n (self.train_data, self.val_data, self.train_labels, self.val_labels\n ) = (train_test_split(self.train_data1, self.train_labels1,\n test_size=0.3))\n self.x_train_val = self.train_data[-100:]\n self.y_train_val = self.train_labels[-100:]\n\n def testingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.testfile, parse_dates=['date'], date_parser=\n parser)\n subset_test = ['date', 'date_block_num', 'shop_id', 'item_id']\n drop_duplicate(df, sub_set=subset_test)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 
'item_id', 'shop_id', 'item_price']]\n df['item_id'] = np.log1p(df['item_id'])\n self.test_data = df\n\n def data(self):\n self.trainingdata()\n self.testingdata()\n\n def trainLinearRegression(self):\n self.__lr.fit(self.train_data, self.train_labels)\n\n def testLinearRegression(self):\n self.predicted_labels = self.__lr.predict(self.val_data)\n print('Linear Regression score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n\n def testExtraTreeRegressor(self):\n self.predicted_labels = self.__tree_reg.predict(self.val_data)\n print('ExtraTreeRegressor score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainLightGBM(self):\n lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=\n train_labels), 300)\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n\n def testXGBoost(self):\n self.predicted_labels = self.__xgb.predict(self.val_data)\n print('XGBoost score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n\n<mask token>\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport numpy.random as nr\nimport math\nimport os\nfrom datetime import datetime\nfrom sklearn.linear_model import LinearRegression, SGDRegressor\nimport sys\nimport time\nimport imp\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom xgboost import XGBRegressor, plot_importance\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\n\n\ndef drop_duplicate(data, sub_set):\n print('Before drop shape:', data.shape)\n before = data.shape[0]\n data.drop_duplicates(sub_set, keep='first', inplace=True)\n data.reset_index(drop=True, inplace=True)\n print('After drop shape:', data.shape)\n after = data.shape[0]\n print('Total Duplicate:', before - after)\n\n\ndef rmse(predictions, targets):\n return np.sqrt(np.mean((predictions - targets) ** 2))\n\n\nclass predict(object):\n\n def __init__(self, trainfile, testfile):\n self.trainfile = trainfile\n self.testfile = testfile\n self.__lr = LinearRegression()\n self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',\n 'min_data_in_leaf': 16, 'bagging_fraction': 0.85,\n 'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **\n 7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}\n self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=\n 38, random_state=50)\n self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,\n min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=\n 0.15, seed=42)\n self.train_data = None\n self.train_labels = None\n self.train_data1 = None\n self.train_labels1 = None\n self.val_data = None\n self.val_labels = None\n self.test_data = None\n self.predicted_labels = None\n self.x_train_val = None\n self.y_train_val = None\n\n def trainingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.trainfile, parse_dates=['date'], date_parser=\n parser)\n df = df.dropna()\n df = df.loc[df['item_cnt_day'] > 0]\n subset_train = ['date', 'date_block_num', 'shop_id', 'item_id',\n 'item_cnt_day']\n drop_duplicate(df, sub_set=subset_train)\n median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.\n date_block_num == 4) & (df.item_price > 0)].item_price.median()\n df.loc[df.item_price < 0, 'item_price'] = median\n df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)\n df['item_price'] = df['item_price'].clip(0, 300000)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price',\n 'item_cnt_day']]\n df['item_id'] = np.log1p(df['item_id'])\n self.train_labels1 = df['item_cnt_day']\n self.train_data1 = df.drop(columns='item_cnt_day')\n (self.train_data, self.val_data, self.train_labels, self.val_labels\n ) = (train_test_split(self.train_data1, self.train_labels1,\n test_size=0.3))\n self.x_train_val = self.train_data[-100:]\n self.y_train_val = self.train_labels[-100:]\n\n def testingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.testfile, parse_dates=['date'], date_parser=\n parser)\n subset_test 
= ['date', 'date_block_num', 'shop_id', 'item_id']\n drop_duplicate(df, sub_set=subset_test)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price']]\n df['item_id'] = np.log1p(df['item_id'])\n self.test_data = df\n\n def data(self):\n self.trainingdata()\n self.testingdata()\n\n def trainLinearRegression(self):\n self.__lr.fit(self.train_data, self.train_labels)\n\n def testLinearRegression(self):\n self.predicted_labels = self.__lr.predict(self.val_data)\n print('Linear Regression score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n\n def testExtraTreeRegressor(self):\n self.predicted_labels = self.__tree_reg.predict(self.val_data)\n print('ExtraTreeRegressor score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainLightGBM(self):\n lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=\n train_labels), 300)\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n\n def testXGBoost(self):\n self.predicted_labels = self.__xgb.predict(self.val_data)\n print('XGBoost score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n\nif __name__ == '__main__':\n train_data_name = sys.argv[1]\n test_data_name = sys.argv[2]\n model = predict(train_data_name, test_data_name)\n model.data()\n",
"step-5": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport numpy.random as nr\nimport math\nimport os\nfrom datetime import datetime\nfrom sklearn.linear_model import LinearRegression, SGDRegressor\nimport sys\nimport time\nimport imp\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom xgboost import XGBRegressor, plot_importance\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\n\n\n\n\n\n\ndef drop_duplicate(data, sub_set):\n print('Before drop shape:', data.shape)\n before = data.shape[0]\n data.drop_duplicates(sub_set, keep='first', inplace=True)\n data.reset_index(drop=True, inplace=True)\n print('After drop shape:', data.shape)\n after = data.shape[0]\n print('Total Duplicate:', before - after)\n\ndef rmse(predictions, targets):\n return np.sqrt(np.mean((predictions - targets) ** 2))\n\n\nclass predict(object):\n\n\tdef __init__(self,trainfile,testfile):\n\t\tself.trainfile = trainfile\n\t\tself.testfile = testfile\n\t\tself.__lr = LinearRegression()\n\t\t# self.__dtree = DecisionTreeClassifier()\n\t\t# self.__rforest = RandomForestClassifier()\n\t\t# self.__svm = SVC(kernel='rbf')\n\t\tself.lgb_params = {\n 'feature_fraction': 1,\n 'metric': 'rmse',\n 'min_data_in_leaf': 16,\n 'bagging_fraction': 0.85,\n 'learning_rate': 0.03,\n 'objective': 'mse',\n 'bagging_seed': 2 ** 7,\n 'num_leaves': 32,\n 'bagging_freq': 3,\n 'verbose': 0\n \t}\n\t\tself.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=38,random_state=50)\n\t\tself._xgb = XGBRegressor(max_depth=8,n_estimators=1000,min_child_weight=300,colsample_bytree=0.9,subsample=0.9,eta=0.15,seed=42)\n\t\tself.train_data = None\n\t\tself.train_labels = None\n\t\tself.train_data1 = None\n\t\tself.train_labels1 = None\n\t\tself.val_data = None\n\t\tself.val_labels = None\n\t\tself.test_data = None\n\t\tself.predicted_labels = None\n\t\tself.x_train_val = None\n\t\tself.y_train_val = None\n\n\tdef trainingdata(self):\n\t\tparser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n\t\tdf = pd.read_csv(self.trainfile,parse_dates=['date'],date_parser=parser)\n\t\tdf = df.dropna()\n\t\tdf = df.loc[df['item_cnt_day']>0]\n\t\tsubset_train = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_cnt_day']\n\t\tdrop_duplicate(df, sub_set=subset_train)\n\t\tmedian = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.date_block_num == 4) & (df.item_price > 0)].item_price.median()\n\t\tdf.loc[df.item_price < 0, 'item_price'] = median\n\t\tdf['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)\n\t\tdf['item_price'] = df['item_price'].clip(0, 300000)\n\t\tdf.loc[df.shop_id == 0, 'shop_id'] = 57\n\t\tdf.loc[df.shop_id == 1, 'shop_id'] = 58\n\t\tdf.loc[df.shop_id == 10, 'shop_id'] = 11\n\t\n\t\tdf['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n\t\tdf['day'] = df['day'].astype('int64')\n\t\tdf['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n\t\tdf['month'] = df['month'].astype('int64')\n\t\tdf['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n\t\tdf['year'] = df['year'].astype('int64')\n\t\tdf = df[['day','month','year','item_id', 'shop_id','item_price','item_cnt_day']]\n\t\tdf['item_id'] = np.log1p(df['item_id'])\n\t\tself.train_labels1 = df['item_cnt_day']\n\t\tself.train_data1 = df.drop(columns='item_cnt_day')\n\t\tself.train_data,self.val_data,self.train_labels,self.val_labels=train_test_split(self.train_data1,self.train_labels1,test_size=0.3)\n\t\tself.x_train_val = 
self.train_data[-100:]\n\t\tself.y_train_val = self.train_labels[-100:]\n\n\n\tdef testingdata(self):\n\t\tparser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n\t\tdf = pd.read_csv(self.testfile,parse_dates=['date'],date_parser=parser)\n\t\tsubset_test = ['date', 'date_block_num', 'shop_id', 'item_id']\n\t\tdrop_duplicate(df, sub_set=subset_test)\n\t\tdf.loc[df.shop_id == 0, 'shop_id'] = 57\n\t\tdf.loc[df.shop_id == 1, 'shop_id'] = 58\n\t\tdf.loc[df.shop_id == 10, 'shop_id'] = 11\n\t\tdf['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n\t\tdf['day'] = df['day'].astype('int64')\n\t\tdf['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n\t\tdf['month'] = df['month'].astype('int64')\n\t\tdf['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n\t\tdf['year'] = df['year'].astype('int64')\n\t\tdf = df[['day','month','year','item_id', 'shop_id','item_price']]\n\t\tdf['item_id'] = np.log1p(df['item_id'])\n\t\tself.test_data = df;\n\n\tdef data(self):\n\t\tself.trainingdata()\n\t\tself.testingdata()\n\n\tdef trainLinearRegression(self):\n\t\tself.__lr.fit(self.train_data,self.train_labels)\n\n\tdef testLinearRegression(self):\n\t\tself.predicted_labels = self.__lr.predict(self.val_data)\n\t\t# print (\"Linear Regression score \" + str(self.__lr.score(self.val_data, self.val_labels)))\n\t\tprint (\"Linear Regression score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\tdef trainExtraTreeRegressor(self):\n\t\tself.__tree_reg.fit(self.train_data,self.train_labels)\n\n\tdef testExtraTreeRegressor(self):\n\t\tself.predicted_labels = self.__tree_reg.predict(self.val_data)\n\t\tprint (\"ExtraTreeRegressor score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\tdef trainLightGBM(self):\n\t\tlgb.train(self.lgb_params,lgb.dataset(self.train_data,label=train_labels),300)\n\n\tdef testLightGBM(self):\n\t\tself.predicted_labels = lgb.predict(self.val_data)\n\t\tprint (\"LightGBM score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\tdef trainXGBoost(self):\n\t\tself.__xgb.fit(self.train_data,self.train_labels,eval_metric=\"rmse\",eval_set=[(self.train_data, self.train_labels), (self.x_train_val, self.y_train_val)],verbose=True,early_stopping_rounds=10)\n\n\tdef testXGBoost(self):\n\t\tself.predicted_labels = self.__xgb.predict(self.val_data)\n\t\tprint (\"XGBoost score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\ttrain_data_name = sys.argv[1]\n\ttest_data_name = sys.argv[2]\n\tmodel = predict(train_data_name,test_data_name)\n\tmodel.data()\n\t# model.trainLinearRegression()\n\t# model.testLinearRegression()\n\n\t# model.trainExtraTreeRegressor()\n\t# model.testExtraTreeRegressor()\n\n\t# model.trainLightGBM()\n\t# model.testLightGBM()\n\n\t# model.trainXGBoost()\n\t# model.testXGBoost()\n\n\n\t# plotConfusionMatrix(model.test_labels,model.predicted_labels)\n\t\n\t# model.trainDecesionTree()\n\t# model.testDecesionTree()\n\n\t# model.trainRandomForrest()\n\t# model.testRandomForrest()\n\n\t# model.trainSVM()\n\t# model.testSVM()\n\n\n\n\n\n",
"step-ids": [
4,
8,
14,
17,
18
]
}
|
[
4,
8,
14,
17,
18
] |
<|reserved_special_token_0|>
class TwoStage(BayesianModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TwoStageBF(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):
"""
Args:
"""
self.name = 'TwoStageBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStageBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
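
    # A sketch of a possible post-sampling helper (not part of the original
    # class): with the equal prior odds used above (p = [0.5, 0.5]), the Bayes
    # factor in favour of the mediated model equals the posterior odds of the
    # mediator_model indicator, which can be estimated from its trace.
    def bayes_factor_sketch(self, trace, burn=0):
        # Posterior frequency of mediator_model == 1 across retained samples.
        z = np.asarray(trace['mediator_model'][burn:], dtype=float)
        post_mediated = z.mean()
        # Prior odds are 1 here, so the Bayes factor is just the posterior odds.
        return post_mediated / max(1.0 - post_mediated, 1e-12)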
class Joint(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype.
"""
def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,
tau_beta=1, lambda_beta=1, m_sigma_beta=10, p_sigma_beta=10, *args,
**kwargs):
"""
        Expression ~ N(X\\beta, \\sigma_exp)
        P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
        P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
        Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
        P(\\alpha) ~ Uniform(-10, 10)
        P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
        Args:
            tau_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
            lambda_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'Joint'
self.model_type = model_type
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd, 'tau_beta':
tau_beta, 'lambda_beta': lambda_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
if model_type == 'laplace':
self.create_model = self._create_model_laplace
elif model_type == 'horseshoe':
self.create_model = self._create_model_horseshoe
elif model_type == 'prior':
self.create_model = self._create_model_prior
else:
raise NotImplementedError('Unsupported model type')
super(Joint, self).__init__(*args, **kwargs)
def _create_model_prior(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_horseshoe(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
tau_beta = pm.HalfCauchy('tau_beta', beta=self.vars['tau_beta'])
lambda_beta = pm.HalfCauchy('lambda_beta', beta=self.vars[
'lambda_beta'], shape=(1, n_snps))
total_variance = pm.dot(lambda_beta * lambda_beta, tau_beta *
tau_beta)
beta_med = pm.Normal('beta_med', mu=0, tau=1 / total_variance,
shape=(1, n_snps))
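            # `tau` is a precision, so this is beta_med ~ Normal(0, sd=lambda_beta * tau_beta),
            # i.e. the scale-mixture (global-local shrinkage) form of the horseshoe prior.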
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
alpha = pm.Normal('alpha', 0, 1)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_laplace(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class MultiStudyMultiTissue(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype in multiple studies
and multiple tissues. Assume that tissues from the same
individual are independent given the genotypes i.e.
P(TisA, TisB | G) = P(TisA | G) P(TisB | G)
"""
def __init__(self, m_laplace_beta=1, m_sigma_beta=10, p_sigma_beta=10,
*args, **kwargs):
"""
        Expression ~ N(X\\beta, \\sigma_exp)
        P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
        P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
        Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
        P(\\alpha) ~ Uniform(-10, 10)
        P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
        Args:
            tau_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
            lambda_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'MultiStudyMultiTissue'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'m_laplace_beta': m_laplace_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)
def set_idx(self, med_idx, gwas_idx):
self.med_idx = med_idx
self.gwas_idx = gwas_idx
return
def create_model(self, med_gen, med_phen, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
n_tissues = len(np.unique(self.med_idx))
n_studies = len(np.unique(self.gwas_idx))
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=self.vars[
'm_laplace_beta'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1,
shape=n_tissues)
mediator_gamma = pm.Uniform('mediator_gamma', lower=0, upper=1,
shape=n_tissues)
mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[
self.med_idx] * pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'], shape=n_tissues)
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma[self.med_idx], observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)
alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)
alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)
alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=
n_studies)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=1,
shape=n_studies)
phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx
] * phenotype_expression_mu
phen_sigma = phenotype_sigma[self.gwas_idx]
phen = pm.Normal('phen', mu=phen_mu, sd=phen_sigma, observed=
gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
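# Illustrative sketch (hypothetical labels; only numpy is assumed): building the
# integer index arrays that MultiStudyMultiTissue.set_idx expects before
# create_model/run are called. np.unique's return_inverse gives the 0-based codes
# that mediator_intercept[self.med_idx] and alpha[self.gwas_idx] index into.
def _example_multi_tissue_indices():
    import numpy as np
    tissue_labels = np.array(['adipose', 'adipose', 'muscle', 'muscle'])
    study_labels = np.array(['study_a', 'study_a', 'study_b'])
    _, med_idx = np.unique(tissue_labels, return_inverse=True)    # [0, 0, 1, 1]
    _, gwas_idx = np.unique(study_labels, return_inverse=True)    # [0, 0, 1]
    return med_idx, gwas_idx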
class NonMediated(BayesianModel):
"""
Model the relationship between the genotype and
phenotype without any added information about the
mediator. Use it as a basis for getting
the null distribution under a mediation analysis.
"""
def __init__(self, g_laplace_beta=1, p_sigma_beta=10, *args, **kwargs):
self.name = 'NonMediated'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'g_laplace_beta': g_laplace_beta, 'p_sigma_beta':
p_sigma_beta}
super(NonMediated, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta = pm.Laplace('beta', mu=0, b=self.vars['g_laplace_beta'],
shape=(1, n_snps))
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
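# Illustrative sketch (hypothetical variable names): NonMediated serves as the
# no-mediation baseline. One possible comparison against a mediated model fitted
# on the same GWAS data uses the posterior predictive criteria from calculate_ppc:
#
#     null_trace = null_model.run(gwas_gen=X, gwas_phen=y)
#     alt_trace = alt_model.run(gwas_gen=X, gwas_phen=y)
#     delta_waic = (null_model.calculate_ppc(null_trace)['waic'] -
#                   alt_model.calculate_ppc(alt_trace)['waic'])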
class MeasurementError(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, m_laplace_beta=1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementError'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'p_sigma_beta': p_sigma_beta}
super(MeasurementError, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Uniform('alpha', lower=-10, upper=10)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * mediator
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
                # gwas_gen is not defined in this model's scope; the observed
                # mediator tensor is used as the genotype-side minibatch input
                self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
class MeasurementErrorBF(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, precomp_med=True,
heritability=0.1, p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementErrorBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'heritability': heritability, 'p_sigma_beta': p_sigma_beta,
'precomp_med': precomp_med}
super(MeasurementErrorBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
if self.vars['precomp_med']:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = np.square(np.mean(self.vars['mediator_sd']))
md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
else:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = t.var(mediator)
md_mean_sq = t.sqr(t.mean(mediator))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
                # gwas_gen is not defined in this model's scope; the observed
                # mediator tensor is used as the genotype-side minibatch input
                self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
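# Note on MeasurementErrorBF's alpha prior (a sketch of the reasoning implied by
# the code above): the prior variance of alpha is chosen so that the mediated
# term alpha * mediator explains roughly `heritability` of the phenotypic
# variance. With alpha independent of the mediator m and E[alpha] = 0,
#
#     Var(alpha * m) ~= Var(alpha) * (Var(m) + E[m]^2)
#
# so solving Var(alpha) * (Var(m) + E[m]^2) = sigma_phen^2 * h / (1 - h) for
# Var(alpha) yields the `var_alpha` used in both branches of create_model.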
class BayesianModel(object):
"""
General Bayesian Model Class for quantifying
relationship between gene and phenotype
Adapted from Thomas Wiecki
https://github.com/pymc-devs/pymc3/issues/511#issuecomment-125935523
"""
def __init__(self, variational=True, mb=False, n_chain=50000, n_trace=
5000, logistic=False, steps=None):
"""
Args:
variational (bool, optional): Use Variational Inference
mb (bool, optional): Use minibatches
"""
self.variational = variational
self.cached_model = None
self.mb = mb
self.n_chain = n_chain
self.n_trace = n_trace
self.logistic = logistic
self.steps = steps
def cache_model(self, **inputs):
"""
Create a cached model for the Bayesian model using
shared theano variables for each Bayesian
input parameter.
Args:
**inputs (dict): inputs for Bayesian model
"""
self.shared_vars = self._create_shared_vars(**inputs)
self.cached_model = self.create_model(**self.shared_vars)
def create_model(self, **inputs):
"""
Each instance of this class needs to define
their PYMC3 model in here.
"""
raise NotImplementedError('This method has to be overwritten.')
def _create_shared_vars(self, **inputs):
"""
For each input variable, create theano shared variable
and set their initial values.
Args:
**inputs (dict): inputs for Bayesian model
Returns:
dict: key, value - var_name, theano.shared variable
"""
shared_vars = {}
for name, data in inputs.items():
shared_vars[name] = shared(data, name=name)
return shared_vars
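    # Illustrative sketch (hypothetical variable names): the shared variables
    # created here are what let run() re-use one compiled model on new data
    # without rebuilding it, e.g.
    #
    #     model.cache_model(gwas_gen=X_train, gwas_phen=y_train)  # compile once
    #     model.shared_vars['gwas_gen'].set_value(X_test)         # swap data in
    #     model.shared_vars['gwas_phen'].set_value(y_test)        # no re-compile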
def _clean_inputs(self, inputs):
"""
Clean the inputs, i.e. remove some
genotype columns. Useful for some class of Bayesian models
such as Two-Stage, where first stage involves filtering
on certain SNPs.
Args:
inputs (dict): inputs for Bayesian model
Returns:
dict: cleaned inputs for Bayesian model
"""
return inputs
def run(self, **inputs):
"""
Run cached Bayesian model using the inputs
Args:
**inputs (dict): inputs for Bayesian model
Returns:
trace: Trace of the PyMC3 inference
"""
if self.cached_model is None:
self.cache_model(**inputs)
for name, data in inputs.items():
self.shared_vars[name].set_value(data)
if self.mb and self.variational:
self.minibatches = zip(self._mb_generator(inputs['gwas_gen']),
self._mb_generator(inputs['gwas_phen']))
self.trace = self._inference()
return self.trace
def _inference(self, n_trace=None):
"""
Perform the inference. Uses ADVI if self.variational
        is True. If self.mb is True, minibatches are also used, based
        on generators defined in self.run.
Otherwise, uses Metropolis.
Args:
n_trace (int, optional): Number of steps used for trace
Returns:
trace: Trace of the PyMC3 inference
"""
if n_trace is None:
n_trace = self.n_trace
with self.cached_model:
if self.variational:
if self.mb:
v_params = pm.variational.advi_minibatch(n=self.n_chain,
minibatch_tensors=self.minibatch_tensors,
minibatch_RVs=self.minibatch_RVs, minibatches=self.
minibatches)
else:
v_params = pm.variational.advi(n=self.n_chain)
trace = pm.variational.sample_vp(v_params, draws=n_trace)
self.v_params = v_params
else:
if self.steps is None:
self.steps = pm.Metropolis()
start = pm.find_MAP(fmin=optimize.fmin_powell)
trace = pm.sample(self.n_chain, step=self.steps, start=
start, progressbar=True)
trace = trace[-n_trace:]
self.trace = trace
return trace
def cross_validation(self, k_folds, **inputs):
"""
Run cross-validation on the inputs and calculate
statistics for each fold test set.
Args:
k_folds (sklearn.cross_validation): Folds of test and train
samples
**inputs (dict): inputs for Bayesian model
Returns:
dict: statistics for each fold
"""
self.cv_stats, self.cv_traces = [], []
self.k_folds = k_folds
inputs = self._clean_inputs(inputs)
for i, fold in enumerate(k_folds):
train, test = fold
input_train, input_test = {}, {}
for name, data in inputs.items():
if name in self.cv_vars:
input_train[name] = data[train]
input_test[name] = data[test]
else:
input_train[name] = data
input_test[name] = data
trace = self.run(**input_train)
stats = self.calculate_statistics(trace, **input_test)
self.cv_traces.append(trace)
self.cv_stats.append(stats)
return self.cv_traces, self.cv_stats
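    # Illustrative sketch (hypothetical inputs): k_folds can be any iterable of
    # (train_indices, test_indices) pairs, e.g. built with scikit-learn:
    #
    #     from sklearn.model_selection import KFold
    #     folds = KFold(n_splits=5, shuffle=True).split(gwas_gen)
    #     traces, stats = model.cross_validation(folds, gwas_gen=gwas_gen,
    #                                            gwas_phen=gwas_phen)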
def calculate_ppc(self, trace):
"""
        Calculate several posterior predictive checks
based on the trace.
"""
dic = pm.stats.dic(trace, self.cached_model)
waic, log_py, logp = calculate_waic(trace, self.cached_model)
mu, sd, zscore = self._alpha_stats(trace)
return {'dic': dic, 'waic': waic, 'logp': logp, 'mu': mu, 'sd': sd,
'zscore': zscore}
def calculate_statistics(self, trace, **input_test):
"""
Calculate mse and logp statistics on a test set.
Args:
**input_test (dict): test set of inputs
trace (PyMC3.trace): Trace of the inference chain
Returns:
dict: logp and mse
"""
inputs = self._clean_inputs(input_test)
mc_logp = self._logp(trace, **inputs)
mean_mse = self._mse(trace, **inputs)
mse2 = self._mse2(trace, **inputs)
mu, sd, zscore = self._alpha_stats(trace)
return {'logp': mc_logp, 'mse': mean_mse, 'mse2': mse2, 'mu': mu,
'sd': sd, 'zscore': zscore}
def calculate_bf(self, trace, var_name='mediator_model'):
"""
Calculate Bayes Factor using a Bernoulli variable in the
trace.
"""
p_alt = trace[var_name].mean()
bayes_factor = p_alt / (1 - p_alt)
return bayes_factor
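    # Worked example: the *BF models place a symmetric 0.5/0.5 prior on
    # 'mediator_model', so the prior odds are 1 and the posterior odds computed
    # above equal the Bayes factor. An indicator that is 1 in 80% of posterior
    # draws therefore gives BF = 0.8 / 0.2 = 4 in favour of the mediated model.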
def _logp(self, trace, **inputs):
"""
Calculate log likelihood using Monte Carlo integration.
Args:
            **inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Log likelihood as estimated by Monte Carlo integration
"""
def calc_log(step):
exp_pred = np.dot(inputs['gwas_gen'], step['beta_med'].T).ravel()
phen_pred = step['alpha'] * exp_pred
phen_prob = norm.logpdf(x=inputs['gwas_phen'], loc=phen_pred,
scale=step['phenotype_sigma'])
return phen_prob
phen_probs = [calc_log(trace[idx]) for idx in np.random.randint(0,
len(self.trace), 500)]
phen_probs = np.asmatrix(phen_probs)
mc_logp = phen_probs.sum(axis=1).mean()
return mc_logp
def _mse(self, trace, **inputs):
"""
Calculate mean squared error of the model fit.
Args:
            **inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Mean squared error across all samples
"""
phen_mse = []
for idx in np.random.randint(0, len(trace), 500):
step = self.trace[idx]
exp_pred = np.dot(inputs['gwas_gen'], step['beta_med'].T).ravel()
phen_pred = step['alpha'] * exp_pred
            # accumulate the per-draw MSE rather than overwriting it each pass
            phen_mse.append(np.mean((inputs['gwas_phen'] - phen_pred) ** 2))
mean_mse = np.mean(phen_mse)
return mean_mse
def _mse2(self, trace, **inputs):
"""
Calculate mean squared error of the model fit
using posterior means of beta_med instead of
sampling from it.
Args:
            **inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Mean squared error across all samples
"""
exp = np.dot(inputs['gwas_gen'], trace['beta_med'].mean(axis=0).T)
phen_pred = exp * trace['alpha'].mean()
mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)
return mse
def _alpha_stats(self, trace):
"""
Calculate statistics of the alpha value in
the trace.
"""
mean = np.mean(trace['alpha'])
sd = np.std(trace['alpha'], ddof=1)
zscore = mean / sd
return mean, sd, zscore
def _mb_generator(self, data, size=500):
"""
Generator for minibatches
"""
rng = np.random.RandomState(0)
while True:
ixs = rng.randint(len(data), size=size)
yield data[ixs]
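# End-to-end usage sketch (data, prior values and sampler settings below are
# hypothetical): fit the TwoStage model defined below on simulated
# genotypes/phenotypes and summarise the mediation effect alpha.
def _example_two_stage_run():
    import numpy as np
    rng = np.random.RandomState(0)
    n_ind, n_snps = 500, 20
    gwas_gen = rng.binomial(2, 0.3, size=(n_ind, n_snps)).astype(float)
    true_beta = rng.normal(0, 0.5, size=n_snps)
    gwas_phen = 0.2 * gwas_gen.dot(true_beta) + rng.normal(size=n_ind)
    # First-stage summaries (e.g. bootstrapped ElasticNet fits) are stubbed in.
    model = TwoStage(coef_mean=np.zeros((1, n_snps)),
                     coef_sd=np.ones((1, n_snps)),
                     variational=False, n_chain=2000, n_trace=1000)
    trace = model.run(gwas_gen=gwas_gen, gwas_phen=gwas_phen)
    mean, sd, zscore = model._alpha_stats(trace)
    return mean, sd, zscore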
class TwoStage(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):
"""
        Args:
            coef_mean (array): first-stage coefficient means (e.g. from the
                bootstrapped ElasticNet), used as prior means for beta_med
            coef_sd (array): first-stage coefficient standard deviations,
                used as prior sds for beta_med
            p_sigma_beta (int): scale of the HalfCauchy prior on phenotype_sigma
"""
self.name = 'TwoStage'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStage, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_mu = intercept + alpha * phenotype_expression_mu
if self.logistic:
p = tinvlogit(phenotype_mu)
phen = pm.Bernoulli('phen', p=p, observed=gwas_phen)
else:
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=
self.vars['p_sigma_beta'])
phen = pm.Normal('phen', mu=phenotype_mu, sd=
phenotype_sigma, observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class TwoStageBF(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):
"""
        Args:
            coef_mean (array): first-stage coefficient means (e.g. from the
                bootstrapped ElasticNet), used as prior means for beta_med
            coef_sd (array): first-stage coefficient standard deviations,
                used as prior sds for beta_med
            p_sigma_beta (int): scale of the HalfCauchy prior on phenotype_sigma
"""
self.name = 'TwoStageBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStageBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
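# Usage sketch (input arrays are hypothetical): TwoStageBF is sampled with the
# BinaryGibbsMetropolis/Metropolis steps it configures above, and the evidence
# for mediation is then read from the 'mediator_model' indicator:
#
#     model = TwoStageBF(coef_mean, coef_sd, variational=False)
#     trace = model.run(gwas_gen=gwas_gen, gwas_phen=gwas_phen)
#     bf = model.calculate_bf(trace)   # posterior odds of the mediated model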
class Joint(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype.
"""
def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,
tau_beta=1, lambda_beta=1, m_sigma_beta=10, p_sigma_beta=10, *args,
**kwargs):
"""
        Expression ~ N(X\\beta, \\sigma_exp)
        P(\\beta) ~ Horseshoe(tau_beta, lambda_beta), Laplace(0, 1), or
            N(coef_mean, coef_sd), depending on model_type
        P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
        Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
        P(\\alpha) ~ N(0, 1)
        P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
        Args:
            tau_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
            lambda_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
            m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
            p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'Joint'
self.model_type = model_type
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd, 'tau_beta':
tau_beta, 'lambda_beta': lambda_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
if model_type == 'laplace':
self.create_model = self._create_model_laplace
elif model_type == 'horseshoe':
self.create_model = self._create_model_horseshoe
elif model_type == 'prior':
self.create_model = self._create_model_prior
else:
raise NotImplementedError('Unsupported model type')
super(Joint, self).__init__(*args, **kwargs)
def _create_model_prior(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_horseshoe(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
tau_beta = pm.HalfCauchy('tau_beta', beta=self.vars['tau_beta'])
lambda_beta = pm.HalfCauchy('lambda_beta', beta=self.vars[
'lambda_beta'], shape=(1, n_snps))
total_variance = pm.dot(lambda_beta * lambda_beta, tau_beta *
tau_beta)
beta_med = pm.Normal('beta_med', mu=0, tau=1 / total_variance,
shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
alpha = pm.Normal('alpha', 0, 1)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_laplace(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class MultiStudyMultiTissue(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype in multiple studies
and multiple tissues. Assume that tissues from the same
individual are independent given the genotypes i.e.
P(TisA, TisB | G) = P(TisA | G) P(TisB | G)
"""
def __init__(self, m_laplace_beta=1, m_sigma_beta=10, p_sigma_beta=10,
*args, **kwargs):
"""
Expression ~ N(Xeta, \\sigma_exp)
P(eta) ~ Horseshoe (tau_beta, lambda_beta)
P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(Xetalpha, \\sigma_phen)
P(lpha) ~ Uniform(-10, 10)
P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
tau_beta (int): P(eta) ~ Horseshoe (tau_beta, lambda_beta)
lambda_beta (int): P(eta) ~ Horseshoe (tau_beta, lambda_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'MultiStudyMultiTissue'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'m_laplace_beta': m_laplace_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)
def set_idx(self, med_idx, gwas_idx):
self.med_idx = med_idx
self.gwas_idx = gwas_idx
return
def create_model(self, med_gen, med_phen, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
n_tissues = len(np.unique(self.med_idx))
n_studies = len(np.unique(self.gwas_idx))
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=self.vars[
'm_laplace_beta'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1,
shape=n_tissues)
mediator_gamma = pm.Uniform('mediator_gamma', lower=0, upper=1,
shape=n_tissues)
mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[
self.med_idx] * pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'], shape=n_tissues)
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma[self.med_idx], observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)
alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)
alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)
alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=
n_studies)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=1,
shape=n_studies)
phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx
] * phenotype_expression_mu
phen_sigma = phenotype_sigma[self.gwas_idx]
phen = pm.Normal('phen', mu=phen_mu, sd=phen_sigma, observed=
gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class NonMediated(BayesianModel):
"""
Model the relationship between the genotype and
phenotype without any added information about the
mediator. Use it as a basis for getting
the null distribution under a mediation analysis.
"""
def __init__(self, g_laplace_beta=1, p_sigma_beta=10, *args, **kwargs):
self.name = 'NonMediated'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'g_laplace_beta': g_laplace_beta, 'p_sigma_beta':
p_sigma_beta}
super(NonMediated, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta = pm.Laplace('beta', mu=0, b=self.vars['g_laplace_beta'],
shape=(1, n_snps))
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class MeasurementError(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, m_laplace_beta=1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementError'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'p_sigma_beta': p_sigma_beta}
super(MeasurementError, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Uniform('alpha', lower=-10, upper=10)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * mediator
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class MeasurementErrorBF(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, precomp_med=True,
heritability=0.1, p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementErrorBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'heritability': heritability, 'p_sigma_beta': p_sigma_beta,
'precomp_med': precomp_med}
super(MeasurementErrorBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
if self.vars['precomp_med']:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = np.square(np.mean(self.vars['mediator_sd']))
md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
else:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = t.var(mediator)
md_mean_sq = t.sqr(t.mean(mediator))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
'''
Bayesian models for TWAS.
Author: Kunal Bhutani <[email protected]>
'''
from scipy.stats import norm
import pymc3 as pm
import numpy as np
from theano import shared
from scipy.stats.distributions import pareto
from scipy import optimize
import theano.tensor as t
def tinvlogit(x):
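    # Logistic sigmoid 1 / (1 + exp(-x)), written with theano ops so it can be
    # applied to pymc3/theano tensors.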
return t.exp(x) / (1 + t.exp(x))
def calculate_waic(trace, model=None, r_logp=True):
"""
Taken directly from PyMC3.
Reproduced to only take into account the phenotype and not mediator
variable when calculating logp.
    Calculate the widely applicable information criterion (WAIC) and the
    effective number of parameters of the samples in trace from model.
Read more theory here - in a paper by some of the
leading authorities on Model Selection - http://bit.ly/1W2YJ7c
"""
log_py = log_post_trace(trace, model)
lppd = np.sum(np.log(np.mean(np.exp(log_py), axis=0)))
p_waic = np.sum(np.var(log_py, axis=0))
if r_logp:
return -2 * lppd + 2 * p_waic, log_py, lppd
else:
return -2 * lppd + 2 * p_waic
def calculate_loo(trace=None, model=None, log_py=None):
"""
Taken directly from PyMC3.
Reproduced to only take into account the phenotype and not mediator
variable when calculating logp.
Calculates leave-one-out (LOO) cross-validation for out of sample
predictive model fit, following Vehtari et al. (2015).
Cross-validation is computed using Pareto-smoothed importance sampling.
Returns log pointwise predictive density calculated via
approximated LOO cross-validation.
"""
if log_py is None:
log_py = log_post_trace(trace, model)
# Importance ratios
r = 1. / np.exp(log_py)
r_sorted = np.sort(r, axis=0)
# Extract largest 20% of importance ratios and
# fit generalized Pareto to each
# (returns tuple with shape, location, scale)
q80 = int(len(log_py) * 0.8)
pareto_fit = np.apply_along_axis(lambda x: pareto.fit(x, floc=0),
0, r_sorted[q80:])
# Calculate expected values of the order statistics of the fitted Pareto
S = len(r_sorted)
M = S - q80
z = (np.arange(M) + 0.5) / M
expvals = map(lambda x: pareto.ppf(z, x[0], scale=x[2]), pareto_fit.T)
# Replace importance ratios with order statistics of fitted Pareto
r_sorted[q80:] = np.vstack(expvals).T
# Unsort ratios (within columns) before using them as weights
r_new = np.array([x[np.argsort(i)]
for x, i in zip(r_sorted,
np.argsort(r, axis=0))])
# Truncate weights to guarantee finite variance
w = np.minimum(r_new, r_new.mean(axis=0) * S**0.75)
loo_lppd = np.sum(np.log(np.sum(w * np.exp(log_py), axis=0) / np.sum(w, axis=0)))
return loo_lppd
def log_post_trace(trace, model):
'''
Taken directly from PyMC3.
Reproduced to only take into account the phenotype and not mediator
variable when calculating logp.
Calculate the elementwise log-posterior for the sampled trace.
'''
    # Pass a list (rather than a generator) so newer numpy versions accept it
    logp = np.hstack([[obs.logp_elemwise(pt) for pt in trace]
                      for obs in model.observed_RVs
                      if obs.__repr__() == 'phen'])
if len(logp.shape) > 2:
logp = logp.squeeze(axis=1)
return logp
class BayesianModel(object):
'''
General Bayesian Model Class for quantifying
relationship between gene and phenotype
Adapted from Thomas Wiecki
https://github.com/pymc-devs/pymc3/issues/511#issuecomment-125935523
'''
def __init__(self, variational=True, mb=False,
n_chain=50000, n_trace=5000,
logistic=False, steps=None):
"""
Args:
variational (bool, optional): Use Variational Inference
mb (bool, optional): Use minibatches
"""
self.variational = variational
self.cached_model = None
self.mb = mb
self.n_chain = n_chain
self.n_trace = n_trace
self.logistic = logistic
self.steps = steps
def cache_model(self, **inputs):
"""
Create a cached model for the Bayesian model using
shared theano variables for each Bayesian
input parameter.
Args:
**inputs (dict): inputs for Bayesian model
"""
self.shared_vars = self._create_shared_vars(**inputs)
self.cached_model = self.create_model(**self.shared_vars)
def create_model(self, **inputs):
"""
Each instance of this class needs to define
their PYMC3 model in here.
"""
raise NotImplementedError('This method has to be overwritten.')
def _create_shared_vars(self, **inputs):
"""
For each input variable, create theano shared variable
and set their initial values.
Args:
**inputs (dict): inputs for Bayesian model
Returns:
dict: key, value - var_name, theano.shared variable
"""
shared_vars = {}
for name, data in inputs.items():
shared_vars[name] = shared(data, name=name)
return shared_vars
def _clean_inputs(self, inputs):
"""
Clean the inputs, i.e. remove some
genotype columns. Useful for some class of Bayesian models
such as Two-Stage, where first stage involves filtering
on certain SNPs.
Args:
inputs (dict): inputs for Bayesian model
Returns:
dict: cleaned inputs for Bayesian model
"""
return inputs
def run(self, **inputs):
"""
Run cached Bayesian model using the inputs
Args:
**inputs (dict): inputs for Bayesian model
Returns:
trace: Trace of the PyMC3 inference
"""
if self.cached_model is None:
self.cache_model(**inputs)
for name, data in inputs.items():
self.shared_vars[name].set_value(data)
if self.mb and self.variational:
self.minibatches = zip(self._mb_generator(inputs['gwas_gen']),
self._mb_generator(inputs['gwas_phen']))
self.trace = self._inference()
return self.trace
def _inference(self, n_trace=None):
"""
Perform the inference. Uses ADVI if self.variational
is True. Also, uses minibatches is self.mb=True based
on generators defined in self.run.
Otherwise, uses Metropolis.
Args:
n_trace (int, optional): Number of steps used for trace
Returns:
trace: Trace of the PyMC3 inference
"""
if n_trace is None:
n_trace = self.n_trace
with self.cached_model:
if self.variational:
if self.mb:
v_params = pm.variational.advi_minibatch(n=self.n_chain,
minibatch_tensors=self.minibatch_tensors,
minibatch_RVs=self.minibatch_RVs,
minibatches=self.minibatches,)
else:
v_params = pm.variational.advi(n=self.n_chain)
trace = pm.variational.sample_vp(v_params, draws=n_trace)
self.v_params = v_params
else:
if self.steps is None:
self.steps = pm.Metropolis()
start = pm.find_MAP(fmin=optimize.fmin_powell)
trace = pm.sample(self.n_chain,
step=self.steps,
start=start,
progressbar=True,
)
trace = trace[-n_trace:]
self.trace = trace
return trace
def cross_validation(self, k_folds, **inputs):
"""
Run cross-validation on the inputs and calculate
statistics for each fold test set.
Args:
k_folds (sklearn.cross_validation): Folds of test and train
samples
**inputs (dict): inputs for Bayesian model
Returns:
dict: statistics for each fold
"""
self.cv_stats, self.cv_traces = [], []
self.k_folds = k_folds
inputs = self._clean_inputs(inputs)
for i, fold in enumerate(k_folds):
train, test = fold
input_train, input_test = {}, {}
for name, data in inputs.items():
if name in self.cv_vars:
input_train[name] = data[train]
input_test[name] = data[test]
else:
input_train[name] = data
input_test[name] = data
trace = self.run(**input_train)
stats = self.calculate_statistics(trace, **input_test)
self.cv_traces.append(trace)
self.cv_stats.append(stats)
return self.cv_traces, self.cv_stats
def calculate_ppc(self, trace):
"""
Calculate several post-predictive checks
based on the trace.
"""
dic = pm.stats.dic(trace, self.cached_model)
waic, log_py, logp = calculate_waic(trace, self.cached_model)
#loo = calculate_loo(log_py=log_py)
mu, sd, zscore = self._alpha_stats(trace)
return {'dic': dic,
'waic': waic,
'logp': logp,
#'loo': loo,
'mu': mu,
'sd': sd,
'zscore': zscore}
def calculate_statistics(self, trace, **input_test):
"""
Calculate mse and logp statistics on a test set.
Args:
**input_test (dict): test set of inputs
trace (PyMC3.trace): Trace of the inference chain
Returns:
dict: logp and mse
"""
inputs = self._clean_inputs(input_test)
mc_logp = self._logp(trace, **inputs)
mean_mse = self._mse(trace, **inputs)
mse2 = self._mse2(trace, **inputs)
mu, sd, zscore = self._alpha_stats(trace)
return {'logp': mc_logp,
'mse': mean_mse,
'mse2': mse2,
'mu': mu,
'sd': sd,
'zscore': zscore}
def calculate_bf(self, trace, var_name='mediator_model'):
'''
Calculate Bayes Factor using a Bernoulli variable in the
trace.
'''
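        # With the symmetric prior p = [0.5, 0.5] used in the *BF models,
        # the prior odds are 1, so the Bayes factor reduces to the posterior
        # odds p_alt / (1 - p_alt).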
p_alt = trace[var_name].mean()
bayes_factor = (p_alt/(1-p_alt))
return bayes_factor
def _logp(self, trace, **inputs):
"""
Calculate log likelihood using Monte Carlo integration.
Args:
            **inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Log likelihood as estimated by Monte Carlo integration
"""
def calc_log(step):
exp_pred = np.dot(inputs['gwas_gen'],
step['beta_med'].T).ravel()
phen_pred = step['alpha'] * exp_pred
phen_prob = norm.logpdf(x=inputs['gwas_phen'],
loc=phen_pred,
scale=step['phenotype_sigma'])
return phen_prob
phen_probs = [calc_log(trace[idx])
for idx in np.random.randint(0, len(self.trace), 500)]
phen_probs = np.asmatrix(phen_probs)
mc_logp = phen_probs.sum(axis=1).mean()
return mc_logp
def _mse(self, trace, **inputs):
"""
Calculate mean squared error of the model fit.
Args:
            **inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Mean squared error across all samples
"""
        phen_mse = []
        for idx in np.random.randint(0, len(trace), 500):
            step = trace[idx]
            exp_pred = np.dot(inputs['gwas_gen'],
                              step['beta_med'].T).ravel()
            phen_pred = step['alpha'] * exp_pred
            # Accumulate per-draw MSEs instead of overwriting the list
            phen_mse.append(np.mean((inputs['gwas_phen'] - phen_pred) ** 2))
mean_mse = np.mean(phen_mse)
return mean_mse
def _mse2(self, trace, **inputs):
"""
Calculate mean squared error of the model fit
using posterior means of beta_med instead of
sampling from it.
Args:
            **inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Mean squared error across all samples
"""
exp = np.dot(inputs['gwas_gen'],
trace['beta_med'].mean(axis=0).T)
phen_pred = exp * trace['alpha'].mean()
mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)
return mse
def _alpha_stats(self, trace):
"""
Calculate statistics of the alpha value in
the trace.
"""
mean = np.mean(trace['alpha'])
sd = np.std(trace['alpha'], ddof=1)
zscore = mean / sd
return mean, sd, zscore
def _mb_generator(self, data, size=500):
"""
Generator for minibatches
"""
rng = np.random.RandomState(0)
while True:
ixs = rng.randint(len(data), size=size)
yield data[ixs]
class TwoStage(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10,
*args, **kwargs):
"""
Args:
"""
self.name = 'TwoStage'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean,
'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStage, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med',
mu=self.vars['coef_mean'],
sd=self.vars['coef_sd'],
shape=(1, n_snps))
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_mu = intercept + alpha * phenotype_expression_mu
if self.logistic:
p = tinvlogit(phenotype_mu)
phen = pm.Bernoulli('phen', p=p, observed=gwas_phen)
else:
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class TwoStageBF(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10,
*args, **kwargs):
"""
Args:
"""
self.name = 'TwoStageBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean,
'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStageBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med',
mu=self.vars['coef_mean'],
sd=self.vars['coef_sd'],
shape=(1, n_snps))
mediator = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
# Model Selection
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
# Model 1
phenotype_mu_null = intercept
# Model 2
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen',
lambda value: pm.switch(mediator_model,
pm.Normal.dist(mu=phenotype_mu_mediator, sd=phenotype_sigma).logp(value),
pm.Normal.dist(mu=phenotype_mu_null, sd=phenotype_sigma).logp(value)
),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class Joint(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype.
"""
def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,
tau_beta=1, lambda_beta=1, m_sigma_beta=10,
p_sigma_beta=10, *args, **kwargs):
"""
Expression ~ N(X\beta, \sigma_exp)
P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
P(\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\beta\alpha, \sigma_phen)
P(\alpha) ~ Uniform(-10, 10)
P(\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
tau_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
lambda_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
m_sigma_beta (int): P(\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'Joint'
self.model_type = model_type
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean,
'coef_sd': coef_sd,
'tau_beta': tau_beta,
'lambda_beta': lambda_beta,
'm_sigma_beta': m_sigma_beta,
'p_sigma_beta': p_sigma_beta
}
if model_type == 'laplace':
self.create_model = self._create_model_laplace
elif model_type == 'horseshoe':
self.create_model = self._create_model_horseshoe
elif model_type == 'prior':
            # assert (coef_sd is not None) and (coef_mean is not None), \
            #     'Must provide coef_mean and coef_sd if using prior'
self.create_model = self._create_model_prior
else:
raise NotImplementedError('Unsupported model type')
super(Joint, self).__init__(*args, **kwargs)
def _create_model_prior(self, med_gen, med_phen,
gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
# Expression
beta_med = pm.Normal('beta_med',
mu=self.vars['coef_mean'],
sd=self.vars['coef_sd'],
shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept',
mu=0,
sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma',
beta=self.vars['m_sigma_beta'])
mediator = pm.Normal('mediator',
mu=mediator_mu,
sd=mediator_sigma,
observed=med_phen)
# Phenotype
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
# alpha = pm.Uniform('alpha', -10, 10)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_horseshoe(self, med_gen, med_phen,
gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
# Expression
tau_beta = pm.HalfCauchy('tau_beta',
beta=self.vars['tau_beta'])
lambda_beta = pm.HalfCauchy('lambda_beta',
beta=self.vars['lambda_beta'],
shape=(1, n_snps))
# lambda_beta = pm.StudentT('lambda_beta', nu=3, mu=0,
# lam=1, shape=(1, n_snps))
total_variance = pm.dot(lambda_beta * lambda_beta,
tau_beta * tau_beta)
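            # Horseshoe construction: beta_med has variance
            # (lambda_beta * tau_beta)^2; pm.Normal's `tau` argument is the
            # precision, hence tau=1/total_variance below.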
beta_med = pm.Normal('beta_med',
mu=0,
tau=1 / total_variance,
shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept',
mu=0,
sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma',
beta=self.vars['m_sigma_beta'])
mediator = pm.Normal('mediator',
mu=mediator_mu,
sd=mediator_sigma,
observed=med_phen)
# Phenotype
alpha = pm.Normal('alpha', 0, 1)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_laplace(self, med_gen, med_phen,
gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
# Expression
beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps),)
mediator_intercept = pm.Normal('mediator_intercept',
mu=0,
sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma',
beta=self.vars['m_sigma_beta'])
mediator = pm.Normal('mediator',
mu=mediator_mu,
sd=mediator_sigma,
observed=med_phen)
# Phenotype
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
# alpha = pm.Uniform('alpha', -10, 10)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class MultiStudyMultiTissue(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype in multiple studies
and multiple tissues. Assume that tissues from the same
individual are independent given the genotypes i.e.
P(TisA, TisB | G) = P(TisA | G) P(TisB | G)
"""
def __init__(self,
m_laplace_beta=1,
m_sigma_beta=10,
p_sigma_beta=10, *args, **kwargs):
"""
Expression ~ N(X\beta, \sigma_exp)
P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
P(\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\beta\alpha, \sigma_phen)
P(\alpha) ~ Uniform(-10, 10)
P(\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
tau_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
lambda_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
m_sigma_beta (int): P(\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'MultiStudyMultiTissue'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'m_laplace_beta': m_laplace_beta,
'm_sigma_beta': m_sigma_beta,
'p_sigma_beta': p_sigma_beta
}
super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)
def set_idx(self, med_idx, gwas_idx):
self.med_idx = med_idx
self.gwas_idx = gwas_idx
return
def create_model(self,
med_gen, med_phen,
gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
        n_tissues = len(np.unique(self.med_idx))
n_studies = len(np.unique(self.gwas_idx))
with pm.Model() as phenotype_model:
# Expression
beta_med = pm.Laplace('beta_med',
mu=0,
b=self.vars['m_laplace_beta'],
shape=(1, n_snps),)
mediator_intercept = pm.Normal('mediator_intercept',
mu=0,
sd=1,
shape=n_tissues)
mediator_gamma = pm.Uniform('mediator_gamma',
lower=0,
upper=1,
shape=n_tissues)
mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[self.med_idx] * pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma',
beta=self.vars['m_sigma_beta'],
shape=n_tissues)
mediator = pm.Normal('mediator',
mu=mediator_mu,
sd=mediator_sigma[self.med_idx],
observed=med_phen)
# Phenotype
intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)
alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)
alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)
alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=n_studies)
# alpha = pm.Uniform('alpha', -10, 10)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=1,
shape=n_studies)
phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx] * phenotype_expression_mu
phen_sigma = phenotype_sigma[self.gwas_idx]
phen = pm.Normal('phen',
mu=phen_mu,
sd=phen_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class NonMediated(BayesianModel):
"""
Model the relationship between the genotype and
phenotype without any added information about the
mediator. Use it as a basis for getting
the null distribution under a mediation analysis.
"""
def __init__(self,
g_laplace_beta=1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'NonMediated'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'g_laplace_beta': g_laplace_beta,
'p_sigma_beta': p_sigma_beta,
}
super(NonMediated, self).__init__(*args, **kwargs)
def create_model(self,
gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta = pm.Laplace('beta',
mu=0,
b=self.vars['g_laplace_beta'],
shape=(1, n_snps),)
# Phenotype
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class MeasurementError(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self,
mediator_mu,
mediator_sd,
m_laplace_beta=1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementError'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu,
'mediator_sd': mediator_sd,
'p_sigma_beta': p_sigma_beta,
}
super(MeasurementError, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
# Phenotype
mediator = pm.Normal('mediator',
mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'],
shape=n_samples)
mediator_meas = pm.Normal('mediator_meas',
mu=mediator,
sd=gwas_error,
shape=n_samples,
observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Uniform('alpha', lower=-10, upper=10)
#alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * mediator
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
            self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
class MeasurementErrorBF(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self,
mediator_mu,
mediator_sd,
precomp_med=True,
heritability=0.1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementErrorBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu,
'mediator_sd': mediator_sd,
'heritability': heritability,
'p_sigma_beta': p_sigma_beta,
'precomp_med': precomp_med,
}
super(MeasurementErrorBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
# Mediator
mediator = pm.Normal('mediator',
mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'],
shape=n_samples)
mediator_meas = pm.Normal('mediator_meas',
mu=mediator,
sd=gwas_error,
shape=n_samples,
observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
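            # Prior scale for alpha derived from an assumed heritability h:
            # the variance explained by the mediator, Var(alpha * M) ~
            # alpha^2 * E[M^2], is set to sigma_phen^2 * h / (1 - h), so
            # Var(alpha) = var_explained / (Var(M) + E[M]^2). With
            # precomp_med, E[M^2] is approximated from the supplied
            # mediator_mu and mediator_sd.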
if self.vars['precomp_med']:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = (p_var*h)/(1-h)
md_var = np.square(np.mean(self.vars['mediator_sd']))
md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))
var_alpha = var_explained/(md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
else:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = (p_var*h)/(1-h)
md_var = t.var(mediator)
md_mean_sq = t.sqr(t.mean(mediator))
var_alpha = var_explained/(md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
# Model Selection
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
# Model 1
phenotype_mu_null = intercept
# Model 2
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen',
lambda value: pm.switch(mediator_model,
pm.Normal.dist(mu=phenotype_mu_mediator, sd=phenotype_sigma).logp(value),
pm.Normal.dist(mu=phenotype_mu_null, sd=phenotype_sigma).logp(value)
),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
            self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
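# ---------------------------------------------------------------------------
# Example usage: a minimal, illustrative sketch (not part of the original
# module). The simulated genotypes/phenotypes and the chosen hyperparameters
# below are assumptions made only to show the expected call pattern;
# BayesianModel.run() accepts plain numpy arrays and wraps them in theano
# shared variables internally.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    n_ind, n_snps = 500, 20
    gwas_gen = rng.binomial(2, 0.3, size=(n_ind, n_snps)).astype(float)
    # Pretend these came from a bootstrapped first-stage ElasticNet fit
    coef_mean = rng.normal(0, 0.1, size=n_snps)
    coef_sd = np.full(n_snps, 0.05)
    gwas_phen = 0.5 * gwas_gen.dot(coef_mean) + rng.normal(size=n_ind)

    model = TwoStage(coef_mean, coef_sd, variational=True,
                     n_chain=10000, n_trace=1000)
    trace = model.run(gwas_gen=gwas_gen, gwas_phen=gwas_phen)
    print(model.calculate_ppc(trace))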
shape=n_tissues)\n mediator = pm.Normal('mediator', mu=mediator_mu, sd=\n mediator_sigma[self.med_idx], observed=med_phen)\n intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)\n alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)\n alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)\n alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=\n n_studies)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=1,\n shape=n_studies)\n phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx\n ] * phenotype_expression_mu\n phen_sigma = phenotype_sigma[self.gwas_idx]\n phen = pm.Normal('phen', mu=phen_mu, sd=phen_sigma, observed=\n gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass NonMediated(BayesianModel):\n \"\"\"\n Model the relationship between the genotype and\n phenotype without any added information about the \n mediator. Use it as a basis for getting\n the null distribution under a mediation analysis.\n \"\"\"\n\n def __init__(self, g_laplace_beta=1, p_sigma_beta=10, *args, **kwargs):\n self.name = 'NonMediated'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'g_laplace_beta': g_laplace_beta, 'p_sigma_beta':\n p_sigma_beta}\n super(NonMediated, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_gen, gwas_phen):\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n beta = pm.Laplace('beta', mu=0, b=self.vars['g_laplace_beta'],\n shape=(1, n_snps))\n intercept = pm.Normal('intercept', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass MeasurementError(BayesianModel):\n \"\"\"\n Use the canonical definition of measurement error as described\n in http://andrewgelman.com/2016/09/04/29847/\n\n \"\"\"\n\n def __init__(self, mediator_mu, mediator_sd, m_laplace_beta=1,\n p_sigma_beta=10, *args, **kwargs):\n self.name = 'MeasurementError'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,\n 'p_sigma_beta': p_sigma_beta}\n super(MeasurementError, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_mediator, gwas_phen, gwas_error):\n n_samples = gwas_mediator.eval().shape[0]\n with pm.Model() as phenotype_model:\n mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],\n sd=self.vars['mediator_sd'], shape=n_samples)\n mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=\n gwas_error, shape=n_samples, observed=gwas_mediator)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Uniform('alpha', lower=-10, upper=10)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * mediator\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass MeasurementErrorBF(BayesianModel):\n \"\"\"\n Use the canonical definition of measurement error as described\n in http://andrewgelman.com/2016/09/04/29847/\n\n \"\"\"\n\n def 
__init__(self, mediator_mu, mediator_sd, precomp_med=True,\n heritability=0.1, p_sigma_beta=10, *args, **kwargs):\n self.name = 'MeasurementErrorBF'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,\n 'heritability': heritability, 'p_sigma_beta': p_sigma_beta,\n 'precomp_med': precomp_med}\n super(MeasurementErrorBF, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_mediator, gwas_phen, gwas_error):\n n_samples = gwas_mediator.eval().shape[0]\n with pm.Model() as phenotype_model:\n mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],\n sd=self.vars['mediator_sd'], shape=n_samples)\n mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=\n gwas_error, shape=n_samples, observed=gwas_mediator)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n if self.vars['precomp_med']:\n p_var = t.sqr(phenotype_sigma)\n h = self.vars['heritability']\n var_explained = p_var * h / (1 - h)\n md_var = np.square(np.mean(self.vars['mediator_sd']))\n md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))\n var_alpha = var_explained / (md_var + md_mean_sq)\n alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))\n else:\n p_var = t.sqr(phenotype_sigma)\n h = self.vars['heritability']\n var_explained = p_var * h / (1 - h)\n md_var = t.var(mediator)\n md_mean_sq = t.sqr(t.mean(mediator))\n var_alpha = var_explained / (md_var + md_mean_sq)\n alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))\n p = np.array([0.5, 0.5])\n mediator_model = pm.Bernoulli('mediator_model', p[1])\n phenotype_mu_null = intercept\n phenotype_mu_mediator = intercept + alpha * mediator\n phen = pm.DensityDist('phen', lambda value: pm.switch(\n mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd\n =phenotype_sigma).logp(value), pm.Normal.dist(mu=\n phenotype_mu_null, sd=phenotype_sigma).logp(value)),\n observed=gwas_phen)\n self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),\n pm.Metropolis()]\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n",
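The MeasurementErrorBF model above encodes the choice between the null and mediated regressions with a Bernoulli 'mediator_model' indicator whose prior probability is 0.5, so the posterior odds of that indicator are exactly the Bayes factor (this is what calculate_bf on the BayesianModel base class computes). A minimal arithmetic sketch, using purely hypothetical indicator draws rather than a real trace:

import numpy as np

# Hypothetical 0/1 posterior draws of the 'mediator_model' indicator.
mediator_model_draws = np.array([1, 1, 1, 1, 0] * 100)

p_alt = mediator_model_draws.mean()     # 0.8: fraction of draws in the mediated model
bayes_factor = p_alt / (1.0 - p_alt)    # 0.8 / 0.2 = 4.0
print(bayes_factor)                     # mediated model favoured roughly 4:1 over the null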
"step-3": "<mask token>\n\n\nclass BayesianModel(object):\n <mask token>\n\n def __init__(self, variational=True, mb=False, n_chain=50000, n_trace=\n 5000, logistic=False, steps=None):\n \"\"\"\n Args:\n variational (bool, optional): Use Variational Inference\n mb (bool, optional): Use minibatches\n \"\"\"\n self.variational = variational\n self.cached_model = None\n self.mb = mb\n self.n_chain = n_chain\n self.n_trace = n_trace\n self.logistic = logistic\n self.steps = steps\n\n def cache_model(self, **inputs):\n \"\"\"\n Create a cached model for the Bayesian model using\n shared theano variables for each Bayesian\n input parameter.\n\n Args:\n **inputs (dict): inputs for Bayesian model\n\n \"\"\"\n self.shared_vars = self._create_shared_vars(**inputs)\n self.cached_model = self.create_model(**self.shared_vars)\n\n def create_model(self, **inputs):\n \"\"\"\n Each instance of this class needs to define\n their PYMC3 model in here.\n \"\"\"\n raise NotImplementedError('This method has to be overwritten.')\n <mask token>\n\n def _clean_inputs(self, inputs):\n \"\"\"\n Clean the inputs, i.e. remove some\n genotype columns. Useful for some class of Bayesian models\n such as Two-Stage, where first stage involves filtering\n on certain SNPs.\n\n Args:\n inputs (dict): inputs for Bayesian model\n Returns:\n dict: cleaned inputs for Bayesian model\n \"\"\"\n return inputs\n\n def run(self, **inputs):\n \"\"\"\n Run cached Bayesian model using the inputs\n\n Args:\n **inputs (dict): inputs for Bayesian model\n\n Returns:\n trace: Trace of the PyMC3 inference\n \"\"\"\n if self.cached_model is None:\n self.cache_model(**inputs)\n for name, data in inputs.items():\n self.shared_vars[name].set_value(data)\n if self.mb and self.variational:\n self.minibatches = zip(self._mb_generator(inputs['gwas_gen']),\n self._mb_generator(inputs['gwas_phen']))\n self.trace = self._inference()\n return self.trace\n\n def _inference(self, n_trace=None):\n \"\"\"\n Perform the inference. Uses ADVI if self.variational\n is True. 
Also, uses minibatches is self.mb=True based\n on generators defined in self.run.\n\n Otherwise, uses Metropolis.\n\n Args:\n n_trace (int, optional): Number of steps used for trace\n Returns:\n trace: Trace of the PyMC3 inference\n \"\"\"\n if n_trace is None:\n n_trace = self.n_trace\n with self.cached_model:\n if self.variational:\n if self.mb:\n v_params = pm.variational.advi_minibatch(n=self.n_chain,\n minibatch_tensors=self.minibatch_tensors,\n minibatch_RVs=self.minibatch_RVs, minibatches=self.\n minibatches)\n else:\n v_params = pm.variational.advi(n=self.n_chain)\n trace = pm.variational.sample_vp(v_params, draws=n_trace)\n self.v_params = v_params\n else:\n if self.steps is None:\n self.steps = pm.Metropolis()\n start = pm.find_MAP(fmin=optimize.fmin_powell)\n trace = pm.sample(self.n_chain, step=self.steps, start=\n start, progressbar=True)\n trace = trace[-n_trace:]\n self.trace = trace\n return trace\n\n def cross_validation(self, k_folds, **inputs):\n \"\"\"\n Run cross-validation on the inputs and calculate\n statistics for each fold test set.\n\n Args:\n k_folds (sklearn.cross_validation): Folds of test and train\n samples\n **inputs (dict): inputs for Bayesian model\n\n Returns:\n dict: statistics for each fold\n \"\"\"\n self.cv_stats, self.cv_traces = [], []\n self.k_folds = k_folds\n inputs = self._clean_inputs(inputs)\n for i, fold in enumerate(k_folds):\n train, test = fold\n input_train, input_test = {}, {}\n for name, data in inputs.items():\n if name in self.cv_vars:\n input_train[name] = data[train]\n input_test[name] = data[test]\n else:\n input_train[name] = data\n input_test[name] = data\n trace = self.run(**input_train)\n stats = self.calculate_statistics(trace, **input_test)\n self.cv_traces.append(trace)\n self.cv_stats.append(stats)\n return self.cv_traces, self.cv_stats\n\n def calculate_ppc(self, trace):\n \"\"\"\n Calculate several post-predictive checks\n based on the trace.\n \"\"\"\n dic = pm.stats.dic(trace, self.cached_model)\n waic, log_py, logp = calculate_waic(trace, self.cached_model)\n mu, sd, zscore = self._alpha_stats(trace)\n return {'dic': dic, 'waic': waic, 'logp': logp, 'mu': mu, 'sd': sd,\n 'zscore': zscore}\n\n def calculate_statistics(self, trace, **input_test):\n \"\"\"\n Calculate mse and logp statistics on a test set.\n\n Args:\n **input_test (dict): test set of inputs\n trace (PyMC3.trace): Trace of the inference chain\n Returns:\n dict: logp and mse\n \"\"\"\n inputs = self._clean_inputs(input_test)\n mc_logp = self._logp(trace, **inputs)\n mean_mse = self._mse(trace, **inputs)\n mse2 = self._mse2(trace, **inputs)\n mu, sd, zscore = self._alpha_stats(trace)\n return {'logp': mc_logp, 'mse': mean_mse, 'mse2': mse2, 'mu': mu,\n 'sd': sd, 'zscore': zscore}\n\n def calculate_bf(self, trace, var_name='mediator_model'):\n \"\"\"\n Calculate Bayes Factor using a Bernoulli variable in the \n trace.\n \"\"\"\n p_alt = trace[var_name].mean()\n bayes_factor = p_alt / (1 - p_alt)\n return bayes_factor\n\n def _logp(self, trace, **inputs):\n \"\"\"\n Calculate log likelihood using Monte Carlo integration.\n\n Args:\n **inputs (dict): inputs used in likelhood calculation\n trace (PyMC3.trace): Trace of the inference chain\n Returns:\n float: Log likelihood as estimated by Monte Carlo integration\n \"\"\"\n\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'], step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'], loc=phen_pred,\n scale=step['phenotype_sigma'])\n return 
phen_prob\n phen_probs = [calc_log(trace[idx]) for idx in np.random.randint(0,\n len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp\n\n def _mse(self, trace, **inputs):\n \"\"\"\n Calculate mean squared error of the model fit.\n Args:\n **inputs (dict): inputs used in likelhood calculation\n trace (PyMC3.trace): Trace of the inference chain\n\n Returns:\n float: Mean squared error across all samples\n \"\"\"\n phen_mse = []\n for idx in np.random.randint(0, len(trace), 500):\n step = self.trace[idx]\n exp_pred = np.dot(inputs['gwas_gen'], step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n mean_mse = np.mean(phen_mse)\n return mean_mse\n\n def _mse2(self, trace, **inputs):\n \"\"\"\n Calculate mean squared error of the model fit \n using posterior means of beta_med instead of\n sampling from it.\n\n Args:\n **inputs (dict): inputs used in likelhood calculation\n trace (PyMC3.trace): Trace of the inference chain\n\n Returns:\n float: Mean squared error across all samples\n \"\"\"\n exp = np.dot(inputs['gwas_gen'], trace['beta_med'].mean(axis=0).T)\n phen_pred = exp * trace['alpha'].mean()\n mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n return mse\n <mask token>\n\n def _mb_generator(self, data, size=500):\n \"\"\"\n Generator for minibatches\n \"\"\"\n rng = np.random.RandomState(0)\n while True:\n ixs = rng.randint(len(data), size=size)\n yield data[ixs]\n\n\nclass TwoStage(BayesianModel):\n \"\"\"\n Two Stage Inference.\n\n First stage: Bootstrapped ElasticNet\n Second stage: Use loci that were learned in the first stage\n and their mean and std as priors for a simple\n Bayesian Linear Regression\n\n Attributes:\n\n \"\"\"\n\n def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):\n \"\"\"\n Args:\n\n \"\"\"\n self.name = 'TwoStage'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,\n 'p_sigma_beta': p_sigma_beta}\n super(TwoStage, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_gen, gwas_phen):\n \"\"\"\n Simple Bayesian Linear Regression\n\n Args:\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n Returns:\n pymc3.Model(): The Bayesian model\n \"\"\"\n n_ind, n_snps = gwas_gen.eval().shape\n with pm.Model() as phenotype_model:\n beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=\n self.vars['coef_sd'], shape=(1, n_snps))\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', mu=0, sd=1)\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n if self.logistic:\n p = tinvlogit(phenotype_mu)\n phen = pm.Bernoulli('phen', p=p, observed=gwas_phen)\n else:\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=\n self.vars['p_sigma_beta'])\n phen = pm.Normal('phen', mu=phenotype_mu, sd=\n phenotype_sigma, observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass TwoStageBF(BayesianModel):\n \"\"\"\n Two Stage Inference.\n\n First stage: Bootstrapped ElasticNet\n Second stage: Use loci that were learned in the first stage\n and their mean and std as priors for a simple\n Bayesian Linear Regression\n\n Attributes:\n\n \"\"\"\n\n def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, 
**kwargs):\n \"\"\"\n Args:\n\n \"\"\"\n self.name = 'TwoStageBF'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,\n 'p_sigma_beta': p_sigma_beta}\n super(TwoStageBF, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_gen, gwas_phen):\n \"\"\"\n Simple Bayesian Linear Regression\n\n Args:\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n Returns:\n pymc3.Model(): The Bayesian model\n \"\"\"\n n_ind, n_snps = gwas_gen.eval().shape\n with pm.Model() as phenotype_model:\n beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=\n self.vars['coef_sd'], shape=(1, n_snps))\n mediator = pm.dot(beta_med, gwas_gen.T)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n p = np.array([0.5, 0.5])\n mediator_model = pm.Bernoulli('mediator_model', p[1])\n phenotype_mu_null = intercept\n phenotype_mu_mediator = intercept + alpha * mediator\n phen = pm.DensityDist('phen', lambda value: pm.switch(\n mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd\n =phenotype_sigma).logp(value), pm.Normal.dist(mu=\n phenotype_mu_null, sd=phenotype_sigma).logp(value)),\n observed=gwas_phen)\n self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),\n pm.Metropolis()]\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass Joint(BayesianModel):\n \"\"\"\n Jointly model the transcriptional regulation and\n its effect on the phenotype.\n\n \"\"\"\n\n def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,\n tau_beta=1, lambda_beta=1, m_sigma_beta=10, p_sigma_beta=10, *args,\n **kwargs):\n \"\"\"\n Expression ~ N(X\beta, \\\\sigma_exp)\n P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n P(\\\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n Phenotype ~ N(X\beta\u0007lpha, \\\\sigma_phen)\n P(\u0007lpha) ~ Uniform(-10, 10)\n P(\\\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n Args:\n tau_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n lambda_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n m_sigma_beta (int): P(\\\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n p_sigma_beta (int): P(\\\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n\n \"\"\"\n self.name = 'Joint'\n self.model_type = model_type\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd, 'tau_beta':\n tau_beta, 'lambda_beta': lambda_beta, 'm_sigma_beta':\n m_sigma_beta, 'p_sigma_beta': p_sigma_beta}\n if model_type == 'laplace':\n self.create_model = self._create_model_laplace\n elif model_type == 'horseshoe':\n self.create_model = self._create_model_horseshoe\n elif model_type == 'prior':\n self.create_model = self._create_model_prior\n else:\n raise NotImplementedError('Unsupported model type')\n super(Joint, self).__init__(*args, **kwargs)\n\n def _create_model_prior(self, med_gen, med_phen, gwas_gen, gwas_phen):\n \"\"\"\n Args:\n med_gen (pandas.DataFrame): Mediator genotypes\n med_phen (pandas.DataFrame): Mediator phenotypes\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n \"\"\"\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=\n self.vars['coef_sd'], shape=(1, n_snps))\n mediator_intercept = 
pm.Normal('mediator_intercept', mu=0, sd=1)\n mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars\n ['m_sigma_beta'])\n mediator = pm.Normal('mediator', mu=mediator_mu, sd=\n mediator_sigma, observed=med_phen)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', 0, 1)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n def _create_model_horseshoe(self, med_gen, med_phen, gwas_gen, gwas_phen):\n \"\"\"\n Args:\n med_gen (pandas.DataFrame): Mediator genotypes\n med_phen (pandas.DataFrame): Mediator phenotypes\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n \"\"\"\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n tau_beta = pm.HalfCauchy('tau_beta', beta=self.vars['tau_beta'])\n lambda_beta = pm.HalfCauchy('lambda_beta', beta=self.vars[\n 'lambda_beta'], shape=(1, n_snps))\n total_variance = pm.dot(lambda_beta * lambda_beta, tau_beta *\n tau_beta)\n beta_med = pm.Normal('beta_med', mu=0, tau=1 / total_variance,\n shape=(1, n_snps))\n mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)\n mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars\n ['m_sigma_beta'])\n mediator = pm.Normal('mediator', mu=mediator_mu, sd=\n mediator_sigma, observed=med_phen)\n alpha = pm.Normal('alpha', 0, 1)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n def _create_model_laplace(self, med_gen, med_phen, gwas_gen, gwas_phen):\n \"\"\"\n Args:\n med_gen (pandas.DataFrame): Mediator genotypes\n med_phen (pandas.DataFrame): Mediator phenotypes\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n \"\"\"\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps))\n mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)\n mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars\n ['m_sigma_beta'])\n mediator = pm.Normal('mediator', mu=mediator_mu, sd=\n mediator_sigma, observed=med_phen)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', 0, 1)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = 
[gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass MultiStudyMultiTissue(BayesianModel):\n \"\"\"\n Jointly model the transcriptional regulation and\n its effect on the phenotype in multiple studies \n and multiple tissues. Assume that tissues from the same\n individual are independent given the genotypes i.e.\n\n P(TisA, TisB | G) = P(TisA | G) P(TisB | G)\n\n \"\"\"\n\n def __init__(self, m_laplace_beta=1, m_sigma_beta=10, p_sigma_beta=10,\n *args, **kwargs):\n \"\"\"\n Expression ~ N(X\beta, \\\\sigma_exp)\n P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n P(\\\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n Phenotype ~ N(X\beta\u0007lpha, \\\\sigma_phen)\n P(\u0007lpha) ~ Uniform(-10, 10)\n P(\\\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n Args:\n tau_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n lambda_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n m_sigma_beta (int): P(\\\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n p_sigma_beta (int): P(\\\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n\n \"\"\"\n self.name = 'MultiStudyMultiTissue'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'m_laplace_beta': m_laplace_beta, 'm_sigma_beta':\n m_sigma_beta, 'p_sigma_beta': p_sigma_beta}\n super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)\n\n def set_idx(self, med_idx, gwas_idx):\n self.med_idx = med_idx\n self.gwas_idx = gwas_idx\n return\n\n def create_model(self, med_gen, med_phen, gwas_gen, gwas_phen):\n n_snps = gwas_gen.eval().shape[1]\n n_tissues = len(np.unique(self.med_idx))\n n_studies = len(np.unique(self.gwas_idx))\n with pm.Model() as phenotype_model:\n beta_med = pm.Laplace('beta_med', mu=0, b=self.vars[\n 'm_laplace_beta'], shape=(1, n_snps))\n mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1,\n shape=n_tissues)\n mediator_gamma = pm.Uniform('mediator_gamma', lower=0, upper=1,\n shape=n_tissues)\n mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[\n self.med_idx] * pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars\n ['m_sigma_beta'], shape=n_tissues)\n mediator = pm.Normal('mediator', mu=mediator_mu, sd=\n mediator_sigma[self.med_idx], observed=med_phen)\n intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)\n alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)\n alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)\n alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=\n n_studies)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=1,\n shape=n_studies)\n phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx\n ] * phenotype_expression_mu\n phen_sigma = phenotype_sigma[self.gwas_idx]\n phen = pm.Normal('phen', mu=phen_mu, sd=phen_sigma, observed=\n gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass NonMediated(BayesianModel):\n \"\"\"\n Model the relationship between the genotype and\n phenotype without any added information about the \n mediator. 
Use it as a basis for getting\n the null distribution under a mediation analysis.\n \"\"\"\n\n def __init__(self, g_laplace_beta=1, p_sigma_beta=10, *args, **kwargs):\n self.name = 'NonMediated'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'g_laplace_beta': g_laplace_beta, 'p_sigma_beta':\n p_sigma_beta}\n super(NonMediated, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_gen, gwas_phen):\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n beta = pm.Laplace('beta', mu=0, b=self.vars['g_laplace_beta'],\n shape=(1, n_snps))\n intercept = pm.Normal('intercept', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass MeasurementError(BayesianModel):\n \"\"\"\n Use the canonical definition of measurement error as described\n in http://andrewgelman.com/2016/09/04/29847/\n\n \"\"\"\n\n def __init__(self, mediator_mu, mediator_sd, m_laplace_beta=1,\n p_sigma_beta=10, *args, **kwargs):\n self.name = 'MeasurementError'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,\n 'p_sigma_beta': p_sigma_beta}\n super(MeasurementError, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_mediator, gwas_phen, gwas_error):\n n_samples = gwas_mediator.eval().shape[0]\n with pm.Model() as phenotype_model:\n mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],\n sd=self.vars['mediator_sd'], shape=n_samples)\n mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=\n gwas_error, shape=n_samples, observed=gwas_mediator)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Uniform('alpha', lower=-10, upper=10)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * mediator\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass MeasurementErrorBF(BayesianModel):\n \"\"\"\n Use the canonical definition of measurement error as described\n in http://andrewgelman.com/2016/09/04/29847/\n\n \"\"\"\n\n def __init__(self, mediator_mu, mediator_sd, precomp_med=True,\n heritability=0.1, p_sigma_beta=10, *args, **kwargs):\n self.name = 'MeasurementErrorBF'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,\n 'heritability': heritability, 'p_sigma_beta': p_sigma_beta,\n 'precomp_med': precomp_med}\n super(MeasurementErrorBF, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_mediator, gwas_phen, gwas_error):\n n_samples = gwas_mediator.eval().shape[0]\n with pm.Model() as phenotype_model:\n mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],\n sd=self.vars['mediator_sd'], shape=n_samples)\n mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=\n gwas_error, shape=n_samples, observed=gwas_mediator)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n if self.vars['precomp_med']:\n p_var = t.sqr(phenotype_sigma)\n h = 
self.vars['heritability']\n var_explained = p_var * h / (1 - h)\n md_var = np.square(np.mean(self.vars['mediator_sd']))\n md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))\n var_alpha = var_explained / (md_var + md_mean_sq)\n alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))\n else:\n p_var = t.sqr(phenotype_sigma)\n h = self.vars['heritability']\n var_explained = p_var * h / (1 - h)\n md_var = t.var(mediator)\n md_mean_sq = t.sqr(t.mean(mediator))\n var_alpha = var_explained / (md_var + md_mean_sq)\n alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))\n p = np.array([0.5, 0.5])\n mediator_model = pm.Bernoulli('mediator_model', p[1])\n phenotype_mu_null = intercept\n phenotype_mu_mediator = intercept + alpha * mediator\n phen = pm.DensityDist('phen', lambda value: pm.switch(\n mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd\n =phenotype_sigma).logp(value), pm.Normal.dist(mu=\n phenotype_mu_null, sd=phenotype_sigma).logp(value)),\n observed=gwas_phen)\n self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),\n pm.Metropolis()]\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n",
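For orientation, here is a minimal usage sketch of the TwoStage model defined above. It assumes PyMC3 3.x with Theano is installed and that the classes live in an importable module (the name twas_models below is a placeholder); the genotype and phenotype arrays are simulated purely for illustration and this is not the authors' own pipeline.

import numpy as np
from twas_models import TwoStage  # placeholder module name, not part of the original code

rng = np.random.RandomState(0)
n_ind, n_snps = 500, 20

# First-stage effect-size summaries (e.g. from a bootstrapped ElasticNet);
# shapes must broadcast against the (1, n_snps) beta_med inside create_model.
coef_mean = rng.normal(0.0, 0.1, size=n_snps)
coef_sd = np.abs(rng.normal(0.05, 0.01, size=n_snps))

# Simulated GWAS data: additive genotypes and a phenotype with a weak signal.
gwas_gen = rng.binomial(2, 0.3, size=(n_ind, n_snps)).astype(float)
gwas_phen = 0.5 * gwas_gen.dot(coef_mean) + rng.normal(0.0, 1.0, size=n_ind)

model = TwoStage(coef_mean, coef_sd, p_sigma_beta=10,
                 variational=True, mb=False, n_chain=20000, n_trace=2000)
trace = model.run(gwas_gen=gwas_gen, gwas_phen=gwas_phen)

# Posterior summary of the gene-to-phenotype effect alpha.
print(trace['alpha'].mean(), trace['alpha'].std(ddof=1))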
"step-4": "<mask token>\n\n\nclass BayesianModel(object):\n \"\"\"\n General Bayesian Model Class for quantifying\n relationship between gene and phenotype\n\n Adapted from Thomas Wiecki\n https://github.com/pymc-devs/pymc3/issues/511#issuecomment-125935523\n\n \"\"\"\n\n def __init__(self, variational=True, mb=False, n_chain=50000, n_trace=\n 5000, logistic=False, steps=None):\n \"\"\"\n Args:\n variational (bool, optional): Use Variational Inference\n mb (bool, optional): Use minibatches\n \"\"\"\n self.variational = variational\n self.cached_model = None\n self.mb = mb\n self.n_chain = n_chain\n self.n_trace = n_trace\n self.logistic = logistic\n self.steps = steps\n\n def cache_model(self, **inputs):\n \"\"\"\n Create a cached model for the Bayesian model using\n shared theano variables for each Bayesian\n input parameter.\n\n Args:\n **inputs (dict): inputs for Bayesian model\n\n \"\"\"\n self.shared_vars = self._create_shared_vars(**inputs)\n self.cached_model = self.create_model(**self.shared_vars)\n\n def create_model(self, **inputs):\n \"\"\"\n Each instance of this class needs to define\n their PYMC3 model in here.\n \"\"\"\n raise NotImplementedError('This method has to be overwritten.')\n\n def _create_shared_vars(self, **inputs):\n \"\"\"\n For each input variable, create theano shared variable\n and set their initial values.\n\n Args:\n **inputs (dict): inputs for Bayesian model\n\n Returns:\n dict: key, value - var_name, theano.shared variable\n \"\"\"\n shared_vars = {}\n for name, data in inputs.items():\n shared_vars[name] = shared(data, name=name)\n return shared_vars\n\n def _clean_inputs(self, inputs):\n \"\"\"\n Clean the inputs, i.e. remove some\n genotype columns. Useful for some class of Bayesian models\n such as Two-Stage, where first stage involves filtering\n on certain SNPs.\n\n Args:\n inputs (dict): inputs for Bayesian model\n Returns:\n dict: cleaned inputs for Bayesian model\n \"\"\"\n return inputs\n\n def run(self, **inputs):\n \"\"\"\n Run cached Bayesian model using the inputs\n\n Args:\n **inputs (dict): inputs for Bayesian model\n\n Returns:\n trace: Trace of the PyMC3 inference\n \"\"\"\n if self.cached_model is None:\n self.cache_model(**inputs)\n for name, data in inputs.items():\n self.shared_vars[name].set_value(data)\n if self.mb and self.variational:\n self.minibatches = zip(self._mb_generator(inputs['gwas_gen']),\n self._mb_generator(inputs['gwas_phen']))\n self.trace = self._inference()\n return self.trace\n\n def _inference(self, n_trace=None):\n \"\"\"\n Perform the inference. Uses ADVI if self.variational\n is True. 
Also, uses minibatches is self.mb=True based\n on generators defined in self.run.\n\n Otherwise, uses Metropolis.\n\n Args:\n n_trace (int, optional): Number of steps used for trace\n Returns:\n trace: Trace of the PyMC3 inference\n \"\"\"\n if n_trace is None:\n n_trace = self.n_trace\n with self.cached_model:\n if self.variational:\n if self.mb:\n v_params = pm.variational.advi_minibatch(n=self.n_chain,\n minibatch_tensors=self.minibatch_tensors,\n minibatch_RVs=self.minibatch_RVs, minibatches=self.\n minibatches)\n else:\n v_params = pm.variational.advi(n=self.n_chain)\n trace = pm.variational.sample_vp(v_params, draws=n_trace)\n self.v_params = v_params\n else:\n if self.steps is None:\n self.steps = pm.Metropolis()\n start = pm.find_MAP(fmin=optimize.fmin_powell)\n trace = pm.sample(self.n_chain, step=self.steps, start=\n start, progressbar=True)\n trace = trace[-n_trace:]\n self.trace = trace\n return trace\n\n def cross_validation(self, k_folds, **inputs):\n \"\"\"\n Run cross-validation on the inputs and calculate\n statistics for each fold test set.\n\n Args:\n k_folds (sklearn.cross_validation): Folds of test and train\n samples\n **inputs (dict): inputs for Bayesian model\n\n Returns:\n dict: statistics for each fold\n \"\"\"\n self.cv_stats, self.cv_traces = [], []\n self.k_folds = k_folds\n inputs = self._clean_inputs(inputs)\n for i, fold in enumerate(k_folds):\n train, test = fold\n input_train, input_test = {}, {}\n for name, data in inputs.items():\n if name in self.cv_vars:\n input_train[name] = data[train]\n input_test[name] = data[test]\n else:\n input_train[name] = data\n input_test[name] = data\n trace = self.run(**input_train)\n stats = self.calculate_statistics(trace, **input_test)\n self.cv_traces.append(trace)\n self.cv_stats.append(stats)\n return self.cv_traces, self.cv_stats\n\n def calculate_ppc(self, trace):\n \"\"\"\n Calculate several post-predictive checks\n based on the trace.\n \"\"\"\n dic = pm.stats.dic(trace, self.cached_model)\n waic, log_py, logp = calculate_waic(trace, self.cached_model)\n mu, sd, zscore = self._alpha_stats(trace)\n return {'dic': dic, 'waic': waic, 'logp': logp, 'mu': mu, 'sd': sd,\n 'zscore': zscore}\n\n def calculate_statistics(self, trace, **input_test):\n \"\"\"\n Calculate mse and logp statistics on a test set.\n\n Args:\n **input_test (dict): test set of inputs\n trace (PyMC3.trace): Trace of the inference chain\n Returns:\n dict: logp and mse\n \"\"\"\n inputs = self._clean_inputs(input_test)\n mc_logp = self._logp(trace, **inputs)\n mean_mse = self._mse(trace, **inputs)\n mse2 = self._mse2(trace, **inputs)\n mu, sd, zscore = self._alpha_stats(trace)\n return {'logp': mc_logp, 'mse': mean_mse, 'mse2': mse2, 'mu': mu,\n 'sd': sd, 'zscore': zscore}\n\n def calculate_bf(self, trace, var_name='mediator_model'):\n \"\"\"\n Calculate Bayes Factor using a Bernoulli variable in the \n trace.\n \"\"\"\n p_alt = trace[var_name].mean()\n bayes_factor = p_alt / (1 - p_alt)\n return bayes_factor\n\n def _logp(self, trace, **inputs):\n \"\"\"\n Calculate log likelihood using Monte Carlo integration.\n\n Args:\n **inputs (dict): inputs used in likelhood calculation\n trace (PyMC3.trace): Trace of the inference chain\n Returns:\n float: Log likelihood as estimated by Monte Carlo integration\n \"\"\"\n\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'], step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'], loc=phen_pred,\n scale=step['phenotype_sigma'])\n return 
phen_prob\n phen_probs = [calc_log(trace[idx]) for idx in np.random.randint(0,\n len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp\n\n def _mse(self, trace, **inputs):\n \"\"\"\n Calculate mean squared error of the model fit.\n Args:\n **inputs (dict): inputs used in likelhood calculation\n trace (PyMC3.trace): Trace of the inference chain\n\n Returns:\n float: Mean squared error across all samples\n \"\"\"\n phen_mse = []\n for idx in np.random.randint(0, len(trace), 500):\n step = self.trace[idx]\n exp_pred = np.dot(inputs['gwas_gen'], step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n mean_mse = np.mean(phen_mse)\n return mean_mse\n\n def _mse2(self, trace, **inputs):\n \"\"\"\n Calculate mean squared error of the model fit \n using posterior means of beta_med instead of\n sampling from it.\n\n Args:\n **inputs (dict): inputs used in likelhood calculation\n trace (PyMC3.trace): Trace of the inference chain\n\n Returns:\n float: Mean squared error across all samples\n \"\"\"\n exp = np.dot(inputs['gwas_gen'], trace['beta_med'].mean(axis=0).T)\n phen_pred = exp * trace['alpha'].mean()\n mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n return mse\n\n def _alpha_stats(self, trace):\n \"\"\"\n Calculate statistics of the alpha value in\n the trace.\n \"\"\"\n mean = np.mean(trace['alpha'])\n sd = np.std(trace['alpha'], ddof=1)\n zscore = mean / sd\n return mean, sd, zscore\n\n def _mb_generator(self, data, size=500):\n \"\"\"\n Generator for minibatches\n \"\"\"\n rng = np.random.RandomState(0)\n while True:\n ixs = rng.randint(len(data), size=size)\n yield data[ixs]\n\n\nclass TwoStage(BayesianModel):\n \"\"\"\n Two Stage Inference.\n\n First stage: Bootstrapped ElasticNet\n Second stage: Use loci that were learned in the first stage\n and their mean and std as priors for a simple\n Bayesian Linear Regression\n\n Attributes:\n\n \"\"\"\n\n def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):\n \"\"\"\n Args:\n\n \"\"\"\n self.name = 'TwoStage'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,\n 'p_sigma_beta': p_sigma_beta}\n super(TwoStage, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_gen, gwas_phen):\n \"\"\"\n Simple Bayesian Linear Regression\n\n Args:\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n Returns:\n pymc3.Model(): The Bayesian model\n \"\"\"\n n_ind, n_snps = gwas_gen.eval().shape\n with pm.Model() as phenotype_model:\n beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=\n self.vars['coef_sd'], shape=(1, n_snps))\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', mu=0, sd=1)\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n if self.logistic:\n p = tinvlogit(phenotype_mu)\n phen = pm.Bernoulli('phen', p=p, observed=gwas_phen)\n else:\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=\n self.vars['p_sigma_beta'])\n phen = pm.Normal('phen', mu=phenotype_mu, sd=\n phenotype_sigma, observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass TwoStageBF(BayesianModel):\n \"\"\"\n Two Stage Inference.\n\n First stage: Bootstrapped ElasticNet\n Second stage: Use loci 
that were learned in the first stage\n and their mean and std as priors for a simple\n Bayesian Linear Regression\n\n Attributes:\n\n \"\"\"\n\n def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):\n \"\"\"\n Args:\n\n \"\"\"\n self.name = 'TwoStageBF'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,\n 'p_sigma_beta': p_sigma_beta}\n super(TwoStageBF, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_gen, gwas_phen):\n \"\"\"\n Simple Bayesian Linear Regression\n\n Args:\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n Returns:\n pymc3.Model(): The Bayesian model\n \"\"\"\n n_ind, n_snps = gwas_gen.eval().shape\n with pm.Model() as phenotype_model:\n beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=\n self.vars['coef_sd'], shape=(1, n_snps))\n mediator = pm.dot(beta_med, gwas_gen.T)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n p = np.array([0.5, 0.5])\n mediator_model = pm.Bernoulli('mediator_model', p[1])\n phenotype_mu_null = intercept\n phenotype_mu_mediator = intercept + alpha * mediator\n phen = pm.DensityDist('phen', lambda value: pm.switch(\n mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd\n =phenotype_sigma).logp(value), pm.Normal.dist(mu=\n phenotype_mu_null, sd=phenotype_sigma).logp(value)),\n observed=gwas_phen)\n self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),\n pm.Metropolis()]\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass Joint(BayesianModel):\n \"\"\"\n Jointly model the transcriptional regulation and\n its effect on the phenotype.\n\n \"\"\"\n\n def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,\n tau_beta=1, lambda_beta=1, m_sigma_beta=10, p_sigma_beta=10, *args,\n **kwargs):\n \"\"\"\n Expression ~ N(X\beta, \\\\sigma_exp)\n P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n P(\\\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n Phenotype ~ N(X\beta\u0007lpha, \\\\sigma_phen)\n P(\u0007lpha) ~ Uniform(-10, 10)\n P(\\\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n Args:\n tau_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n lambda_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n m_sigma_beta (int): P(\\\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n p_sigma_beta (int): P(\\\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n\n \"\"\"\n self.name = 'Joint'\n self.model_type = model_type\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd, 'tau_beta':\n tau_beta, 'lambda_beta': lambda_beta, 'm_sigma_beta':\n m_sigma_beta, 'p_sigma_beta': p_sigma_beta}\n if model_type == 'laplace':\n self.create_model = self._create_model_laplace\n elif model_type == 'horseshoe':\n self.create_model = self._create_model_horseshoe\n elif model_type == 'prior':\n self.create_model = self._create_model_prior\n else:\n raise NotImplementedError('Unsupported model type')\n super(Joint, self).__init__(*args, **kwargs)\n\n def _create_model_prior(self, med_gen, med_phen, gwas_gen, gwas_phen):\n \"\"\"\n Args:\n med_gen (pandas.DataFrame): Mediator genotypes\n med_phen (pandas.DataFrame): Mediator phenotypes\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n \"\"\"\n n_snps = 
gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=\n self.vars['coef_sd'], shape=(1, n_snps))\n mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)\n mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars\n ['m_sigma_beta'])\n mediator = pm.Normal('mediator', mu=mediator_mu, sd=\n mediator_sigma, observed=med_phen)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', 0, 1)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n def _create_model_horseshoe(self, med_gen, med_phen, gwas_gen, gwas_phen):\n \"\"\"\n Args:\n med_gen (pandas.DataFrame): Mediator genotypes\n med_phen (pandas.DataFrame): Mediator phenotypes\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n \"\"\"\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n tau_beta = pm.HalfCauchy('tau_beta', beta=self.vars['tau_beta'])\n lambda_beta = pm.HalfCauchy('lambda_beta', beta=self.vars[\n 'lambda_beta'], shape=(1, n_snps))\n total_variance = pm.dot(lambda_beta * lambda_beta, tau_beta *\n tau_beta)\n beta_med = pm.Normal('beta_med', mu=0, tau=1 / total_variance,\n shape=(1, n_snps))\n mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)\n mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars\n ['m_sigma_beta'])\n mediator = pm.Normal('mediator', mu=mediator_mu, sd=\n mediator_sigma, observed=med_phen)\n alpha = pm.Normal('alpha', 0, 1)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n def _create_model_laplace(self, med_gen, med_phen, gwas_gen, gwas_phen):\n \"\"\"\n Args:\n med_gen (pandas.DataFrame): Mediator genotypes\n med_phen (pandas.DataFrame): Mediator phenotypes\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n \"\"\"\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps))\n mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)\n mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars\n ['m_sigma_beta'])\n mediator = pm.Normal('mediator', mu=mediator_mu, sd=\n mediator_sigma, observed=med_phen)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', 0, 1)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * 
phenotype_expression_mu\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass MultiStudyMultiTissue(BayesianModel):\n \"\"\"\n Jointly model the transcriptional regulation and\n its effect on the phenotype in multiple studies \n and multiple tissues. Assume that tissues from the same\n individual are independent given the genotypes i.e.\n\n P(TisA, TisB | G) = P(TisA | G) P(TisB | G)\n\n \"\"\"\n\n def __init__(self, m_laplace_beta=1, m_sigma_beta=10, p_sigma_beta=10,\n *args, **kwargs):\n \"\"\"\n Expression ~ N(X\beta, \\\\sigma_exp)\n P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n P(\\\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n Phenotype ~ N(X\beta\u0007lpha, \\\\sigma_phen)\n P(\u0007lpha) ~ Uniform(-10, 10)\n P(\\\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n Args:\n tau_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n lambda_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)\n m_sigma_beta (int): P(\\\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n p_sigma_beta (int): P(\\\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n\n \"\"\"\n self.name = 'MultiStudyMultiTissue'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'m_laplace_beta': m_laplace_beta, 'm_sigma_beta':\n m_sigma_beta, 'p_sigma_beta': p_sigma_beta}\n super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)\n\n def set_idx(self, med_idx, gwas_idx):\n self.med_idx = med_idx\n self.gwas_idx = gwas_idx\n return\n\n def create_model(self, med_gen, med_phen, gwas_gen, gwas_phen):\n n_snps = gwas_gen.eval().shape[1]\n n_tissues = len(np.unique(self.med_idx))\n n_studies = len(np.unique(self.gwas_idx))\n with pm.Model() as phenotype_model:\n beta_med = pm.Laplace('beta_med', mu=0, b=self.vars[\n 'm_laplace_beta'], shape=(1, n_snps))\n mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1,\n shape=n_tissues)\n mediator_gamma = pm.Uniform('mediator_gamma', lower=0, upper=1,\n shape=n_tissues)\n mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[\n self.med_idx] * pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars\n ['m_sigma_beta'], shape=n_tissues)\n mediator = pm.Normal('mediator', mu=mediator_mu, sd=\n mediator_sigma[self.med_idx], observed=med_phen)\n intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)\n alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)\n alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)\n alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=\n n_studies)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=1,\n shape=n_studies)\n phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx\n ] * phenotype_expression_mu\n phen_sigma = phenotype_sigma[self.gwas_idx]\n phen = pm.Normal('phen', mu=phen_mu, sd=phen_sigma, observed=\n gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass NonMediated(BayesianModel):\n \"\"\"\n Model the relationship between the genotype and\n phenotype without any added information about the \n mediator. 
Use it as a basis for getting\n the null distribution under a mediation analysis.\n \"\"\"\n\n def __init__(self, g_laplace_beta=1, p_sigma_beta=10, *args, **kwargs):\n self.name = 'NonMediated'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'g_laplace_beta': g_laplace_beta, 'p_sigma_beta':\n p_sigma_beta}\n super(NonMediated, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_gen, gwas_phen):\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n beta = pm.Laplace('beta', mu=0, b=self.vars['g_laplace_beta'],\n shape=(1, n_snps))\n intercept = pm.Normal('intercept', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass MeasurementError(BayesianModel):\n \"\"\"\n Use the canonical definition of measurement error as described\n in http://andrewgelman.com/2016/09/04/29847/\n\n \"\"\"\n\n def __init__(self, mediator_mu, mediator_sd, m_laplace_beta=1,\n p_sigma_beta=10, *args, **kwargs):\n self.name = 'MeasurementError'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,\n 'p_sigma_beta': p_sigma_beta}\n super(MeasurementError, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_mediator, gwas_phen, gwas_error):\n n_samples = gwas_mediator.eval().shape[0]\n with pm.Model() as phenotype_model:\n mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],\n sd=self.vars['mediator_sd'], shape=n_samples)\n mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=\n gwas_error, shape=n_samples, observed=gwas_mediator)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Uniform('alpha', lower=-10, upper=10)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * mediator\n phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\nclass MeasurementErrorBF(BayesianModel):\n \"\"\"\n Use the canonical definition of measurement error as described\n in http://andrewgelman.com/2016/09/04/29847/\n\n \"\"\"\n\n def __init__(self, mediator_mu, mediator_sd, precomp_med=True,\n heritability=0.1, p_sigma_beta=10, *args, **kwargs):\n self.name = 'MeasurementErrorBF'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,\n 'heritability': heritability, 'p_sigma_beta': p_sigma_beta,\n 'precomp_med': precomp_med}\n super(MeasurementErrorBF, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_mediator, gwas_phen, gwas_error):\n n_samples = gwas_mediator.eval().shape[0]\n with pm.Model() as phenotype_model:\n mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],\n sd=self.vars['mediator_sd'], shape=n_samples)\n mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=\n gwas_error, shape=n_samples, observed=gwas_mediator)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.\n vars['p_sigma_beta'])\n if self.vars['precomp_med']:\n p_var = t.sqr(phenotype_sigma)\n h = 
self.vars['heritability']\n var_explained = p_var * h / (1 - h)\n md_var = np.square(np.mean(self.vars['mediator_sd']))\n md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))\n var_alpha = var_explained / (md_var + md_mean_sq)\n alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))\n else:\n p_var = t.sqr(phenotype_sigma)\n h = self.vars['heritability']\n var_explained = p_var * h / (1 - h)\n md_var = t.var(mediator)\n md_mean_sq = t.sqr(t.mean(mediator))\n var_alpha = var_explained / (md_var + md_mean_sq)\n alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))\n p = np.array([0.5, 0.5])\n mediator_model = pm.Bernoulli('mediator_model', p[1])\n phenotype_mu_null = intercept\n phenotype_mu_mediator = intercept + alpha * mediator\n phen = pm.DensityDist('phen', lambda value: pm.switch(\n mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd\n =phenotype_sigma).logp(value), pm.Normal.dist(mu=\n phenotype_mu_null, sd=phenotype_sigma).logp(value)),\n observed=gwas_phen)\n self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),\n pm.Metropolis()]\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n",
"step-5": "'''\nBayesian models for TWAS.\n\nAuthor: Kunal Bhutani <[email protected]>\n'''\n\nfrom scipy.stats import norm\nimport pymc3 as pm\nimport numpy as np\nfrom theano import shared\nfrom scipy.stats.distributions import pareto\nfrom scipy import optimize\nimport theano.tensor as t\n\n\ndef tinvlogit(x):\n return t.exp(x) / (1 + t.exp(x))\n\n\ndef calculate_waic(trace, model=None, r_logp=True):\n \"\"\"\n Taken directly from PyMC3.\n Reproduced to only take into account the phenotype and not mediator\n variable when calculating logp.\n\n Calculate the widely available information criterion and the effective\n number of parameters of the samples in trace from model.\n\n Read more theory here - in a paper by some of the\n leading authorities on Model Selection - http://bit.ly/1W2YJ7c\n \"\"\"\n log_py = log_post_trace(trace, model)\n lppd = np.sum(np.log(np.mean(np.exp(log_py), axis=0)))\n p_waic = np.sum(np.var(log_py, axis=0))\n if r_logp:\n return -2 * lppd + 2 * p_waic, log_py, lppd\n else:\n return -2 * lppd + 2 * p_waic\n\n\ndef calculate_loo(trace=None, model=None, log_py=None):\n \"\"\"\n Taken directly from PyMC3.\n Reproduced to only take into account the phenotype and not mediator\n variable when calculating logp.\n\n Calculates leave-one-out (LOO) cross-validation for out of sample\n predictive model fit, following Vehtari et al. (2015).\n Cross-validation is computed using Pareto-smoothed importance sampling.\n\n Returns log pointwise predictive density calculated via\n approximated LOO cross-validation.\n \"\"\"\n if log_py is None:\n log_py = log_post_trace(trace, model)\n # Importance ratios\n r = 1. / np.exp(log_py)\n r_sorted = np.sort(r, axis=0)\n\n # Extract largest 20% of importance ratios and\n # fit generalized Pareto to each\n # (returns tuple with shape, location, scale)\n q80 = int(len(log_py) * 0.8)\n pareto_fit = np.apply_along_axis(lambda x: pareto.fit(x, floc=0),\n 0, r_sorted[q80:])\n # Calculate expected values of the order statistics of the fitted Pareto\n S = len(r_sorted)\n M = S - q80\n z = (np.arange(M) + 0.5) / M\n expvals = map(lambda x: pareto.ppf(z, x[0], scale=x[2]), pareto_fit.T)\n\n # Replace importance ratios with order statistics of fitted Pareto\n r_sorted[q80:] = np.vstack(expvals).T\n # Unsort ratios (within columns) before using them as weights\n r_new = np.array([x[np.argsort(i)]\n for x, i in zip(r_sorted,\n np.argsort(r, axis=0))])\n\n # Truncate weights to guarantee finite variance\n w = np.minimum(r_new, r_new.mean(axis=0) * S**0.75)\n loo_lppd = np.sum(np.log(np.sum(w * np.exp(log_py), axis=0) / np.sum(w, axis=0)))\n\n return loo_lppd\n\n\ndef log_post_trace(trace, model):\n '''\n Taken directly from PyMC3.\n Reproduced to only take into account the phenotype and not mediator\n variable when calculating logp.\n\n Calculate the elementwise log-posterior for the sampled trace.\n '''\n logp = np.hstack([obs.logp_elemwise(pt) for pt in trace]\n for obs in model.observed_RVs if obs.__repr__() == 'phen')\n if len(logp.shape) > 2:\n logp = logp.squeeze(axis=1)\n return logp\n\n\nclass BayesianModel(object):\n '''\n General Bayesian Model Class for quantifying\n relationship between gene and phenotype\n\n Adapted from Thomas Wiecki\n https://github.com/pymc-devs/pymc3/issues/511#issuecomment-125935523\n\n '''\n\n def __init__(self, variational=True, mb=False,\n n_chain=50000, n_trace=5000,\n logistic=False, steps=None):\n \"\"\"\n Args:\n variational (bool, optional): Use Variational Inference\n mb (bool, optional): Use minibatches\n 
\"\"\"\n self.variational = variational\n self.cached_model = None\n self.mb = mb\n self.n_chain = n_chain\n self.n_trace = n_trace\n self.logistic = logistic\n self.steps = steps\n\n\n def cache_model(self, **inputs):\n \"\"\"\n Create a cached model for the Bayesian model using\n shared theano variables for each Bayesian\n input parameter.\n\n Args:\n **inputs (dict): inputs for Bayesian model\n\n \"\"\"\n self.shared_vars = self._create_shared_vars(**inputs)\n self.cached_model = self.create_model(**self.shared_vars)\n\n def create_model(self, **inputs):\n \"\"\"\n Each instance of this class needs to define\n their PYMC3 model in here.\n \"\"\"\n raise NotImplementedError('This method has to be overwritten.')\n\n def _create_shared_vars(self, **inputs):\n \"\"\"\n For each input variable, create theano shared variable\n and set their initial values.\n\n Args:\n **inputs (dict): inputs for Bayesian model\n\n Returns:\n dict: key, value - var_name, theano.shared variable\n \"\"\"\n shared_vars = {}\n for name, data in inputs.items():\n shared_vars[name] = shared(data, name=name)\n return shared_vars\n\n def _clean_inputs(self, inputs):\n \"\"\"\n Clean the inputs, i.e. remove some\n genotype columns. Useful for some class of Bayesian models\n such as Two-Stage, where first stage involves filtering\n on certain SNPs.\n\n Args:\n inputs (dict): inputs for Bayesian model\n Returns:\n dict: cleaned inputs for Bayesian model\n \"\"\"\n return inputs\n\n def run(self, **inputs):\n \"\"\"\n Run cached Bayesian model using the inputs\n\n Args:\n **inputs (dict): inputs for Bayesian model\n\n Returns:\n trace: Trace of the PyMC3 inference\n \"\"\"\n if self.cached_model is None:\n self.cache_model(**inputs)\n for name, data in inputs.items():\n self.shared_vars[name].set_value(data)\n if self.mb and self.variational:\n self.minibatches = zip(self._mb_generator(inputs['gwas_gen']),\n self._mb_generator(inputs['gwas_phen']))\n self.trace = self._inference()\n return self.trace\n\n def _inference(self, n_trace=None):\n \"\"\"\n Perform the inference. Uses ADVI if self.variational\n is True. 
Also, uses minibatches is self.mb=True based\n on generators defined in self.run.\n\n Otherwise, uses Metropolis.\n\n Args:\n n_trace (int, optional): Number of steps used for trace\n Returns:\n trace: Trace of the PyMC3 inference\n \"\"\"\n if n_trace is None:\n n_trace = self.n_trace\n\n with self.cached_model:\n if self.variational:\n if self.mb:\n v_params = pm.variational.advi_minibatch(n=self.n_chain,\n minibatch_tensors=self.minibatch_tensors,\n minibatch_RVs=self.minibatch_RVs,\n minibatches=self.minibatches,)\n else:\n v_params = pm.variational.advi(n=self.n_chain)\n trace = pm.variational.sample_vp(v_params, draws=n_trace)\n self.v_params = v_params\n else:\n if self.steps is None:\n self.steps = pm.Metropolis()\n start = pm.find_MAP(fmin=optimize.fmin_powell)\n trace = pm.sample(self.n_chain,\n step=self.steps,\n start=start,\n progressbar=True,\n )\n trace = trace[-n_trace:]\n self.trace = trace\n return trace\n\n def cross_validation(self, k_folds, **inputs):\n \"\"\"\n Run cross-validation on the inputs and calculate\n statistics for each fold test set.\n\n Args:\n k_folds (sklearn.cross_validation): Folds of test and train\n samples\n **inputs (dict): inputs for Bayesian model\n\n Returns:\n dict: statistics for each fold\n \"\"\"\n self.cv_stats, self.cv_traces = [], []\n self.k_folds = k_folds\n inputs = self._clean_inputs(inputs)\n for i, fold in enumerate(k_folds):\n train, test = fold\n input_train, input_test = {}, {}\n for name, data in inputs.items():\n if name in self.cv_vars:\n input_train[name] = data[train]\n input_test[name] = data[test]\n else:\n input_train[name] = data\n input_test[name] = data\n trace = self.run(**input_train)\n stats = self.calculate_statistics(trace, **input_test)\n self.cv_traces.append(trace)\n self.cv_stats.append(stats)\n return self.cv_traces, self.cv_stats\n\n def calculate_ppc(self, trace):\n \"\"\"\n Calculate several post-predictive checks\n based on the trace.\n \"\"\"\n dic = pm.stats.dic(trace, self.cached_model)\n waic, log_py, logp = calculate_waic(trace, self.cached_model)\n #loo = calculate_loo(log_py=log_py)\n mu, sd, zscore = self._alpha_stats(trace)\n return {'dic': dic,\n 'waic': waic,\n 'logp': logp,\n #'loo': loo,\n 'mu': mu,\n 'sd': sd,\n 'zscore': zscore}\n\n def calculate_statistics(self, trace, **input_test):\n \"\"\"\n Calculate mse and logp statistics on a test set.\n\n Args:\n **input_test (dict): test set of inputs\n trace (PyMC3.trace): Trace of the inference chain\n Returns:\n dict: logp and mse\n \"\"\"\n inputs = self._clean_inputs(input_test)\n mc_logp = self._logp(trace, **inputs)\n mean_mse = self._mse(trace, **inputs)\n mse2 = self._mse2(trace, **inputs)\n mu, sd, zscore = self._alpha_stats(trace)\n return {'logp': mc_logp,\n 'mse': mean_mse,\n 'mse2': mse2,\n 'mu': mu,\n 'sd': sd,\n 'zscore': zscore}\n\n def calculate_bf(self, trace, var_name='mediator_model'):\n '''\n Calculate Bayes Factor using a Bernoulli variable in the \n trace.\n '''\n p_alt = trace[var_name].mean()\n bayes_factor = (p_alt/(1-p_alt))\n return bayes_factor\n\n\n def _logp(self, trace, **inputs):\n \"\"\"\n Calculate log likelihood using Monte Carlo integration.\n\n Args:\n **inputs (dict): inputs used in likelhood calculation\n trace (PyMC3.trace): Trace of the inference chain\n Returns:\n float: Log likelihood as estimated by Monte Carlo integration\n \"\"\"\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = 
norm.logpdf(x=inputs['gwas_phen'],\n loc=phen_pred,\n scale=step['phenotype_sigma'])\n return phen_prob\n\n phen_probs = [calc_log(trace[idx])\n for idx in np.random.randint(0, len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp\n\n def _mse(self, trace, **inputs):\n \"\"\"\n Calculate mean squared error of the model fit.\n Args:\n **inputs (dict): inputs used in likelhood calculation\n trace (PyMC3.trace): Trace of the inference chain\n\n Returns:\n float: Mean squared error across all samples\n \"\"\"\n phen_mse = []\n for idx in np.random.randint(0, len(trace), 500):\n step = self.trace[idx]\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n mean_mse = np.mean(phen_mse)\n return mean_mse\n\n def _mse2(self, trace, **inputs):\n \"\"\"\n Calculate mean squared error of the model fit \n using posterior means of beta_med instead of\n sampling from it.\n\n Args:\n **inputs (dict): inputs used in likelhood calculation\n trace (PyMC3.trace): Trace of the inference chain\n\n Returns:\n float: Mean squared error across all samples\n \"\"\"\n exp = np.dot(inputs['gwas_gen'],\n trace['beta_med'].mean(axis=0).T)\n phen_pred = exp * trace['alpha'].mean()\n mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n return mse\n\n def _alpha_stats(self, trace):\n \"\"\"\n Calculate statistics of the alpha value in\n the trace.\n \"\"\"\n mean = np.mean(trace['alpha'])\n sd = np.std(trace['alpha'], ddof=1)\n zscore = mean / sd\n return mean, sd, zscore\n\n def _mb_generator(self, data, size=500):\n \"\"\"\n Generator for minibatches\n \"\"\"\n rng = np.random.RandomState(0)\n while True:\n ixs = rng.randint(len(data), size=size)\n yield data[ixs]\n\n\nclass TwoStage(BayesianModel):\n \"\"\"\n Two Stage Inference.\n\n First stage: Bootstrapped ElasticNet\n Second stage: Use loci that were learned in the first stage\n and their mean and std as priors for a simple\n Bayesian Linear Regression\n\n Attributes:\n\n \"\"\"\n def __init__(self, coef_mean, coef_sd, p_sigma_beta=10,\n *args, **kwargs):\n \"\"\"\n Args:\n\n \"\"\"\n self.name = 'TwoStage'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'coef_mean': coef_mean,\n 'coef_sd': coef_sd,\n 'p_sigma_beta': p_sigma_beta}\n super(TwoStage, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_gen, gwas_phen):\n \"\"\"\n Simple Bayesian Linear Regression\n\n Args:\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n Returns:\n pymc3.Model(): The Bayesian model\n \"\"\"\n n_ind, n_snps = gwas_gen.eval().shape\n with pm.Model() as phenotype_model:\n beta_med = pm.Normal('beta_med',\n mu=self.vars['coef_mean'],\n sd=self.vars['coef_sd'],\n shape=(1, n_snps))\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', mu=0, sd=1)\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n if self.logistic:\n p = tinvlogit(phenotype_mu)\n phen = pm.Bernoulli('phen', p=p, observed=gwas_phen)\n else:\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma',\n beta=self.vars['p_sigma_beta'])\n phen = pm.Normal('phen',\n mu=phenotype_mu,\n sd=phenotype_sigma,\n observed=gwas_phen)\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\nclass 
TwoStageBF(BayesianModel):\n \"\"\"\n Two Stage Inference.\n\n First stage: Bootstrapped ElasticNet\n Second stage: Use loci that were learned in the first stage\n and their mean and std as priors for a simple\n Bayesian Linear Regression\n\n Attributes:\n\n \"\"\"\n def __init__(self, coef_mean, coef_sd, p_sigma_beta=10,\n *args, **kwargs):\n \"\"\"\n Args:\n\n \"\"\"\n self.name = 'TwoStageBF'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'coef_mean': coef_mean,\n 'coef_sd': coef_sd,\n 'p_sigma_beta': p_sigma_beta}\n super(TwoStageBF, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_gen, gwas_phen):\n \"\"\"\n Simple Bayesian Linear Regression\n\n Args:\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n Returns:\n pymc3.Model(): The Bayesian model\n \"\"\"\n n_ind, n_snps = gwas_gen.eval().shape\n with pm.Model() as phenotype_model:\n beta_med = pm.Normal('beta_med',\n mu=self.vars['coef_mean'],\n sd=self.vars['coef_sd'],\n shape=(1, n_snps))\n \n mediator = pm.dot(beta_med, gwas_gen.T)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma',\n beta=self.vars['p_sigma_beta'])\n \n\n # Model Selection\n p = np.array([0.5, 0.5])\n mediator_model = pm.Bernoulli('mediator_model', p[1])\n\n # Model 1\n phenotype_mu_null = intercept\n\n # Model 2\n phenotype_mu_mediator = intercept + alpha * mediator\n\n phen = pm.DensityDist('phen',\n lambda value: pm.switch(mediator_model, \n pm.Normal.dist(mu=phenotype_mu_mediator, sd=phenotype_sigma).logp(value), \n pm.Normal.dist(mu=phenotype_mu_null, sd=phenotype_sigma).logp(value)\n ),\n observed=gwas_phen)\n self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),\n pm.Metropolis()]\n\n \n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model\n\n\n\nclass Joint(BayesianModel):\n \"\"\"\n Jointly model the transcriptional regulation and\n its effect on the phenotype.\n\n \"\"\"\n def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,\n tau_beta=1, lambda_beta=1, m_sigma_beta=10,\n p_sigma_beta=10, *args, **kwargs):\n \"\"\"\n Expression ~ N(X\\beta, \\sigma_exp)\n P(\\beta) ~ Horseshoe (tau_beta, lambda_beta)\n P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)\n P(\\alpha) ~ Uniform(-10, 10)\n P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n Args:\n tau_beta (int): P(\\beta) ~ Horseshoe (tau_beta, lambda_beta)\n lambda_beta (int): P(\\beta) ~ Horseshoe (tau_beta, lambda_beta)\n m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n\n \"\"\"\n self.name = 'Joint'\n self.model_type = model_type\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'coef_mean': coef_mean,\n 'coef_sd': coef_sd,\n 'tau_beta': tau_beta,\n 'lambda_beta': lambda_beta,\n 'm_sigma_beta': m_sigma_beta,\n 'p_sigma_beta': p_sigma_beta\n }\n if model_type == 'laplace':\n self.create_model = self._create_model_laplace\n elif model_type == 'horseshoe':\n self.create_model = self._create_model_horseshoe\n elif model_type == 'prior':\n # assert((coef_sd is not None) and (coef_mean is not None),\n # 'Must provided coef_mean and coef_sd if using prior')\n self.create_model = self._create_model_prior\n else:\n raise NotImplementedError('Unsupported model type')\n super(Joint, self).__init__(*args, **kwargs)\n\n def 
_create_model_prior(self, med_gen, med_phen,\n gwas_gen, gwas_phen):\n \"\"\"\n Args:\n med_gen (pandas.DataFrame): Mediator genotypes\n med_phen (pandas.DataFrame): Mediator phenotypes\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n \"\"\"\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n # Expression\n beta_med = pm.Normal('beta_med',\n mu=self.vars['coef_mean'],\n sd=self.vars['coef_sd'],\n shape=(1, n_snps))\n mediator_intercept = pm.Normal('mediator_intercept',\n mu=0,\n sd=1)\n mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma',\n beta=self.vars['m_sigma_beta'])\n mediator = pm.Normal('mediator',\n mu=mediator_mu,\n sd=mediator_sigma,\n observed=med_phen)\n # Phenotype\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', 0, 1)\n # alpha = pm.Uniform('alpha', -10, 10)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma',\n beta=self.vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n phen = pm.Normal('phen',\n mu=phenotype_mu,\n sd=phenotype_sigma,\n observed=gwas_phen)\n\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n\n return phenotype_model\n\n def _create_model_horseshoe(self, med_gen, med_phen,\n gwas_gen, gwas_phen):\n \"\"\"\n Args:\n med_gen (pandas.DataFrame): Mediator genotypes\n med_phen (pandas.DataFrame): Mediator phenotypes\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n \"\"\"\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n # Expression\n tau_beta = pm.HalfCauchy('tau_beta',\n beta=self.vars['tau_beta'])\n lambda_beta = pm.HalfCauchy('lambda_beta',\n beta=self.vars['lambda_beta'],\n shape=(1, n_snps))\n # lambda_beta = pm.StudentT('lambda_beta', nu=3, mu=0,\n # lam=1, shape=(1, n_snps))\n total_variance = pm.dot(lambda_beta * lambda_beta,\n tau_beta * tau_beta)\n beta_med = pm.Normal('beta_med',\n mu=0,\n tau=1 / total_variance,\n shape=(1, n_snps))\n mediator_intercept = pm.Normal('mediator_intercept',\n mu=0,\n sd=1)\n mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma',\n beta=self.vars['m_sigma_beta'])\n mediator = pm.Normal('mediator',\n mu=mediator_mu,\n sd=mediator_sigma,\n observed=med_phen)\n # Phenotype\n alpha = pm.Normal('alpha', 0, 1)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma',\n beta=self.vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n phen = pm.Normal('phen',\n mu=phenotype_mu,\n sd=phenotype_sigma,\n observed=gwas_phen)\n\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n\n return phenotype_model\n\n def _create_model_laplace(self, med_gen, med_phen,\n gwas_gen, gwas_phen):\n \"\"\"\n Args:\n med_gen (pandas.DataFrame): Mediator genotypes\n med_phen (pandas.DataFrame): Mediator phenotypes\n gwas_gen (pandas.DataFrame): GWAS genotypes\n gwas_phen (pandas.DataFrame): GWAS phenotypes\n\n \"\"\"\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n # Expression\n beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps),)\n mediator_intercept = 
pm.Normal('mediator_intercept',\n mu=0,\n sd=1)\n mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)\n mediator_sigma = pm.HalfCauchy('mediator_sigma',\n beta=self.vars['m_sigma_beta'])\n mediator = pm.Normal('mediator',\n mu=mediator_mu,\n sd=mediator_sigma,\n observed=med_phen)\n # Phenotype\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', 0, 1)\n # alpha = pm.Uniform('alpha', -10, 10)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma',\n beta=self.vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * phenotype_expression_mu\n phen = pm.Normal('phen',\n mu=phenotype_mu,\n sd=phenotype_sigma,\n observed=gwas_phen)\n\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n\n return phenotype_model\n\nclass MultiStudyMultiTissue(BayesianModel):\n \"\"\"\n Jointly model the transcriptional regulation and\n its effect on the phenotype in multiple studies \n and multiple tissues. Assume that tissues from the same\n individual are independent given the genotypes i.e.\n\n P(TisA, TisB | G) = P(TisA | G) P(TisB | G)\n\n \"\"\"\n def __init__(self,\n m_laplace_beta=1,\n m_sigma_beta=10,\n p_sigma_beta=10, *args, **kwargs):\n \"\"\"\n Expression ~ N(X\\beta, \\sigma_exp)\n P(\\beta) ~ Horseshoe (tau_beta, lambda_beta)\n P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)\n P(\\alpha) ~ Uniform(-10, 10)\n P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n Args:\n tau_beta (int): P(\\beta) ~ Horseshoe (tau_beta, lambda_beta)\n lambda_beta (int): P(\\beta) ~ Horseshoe (tau_beta, lambda_beta)\n m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)\n p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)\n\n \"\"\"\n self.name = 'MultiStudyMultiTissue'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'m_laplace_beta': m_laplace_beta,\n 'm_sigma_beta': m_sigma_beta,\n 'p_sigma_beta': p_sigma_beta\n }\n super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)\n\n def set_idx(self, med_idx, gwas_idx):\n self.med_idx = med_idx\n self.gwas_idx = gwas_idx\n return\n\n def create_model(self, \n med_gen, med_phen, \n gwas_gen, gwas_phen):\n n_snps = gwas_gen.eval().shape[1]\n n_tissues = len(np.unique(self.med_idx)) #\n n_studies = len(np.unique(self.gwas_idx))\n\n with pm.Model() as phenotype_model:\n # Expression\n \n beta_med = pm.Laplace('beta_med',\n mu=0,\n b=self.vars['m_laplace_beta'],\n shape=(1, n_snps),)\n mediator_intercept = pm.Normal('mediator_intercept',\n mu=0,\n sd=1,\n shape=n_tissues)\n mediator_gamma = pm.Uniform('mediator_gamma',\n lower=0,\n upper=1,\n shape=n_tissues)\n mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[self.med_idx] * pm.dot(beta_med, med_gen.T) \n mediator_sigma = pm.HalfCauchy('mediator_sigma',\n beta=self.vars['m_sigma_beta'],\n shape=n_tissues)\n mediator = pm.Normal('mediator',\n mu=mediator_mu,\n sd=mediator_sigma[self.med_idx],\n observed=med_phen)\n # Phenotype\n intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)\n alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)\n alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)\n alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=n_studies)\n # alpha = pm.Uniform('alpha', -10, 10)\n phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma',\n beta=1,\n shape=n_studies)\n phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx] * 
phenotype_expression_mu\n phen_sigma = phenotype_sigma[self.gwas_idx]\n phen = pm.Normal('phen',\n mu=phen_mu,\n sd=phen_sigma,\n observed=gwas_phen)\n\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n\n return phenotype_model\n\n\nclass NonMediated(BayesianModel):\n \"\"\"\n Model the relationship between the genotype and\n phenotype without any added information about the \n mediator. Use it as a basis for getting\n the null distribution under a mediation analysis.\n \"\"\"\n def __init__(self,\n g_laplace_beta=1,\n p_sigma_beta=10, *args, **kwargs):\n self.name = 'NonMediated'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'g_laplace_beta': g_laplace_beta,\n 'p_sigma_beta': p_sigma_beta,\n }\n super(NonMediated, self).__init__(*args, **kwargs)\n\n def create_model(self, \n gwas_gen, gwas_phen):\n n_snps = gwas_gen.eval().shape[1]\n with pm.Model() as phenotype_model:\n beta = pm.Laplace('beta',\n mu=0,\n b=self.vars['g_laplace_beta'],\n shape=(1, n_snps),)\n # Phenotype\n intercept = pm.Normal('intercept', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma',\n beta=self.vars['p_sigma_beta'])\n phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)\n phen = pm.Normal('phen',\n mu=phenotype_mu,\n sd=phenotype_sigma,\n observed=gwas_phen)\n\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n\n return phenotype_model\n\n\n\nclass MeasurementError(BayesianModel):\n \"\"\"\n Use the canonical definition of measurement error as described\n in http://andrewgelman.com/2016/09/04/29847/\n\n \"\"\"\n def __init__(self,\n mediator_mu,\n mediator_sd,\n m_laplace_beta=1,\n p_sigma_beta=10, *args, **kwargs):\n self.name = 'MeasurementError'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'mediator_mu': mediator_mu,\n 'mediator_sd': mediator_sd,\n 'p_sigma_beta': p_sigma_beta,\n }\n super(MeasurementError, self).__init__(*args, **kwargs)\n\n def create_model(self, gwas_mediator, gwas_phen, gwas_error):\n n_samples = gwas_mediator.eval().shape[0]\n with pm.Model() as phenotype_model:\n # Phenotype\n mediator = pm.Normal('mediator',\n mu=self.vars['mediator_mu'],\n sd=self.vars['mediator_sd'],\n shape=n_samples)\n mediator_meas = pm.Normal('mediator_meas',\n mu=mediator,\n sd=gwas_error,\n shape=n_samples,\n observed=gwas_mediator)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Uniform('alpha', lower=-10, upper=10)\n #alpha = pm.Normal('alpha', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma',\n beta=self.vars['p_sigma_beta'])\n phenotype_mu = intercept + alpha * mediator\n phen = pm.Normal('phen',\n mu=phenotype_mu,\n sd=phenotype_sigma,\n observed=gwas_phen) \n\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n\n return phenotype_model\n\nclass MeasurementErrorBF(BayesianModel):\n \"\"\"\n Use the canonical definition of measurement error as described\n in http://andrewgelman.com/2016/09/04/29847/\n\n \"\"\"\n def __init__(self,\n mediator_mu,\n mediator_sd,\n precomp_med=True,\n heritability=0.1,\n p_sigma_beta=10, *args, **kwargs):\n self.name = 'MeasurementErrorBF'\n self.cv_vars = ['gwas_phen', 'gwas_gen']\n self.vars = {'mediator_mu': mediator_mu,\n 'mediator_sd': mediator_sd,\n 'heritability': heritability,\n 'p_sigma_beta': p_sigma_beta,\n 'precomp_med': precomp_med,\n }\n super(MeasurementErrorBF, self).__init__(*args, **kwargs)\n\n def 
create_model(self, gwas_mediator, gwas_phen, gwas_error):\n n_samples = gwas_mediator.eval().shape[0]\n with pm.Model() as phenotype_model:\n\n # Mediator\n mediator = pm.Normal('mediator',\n mu=self.vars['mediator_mu'],\n sd=self.vars['mediator_sd'],\n shape=n_samples)\n mediator_meas = pm.Normal('mediator_meas',\n mu=mediator,\n sd=gwas_error,\n shape=n_samples,\n observed=gwas_mediator)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma',\n beta=self.vars['p_sigma_beta'])\n\n if self.vars['precomp_med']:\n p_var = t.sqr(phenotype_sigma)\n h = self.vars['heritability']\n var_explained = (p_var*h)/(1-h)\n md_var = np.square(np.mean(self.vars['mediator_sd']))\n md_mean_sq = np.square(np.mean(self.vars['mediator_mu'])) \n var_alpha = var_explained/(md_var + md_mean_sq)\n alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))\n else:\n p_var = t.sqr(phenotype_sigma)\n h = self.vars['heritability']\n var_explained = (p_var*h)/(1-h)\n md_var = t.var(mediator)\n md_mean_sq = t.sqr(t.mean(mediator))\n var_alpha = var_explained/(md_var + md_mean_sq)\n alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))\n \n # Model Selection\n p = np.array([0.5, 0.5])\n mediator_model = pm.Bernoulli('mediator_model', p[1])\n\n # Model 1\n phenotype_mu_null = intercept\n\n # Model 2\n phenotype_mu_mediator = intercept + alpha * mediator\n\n phen = pm.DensityDist('phen',\n lambda value: pm.switch(mediator_model, \n pm.Normal.dist(mu=phenotype_mu_mediator, sd=phenotype_sigma).logp(value), \n pm.Normal.dist(mu=phenotype_mu_null, sd=phenotype_sigma).logp(value)\n ),\n observed=gwas_phen)\n self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),\n pm.Metropolis()]\n\n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n\n return phenotype_model",
"step-ids": [
28,
29,
46,
49,
55
]
}
|
[
28,
29,
46,
49,
55
] |
# file = open('suifeng.txt')
# # text = file.read()
# # print(text)
# # file.close()
# with open('suifeng.txt') as f:
# print(f.read())
newList=[]
for i in range(11):
newList.append(i*2)
print(newList)
newList2=[i*2 for i in range(11)]
print(newList2)
list = ["小米","王银龙","王思"]
emptyList=[]
for name in list:
if name.startswith('王'):
emptyList.append(name)
print(emptyList)
print([name for name in list if name.startswith('王')])
|
normal
|
{
"blob_id": "3752b68e151379c57e1494715a45172607f4aead",
"index": 8090,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(11):\n newList.append(i * 2)\nprint(newList)\n<mask token>\nprint(newList2)\n<mask token>\nfor name in list:\n if name.startswith('王'):\n emptyList.append(name)\nprint(emptyList)\nprint([name for name in list if name.startswith('王')])\n",
"step-3": "newList = []\nfor i in range(11):\n newList.append(i * 2)\nprint(newList)\nnewList2 = [(i * 2) for i in range(11)]\nprint(newList2)\nlist = ['小米', '王银龙', '王思']\nemptyList = []\nfor name in list:\n if name.startswith('王'):\n emptyList.append(name)\nprint(emptyList)\nprint([name for name in list if name.startswith('王')])\n",
"step-4": "# file = open('suifeng.txt')\n# # text = file.read()\n# # print(text)\n# # file.close()\n\n# with open('suifeng.txt') as f:\n# print(f.read())\n\n\nnewList=[]\nfor i in range(11):\n newList.append(i*2)\nprint(newList)\n\nnewList2=[i*2 for i in range(11)]\nprint(newList2)\n\n\nlist = [\"小米\",\"王银龙\",\"王思\"]\nemptyList=[]\nfor name in list:\n if name.startswith('王'):\n emptyList.append(name)\nprint(emptyList)\n\nprint([name for name in list if name.startswith('王')])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name=NAME, packages=['compoelem', 'compoelem.generate',
'compoelem.compare', 'compoelem.visualize', 'compoelem.detect',
'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],
include_package_data=True, version=VERSION, description=
'Library for generating and comparing compositional elements from art historic images.'
, author='Tilman Marquart', license='MIT', python_requires='>=3.8',
install_requires=['opencv-python', 'numpy', 'typing', 'shapely',
'pyyaml', 'torch', 'torchvision', 'yacs', 'scikit-image', 'pandas'],
setup_requires=['pytest-runner'], tests_require=['pytest'], test_suite=
'tests')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
NAME = 'compoelem'
VERSION = '0.1.1'
setup(name=NAME, packages=['compoelem', 'compoelem.generate',
'compoelem.compare', 'compoelem.visualize', 'compoelem.detect',
'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],
include_package_data=True, version=VERSION, description=
'Library for generating and comparing compositional elements from art historic images.'
, author='Tilman Marquart', license='MIT', python_requires='>=3.8',
install_requires=['opencv-python', 'numpy', 'typing', 'shapely',
'pyyaml', 'torch', 'torchvision', 'yacs', 'scikit-image', 'pandas'],
setup_requires=['pytest-runner'], tests_require=['pytest'], test_suite=
'tests')
<|reserved_special_token_1|>
from setuptools import find_packages, setup
NAME = 'compoelem'
VERSION = '0.1.1'
setup(name=NAME, packages=['compoelem', 'compoelem.generate',
'compoelem.compare', 'compoelem.visualize', 'compoelem.detect',
'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],
include_package_data=True, version=VERSION, description=
'Library for generating and comparing compositional elements from art historic images.'
, author='Tilman Marquart', license='MIT', python_requires='>=3.8',
install_requires=['opencv-python', 'numpy', 'typing', 'shapely',
'pyyaml', 'torch', 'torchvision', 'yacs', 'scikit-image', 'pandas'],
setup_requires=['pytest-runner'], tests_require=['pytest'], test_suite=
'tests')
<|reserved_special_token_1|>
from setuptools import find_packages, setup
NAME = 'compoelem'
VERSION = "0.1.1"
setup(
name=NAME,
packages=['compoelem', 'compoelem.generate', 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect', 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],
include_package_data=True,
version=VERSION,
description='Library for generating and comparing compositional elements from art historic images.',
author='Tilman Marquart',
license='MIT',
python_requires='>=3.8',
install_requires=['opencv-python','numpy','typing','shapely','pyyaml','torch','torchvision','yacs','scikit-image', 'pandas'],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
test_suite='tests',
)
|
flexible
|
{
"blob_id": "4f81eb7218fa1341bd7f025a34ec0677d46151b0",
"index": 6542,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name=NAME, packages=['compoelem', 'compoelem.generate',\n 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect',\n 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],\n include_package_data=True, version=VERSION, description=\n 'Library for generating and comparing compositional elements from art historic images.'\n , author='Tilman Marquart', license='MIT', python_requires='>=3.8',\n install_requires=['opencv-python', 'numpy', 'typing', 'shapely',\n 'pyyaml', 'torch', 'torchvision', 'yacs', 'scikit-image', 'pandas'],\n setup_requires=['pytest-runner'], tests_require=['pytest'], test_suite=\n 'tests')\n",
"step-3": "<mask token>\nNAME = 'compoelem'\nVERSION = '0.1.1'\nsetup(name=NAME, packages=['compoelem', 'compoelem.generate',\n 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect',\n 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],\n include_package_data=True, version=VERSION, description=\n 'Library for generating and comparing compositional elements from art historic images.'\n , author='Tilman Marquart', license='MIT', python_requires='>=3.8',\n install_requires=['opencv-python', 'numpy', 'typing', 'shapely',\n 'pyyaml', 'torch', 'torchvision', 'yacs', 'scikit-image', 'pandas'],\n setup_requires=['pytest-runner'], tests_require=['pytest'], test_suite=\n 'tests')\n",
"step-4": "from setuptools import find_packages, setup\nNAME = 'compoelem'\nVERSION = '0.1.1'\nsetup(name=NAME, packages=['compoelem', 'compoelem.generate',\n 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect',\n 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],\n include_package_data=True, version=VERSION, description=\n 'Library for generating and comparing compositional elements from art historic images.'\n , author='Tilman Marquart', license='MIT', python_requires='>=3.8',\n install_requires=['opencv-python', 'numpy', 'typing', 'shapely',\n 'pyyaml', 'torch', 'torchvision', 'yacs', 'scikit-image', 'pandas'],\n setup_requires=['pytest-runner'], tests_require=['pytest'], test_suite=\n 'tests')\n",
"step-5": "from setuptools import find_packages, setup\nNAME = 'compoelem'\nVERSION = \"0.1.1\"\nsetup(\n name=NAME,\n packages=['compoelem', 'compoelem.generate', 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect', 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],\n include_package_data=True,\n version=VERSION,\n description='Library for generating and comparing compositional elements from art historic images.',\n author='Tilman Marquart',\n license='MIT',\n python_requires='>=3.8',\n install_requires=['opencv-python','numpy','typing','shapely','pyyaml','torch','torchvision','yacs','scikit-image', 'pandas'],\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n test_suite='tests',\n)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import numpy as np
import datetime as dt
def sum_unique(x):
return np.unique(x).shape[0]
def analyze_count(data):
"""real time, vk, itemid, action"""
dsct_vk = pd.unique(data['vk'])
dsct_itemid = pd.unique(data['itemid'])
print 'number of user:', dsct_vk.shape
print 'number of items:', dsct_itemid.shape
print 'the number of ratings:', data.shape
print 'unique actions:', pd.unique(data['action'])
print 'the number of action 0:', np.sum(data['action'] == 0)
print 'the number of action 1:', np.sum(data['action'] == 1)
print 'the number of action 2:', np.sum(data['action'] == 2)
print 'the number of action 3:', np.sum(data['action'] == 3)
print 'the number of action 4:', np.sum(data['action'] == 4)
time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)
print 'Max Range:', np.max(time_range_item)
print 'Mean Range:', np.mean(time_range_item)
print 'Median Range:', np.median(time_range_item)
|
normal
|
{
"blob_id": "1db16ae1fc6546575150187432265ac1cf834ec2",
"index": 1809,
"step-1": "import pandas as pd\nimport numpy as np\nimport datetime as dt\n\ndef sum_unique(x):\n return np.unique(x).shape[0]\n\ndef analyze_count(data):\n \n \"\"\"real time, vk, itemid, action\"\"\"\n\n dsct_vk = pd.unique(data['vk'])\n dsct_itemid = pd.unique(data['itemid'])\n\n print 'number of user:', dsct_vk.shape\n print 'number of items:', dsct_itemid.shape\n print 'the number of ratings:', data.shape\n\n print 'unique actions:', pd.unique(data['action'])\n print 'the number of action 0:', np.sum(data['action'] == 0)\n print 'the number of action 1:', np.sum(data['action'] == 1)\n print 'the number of action 2:', np.sum(data['action'] == 2)\n print 'the number of action 3:', np.sum(data['action'] == 3)\n print 'the number of action 4:', np.sum(data['action'] == 4)\n \n time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)\n print 'Max Range:', np.max(time_range_item)\n print 'Mean Range:', np.mean(time_range_item)\n print 'Median Range:', np.median(time_range_item)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = patterns('apps.profiles.views', url('^$', 'index', name=
'profiles'), url('^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile',
name='profiles_view'), url('^edit/$', 'edit_profile', name=
'profile_edit'), url('^privacy/$', 'privacy', name='profile_privacy'),
url('^connected_apps/$', 'connected_apps', name=
'profile_connected_apps'), url('^password/$', 'password', name=
'profile_password'), url('^position/$', 'position', name=
'profile_position'), url('^email/$', 'add_email', name=
'profile_add_email'), url('^deleteposition/$', 'delete_position', name=
'profile_delete_position'), url('^email/delete_email/$', 'delete_email',
name='profile_delete_email'), url('^email/set_primary/$', 'set_primary',
name='profile_set_primary'), url('^email/verify_email/$',
'verify_email', name='profile_verify_email'), url(
'^email/toggle_infomail/$', 'toggle_infomail', name=
'profile_toggle_infomail'), url('^email/toggle_jobmail/$',
'toggle_jobmail', name='profile_toggle_jobmail'), url(
'^marks/update_mark_rules/$', 'update_mark_rules', name=
'profile_update_mark_rules'), url('^api_plain_user_search/$',
'api_plain_user_search', name='profiles_api_plain_user_search'), url(
'^api_user_search/$', 'api_user_search', name=
'profiles_api_user_search'), url('^user_search/$', 'user_search', name=
'profiles_user_search'), url('^(?P<active_tab>\\w+)/$', 'index', name=
'profiles_active'))
<|reserved_special_token_1|>
from django.conf.urls import patterns, url
urlpatterns = patterns('apps.profiles.views', url('^$', 'index', name=
'profiles'), url('^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile',
name='profiles_view'), url('^edit/$', 'edit_profile', name=
'profile_edit'), url('^privacy/$', 'privacy', name='profile_privacy'),
url('^connected_apps/$', 'connected_apps', name=
'profile_connected_apps'), url('^password/$', 'password', name=
'profile_password'), url('^position/$', 'position', name=
'profile_position'), url('^email/$', 'add_email', name=
'profile_add_email'), url('^deleteposition/$', 'delete_position', name=
'profile_delete_position'), url('^email/delete_email/$', 'delete_email',
name='profile_delete_email'), url('^email/set_primary/$', 'set_primary',
name='profile_set_primary'), url('^email/verify_email/$',
'verify_email', name='profile_verify_email'), url(
'^email/toggle_infomail/$', 'toggle_infomail', name=
'profile_toggle_infomail'), url('^email/toggle_jobmail/$',
'toggle_jobmail', name='profile_toggle_jobmail'), url(
'^marks/update_mark_rules/$', 'update_mark_rules', name=
'profile_update_mark_rules'), url('^api_plain_user_search/$',
'api_plain_user_search', name='profiles_api_plain_user_search'), url(
'^api_user_search/$', 'api_user_search', name=
'profiles_api_user_search'), url('^user_search/$', 'user_search', name=
'profiles_user_search'), url('^(?P<active_tab>\\w+)/$', 'index', name=
'profiles_active'))
<|reserved_special_token_1|>
# -*- encoding: utf-8 -*-
from django.conf.urls import patterns, url
urlpatterns = patterns('apps.profiles.views',
url(r'^$', 'index', name='profiles'),
# Show a specific profile.
url(r'^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile', name='profiles_view'),
url(r'^edit/$', 'edit_profile', name='profile_edit'),
url(r'^privacy/$', 'privacy', name='profile_privacy'),
url(r'^connected_apps/$', 'connected_apps', name='profile_connected_apps'),
url(r'^password/$', 'password', name='profile_password'),
url(r'^position/$', 'position', name='profile_position'),
url(r'^email/$', 'add_email', name='profile_add_email'),
# Ajax views
url(r'^deleteposition/$', 'delete_position', name='profile_delete_position'),
url(r'^email/delete_email/$', 'delete_email', name='profile_delete_email'),
url(r'^email/set_primary/$', 'set_primary', name='profile_set_primary'),
url(r'^email/verify_email/$', 'verify_email', name='profile_verify_email'),
url(r'^email/toggle_infomail/$', 'toggle_infomail', name='profile_toggle_infomail'),
url(r'^email/toggle_jobmail/$', 'toggle_jobmail', name='profile_toggle_jobmail'),
url(r'^marks/update_mark_rules/$', 'update_mark_rules', name='profile_update_mark_rules'),
# Endpoint that exposes a json lump of all users but only id and name.
url(r'^api_plain_user_search/$', 'api_plain_user_search', name='profiles_api_plain_user_search'),
# Endpoint that exposes a json lump of all users which have set their profile to public.
url(r'^api_user_search/$', 'api_user_search', name='profiles_api_user_search'),
url(r'^user_search/$', 'user_search', name='profiles_user_search'),
# Profile index with active tab.
url(r'^(?P<active_tab>\w+)/$', 'index', name='profiles_active'),
)
|
flexible
|
{
"blob_id": "5707e24596dfe2d85e9a7caa93aa3e253a41ae40",
"index": 6620,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('apps.profiles.views', url('^$', 'index', name=\n 'profiles'), url('^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile',\n name='profiles_view'), url('^edit/$', 'edit_profile', name=\n 'profile_edit'), url('^privacy/$', 'privacy', name='profile_privacy'),\n url('^connected_apps/$', 'connected_apps', name=\n 'profile_connected_apps'), url('^password/$', 'password', name=\n 'profile_password'), url('^position/$', 'position', name=\n 'profile_position'), url('^email/$', 'add_email', name=\n 'profile_add_email'), url('^deleteposition/$', 'delete_position', name=\n 'profile_delete_position'), url('^email/delete_email/$', 'delete_email',\n name='profile_delete_email'), url('^email/set_primary/$', 'set_primary',\n name='profile_set_primary'), url('^email/verify_email/$',\n 'verify_email', name='profile_verify_email'), url(\n '^email/toggle_infomail/$', 'toggle_infomail', name=\n 'profile_toggle_infomail'), url('^email/toggle_jobmail/$',\n 'toggle_jobmail', name='profile_toggle_jobmail'), url(\n '^marks/update_mark_rules/$', 'update_mark_rules', name=\n 'profile_update_mark_rules'), url('^api_plain_user_search/$',\n 'api_plain_user_search', name='profiles_api_plain_user_search'), url(\n '^api_user_search/$', 'api_user_search', name=\n 'profiles_api_user_search'), url('^user_search/$', 'user_search', name=\n 'profiles_user_search'), url('^(?P<active_tab>\\\\w+)/$', 'index', name=\n 'profiles_active'))\n",
"step-3": "from django.conf.urls import patterns, url\nurlpatterns = patterns('apps.profiles.views', url('^$', 'index', name=\n 'profiles'), url('^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile',\n name='profiles_view'), url('^edit/$', 'edit_profile', name=\n 'profile_edit'), url('^privacy/$', 'privacy', name='profile_privacy'),\n url('^connected_apps/$', 'connected_apps', name=\n 'profile_connected_apps'), url('^password/$', 'password', name=\n 'profile_password'), url('^position/$', 'position', name=\n 'profile_position'), url('^email/$', 'add_email', name=\n 'profile_add_email'), url('^deleteposition/$', 'delete_position', name=\n 'profile_delete_position'), url('^email/delete_email/$', 'delete_email',\n name='profile_delete_email'), url('^email/set_primary/$', 'set_primary',\n name='profile_set_primary'), url('^email/verify_email/$',\n 'verify_email', name='profile_verify_email'), url(\n '^email/toggle_infomail/$', 'toggle_infomail', name=\n 'profile_toggle_infomail'), url('^email/toggle_jobmail/$',\n 'toggle_jobmail', name='profile_toggle_jobmail'), url(\n '^marks/update_mark_rules/$', 'update_mark_rules', name=\n 'profile_update_mark_rules'), url('^api_plain_user_search/$',\n 'api_plain_user_search', name='profiles_api_plain_user_search'), url(\n '^api_user_search/$', 'api_user_search', name=\n 'profiles_api_user_search'), url('^user_search/$', 'user_search', name=\n 'profiles_user_search'), url('^(?P<active_tab>\\\\w+)/$', 'index', name=\n 'profiles_active'))\n",
"step-4": "# -*- encoding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns('apps.profiles.views',\n url(r'^$', 'index', name='profiles'),\n\n # Show a specific profile.\n url(r'^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile', name='profiles_view'),\n\n url(r'^edit/$', 'edit_profile', name='profile_edit'),\n url(r'^privacy/$', 'privacy', name='profile_privacy'),\n url(r'^connected_apps/$', 'connected_apps', name='profile_connected_apps'),\n url(r'^password/$', 'password', name='profile_password'),\n url(r'^position/$', 'position', name='profile_position'),\n url(r'^email/$', 'add_email', name='profile_add_email'),\n\n # Ajax views\n url(r'^deleteposition/$', 'delete_position', name='profile_delete_position'),\n url(r'^email/delete_email/$', 'delete_email', name='profile_delete_email'),\n url(r'^email/set_primary/$', 'set_primary', name='profile_set_primary'),\n url(r'^email/verify_email/$', 'verify_email', name='profile_verify_email'),\n url(r'^email/toggle_infomail/$', 'toggle_infomail', name='profile_toggle_infomail'),\n url(r'^email/toggle_jobmail/$', 'toggle_jobmail', name='profile_toggle_jobmail'),\n url(r'^marks/update_mark_rules/$', 'update_mark_rules', name='profile_update_mark_rules'),\n \n # Endpoint that exposes a json lump of all users but only id and name. \n url(r'^api_plain_user_search/$', 'api_plain_user_search', name='profiles_api_plain_user_search'),\n\n # Endpoint that exposes a json lump of all users which have set their profile to public.\n url(r'^api_user_search/$', 'api_user_search', name='profiles_api_user_search'),\n url(r'^user_search/$', 'user_search', name='profiles_user_search'),\n\n # Profile index with active tab.\n url(r'^(?P<active_tab>\\w+)/$', 'index', name='profiles_active'),\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bar = aws.elasticache.get_replication_group(replication_group_id='example')
<|reserved_special_token_1|>
import pulumi
import pulumi_aws as aws
bar = aws.elasticache.get_replication_group(replication_group_id='example')
<|reserved_special_token_1|>
import pulumi
import pulumi_aws as aws
bar = aws.elasticache.get_replication_group(replication_group_id="example")
|
flexible
|
{
"blob_id": "4bf140ae01f2eaa0c67f667766c3ec921d552066",
"index": 6073,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nbar = aws.elasticache.get_replication_group(replication_group_id='example')\n",
"step-3": "import pulumi\nimport pulumi_aws as aws\nbar = aws.elasticache.get_replication_group(replication_group_id='example')\n",
"step-4": "import pulumi\nimport pulumi_aws as aws\n\nbar = aws.elasticache.get_replication_group(replication_group_id=\"example\")\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
check if word appear in file
'''
# easier solution :
def findKeyInFile(word, filepath):
with open(filepath) as f:
for line in f.readlines():
if line.count(word) > 0:
return line
return None
|
normal
|
{
"blob_id": "97fb2388777bcb459b9818495121fdf8318095ca",
"index": 8881,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef findKeyInFile(word, filepath):\n with open(filepath) as f:\n for line in f.readlines():\n if line.count(word) > 0:\n return line\n return None\n",
"step-3": "'''\ncheck if word appear in file\n'''\n# easier solution :\ndef findKeyInFile(word, filepath):\n with open(filepath) as f:\n for line in f.readlines():\n if line.count(word) > 0:\n return line\n return None\n\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class TriviaTestCase(unittest.TestCase):
<|reserved_special_token_0|>
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app()
self.client = self.app.test_client
self.database_path = DB_PATH
setup_db(self.app, self.database_path)
self.question_to_delete = Question(question='What?', answer='huh!',
category=1, difficulty=1)
self.new_question = {'question': 'What?', 'answer': 'What',
'category': 1, 'difficulty': 1}
self.quizz = {'previous_questions': [1, 3], 'quiz_category': {'id':
1, 'type': 'Science'}}
with self.app.app_context():
self.db = SQLAlchemy()
self.db.init_app(self.app)
self.db.create_all()
def tearDown(self):
"""Executed after reach test"""
pass
def test_get_categories_if_success(self):
res = self.client().get('/categories')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['categories'])
def test_get_categories_if_non_existing_category(self):
res = self.client().get('/categories/10000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_get_questions_if_success(self):
res = self.client().get('/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['questions'])
self.assertTrue(data['total_questions'])
self.assertTrue(data['categories'])
self.assertIsNone(data['current_category'])
def test_get_questions_if_invalid_page(self):
res = self.client().get('/questions?page=10000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_delete_question_if_success(self):
self.question_to_delete.insert()
res = self.client().delete(f'/questions/{self.question_to_delete.id}')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertEqual(data['deleted_question'], self.question_to_delete.id)
self.assertTrue(data['questions'])
import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question
DB_HOST = os.getenv('DB_HOST', '127.0.0.1:5432')
DB_USER = os.getenv('DB_USER', 'postgres')
DB_PASSWORD = os.getenv('DB_PASSWORD', 'postgres')
DB_NAME = os.getenv('DB_NAME', 'trivia_test')
DB_PATH = 'postgresql+psycopg2://{}:{}@{}/{}'.\
format(DB_USER, DB_PASSWORD, DB_HOST, DB_NAME)
class TriviaTestCase(unittest.TestCase):
"""This class represents the trivia test case"""
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app()
self.client = self.app.test_client
self.database_path = DB_PATH
setup_db(self.app, self.database_path)
self.question_to_delete = Question(
question='What?',
answer='huh!',
category=1,
difficulty=1
)
self.new_question = {
'question': 'What?',
'answer': 'What',
'category': 1,
'difficulty': 1
}
self.quizz = {
'previous_questions': [1, 3],
'quiz_category': {'id': 1, 'type': 'Science'}
}
# binds the app to the current context
with self.app.app_context():
self.db = SQLAlchemy()
self.db.init_app(self.app)
# create all tables
self.db.create_all()
def tearDown(self):
"""Executed after reach test"""
pass
def test_get_categories_if_success(self):
res = self.client().get('/categories')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['categories'])
def test_get_categories_if_non_existing_category(self):
res = self.client().get('/categories/10000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_get_questions_if_success(self):
res = self.client().get('/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['questions'])
self.assertTrue(data['total_questions'])
self.assertTrue(data['categories'])
self.assertIsNone(data['current_category'])
def test_get_questions_if_invalid_page(self):
res = self.client().get('/questions?page=10000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_delete_question_if_success(self):
self.question_to_delete.insert()
res = self.client().delete(f'/questions/{self.question_to_delete.id}')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertEqual(data['deleted_question'], self.question_to_delete.id)
self.assertTrue(data['questions'])
self.assertTrue(data['total_questions'])
    def test_delete_question_if_non_existing_question(self):
res = self.client().delete('/questions/100000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'unprocessable')
def test_create_question_if_success(self):
res = self.client().post('/questions', json=self.new_question)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['created_question'])
def test_create_question_if_bad_endpoint(self):
res = self.client().post('/questions/45', json=self.new_question)
data = json.loads(res.data)
self.assertEqual(res.status_code, 405)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'method not allowed')
def test_search_questions_with_results(self):
res = self.client().post(
'/questions/search', json={'search': 'Van Gogh'}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['total_questions'])
self.assertEqual(len(data['questions']), 1)
def test_search_questions_without_results(self):
res = self.client().post(
'/questions/search', json={'search': 'Weird search'}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertEqual(data['total_questions'], 0)
self.assertEqual(len(data['questions']), 0)
def test_search_questions_failure(self):
res = self.client().post(
'/questions/search', json={'wrong_key': 'Van Gogh'}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 400)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'bad request')
def test_get_questions_by_category_if_success(self):
res = self.client().get('/categories/1/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['questions'])
self.assertTrue(data['total_questions'])
self.assertEqual(data['current_category'], 1)
def test_get_questions_by_category_if_failure(self):
res = self.client().get('/categories/10000/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_get_quizz_question_if_success(self):
res = self.client().post('/quizzes', json=self.quizz)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['question'])
def test_get_quizz_question_if_bad_request(self):
res = self.client().post('/quizzes/4', json=self.quizz)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
"""
TODO
Write at least one test for each test for successful
operation and for expected errors.
"""
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
'''
# AWS::Chatbot Construct Library
AWS Chatbot is an AWS service that enables DevOps and software development teams to use Slack chat rooms to monitor and respond to operational events in their AWS Cloud. AWS Chatbot processes AWS service notifications from Amazon Simple Notification Service (Amazon SNS), and forwards them to Slack chat rooms so teams can analyze and act on them immediately, regardless of location.
This module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.
```python
import aws_cdk.aws_chatbot as chatbot
import aws_cdk.aws_sns as sns
import aws_cdk.aws_iam as iam
slack_channel = chatbot.SlackChannelConfiguration(self, "MySlackChannel",
slack_channel_configuration_name="YOUR_CHANNEL_NAME",
slack_workspace_id="YOUR_SLACK_WORKSPACE_ID",
slack_channel_id="YOUR_SLACK_CHANNEL_ID"
)
slack_channel.add_to_role_policy(iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=["s3:GetObject"
],
resources=["arn:aws:s3:::abc/xyz/123.txt"]
))
slack_channel.add_notification_topic(sns.Topic(self, "MyTopic"))
```
## Log Group
A Slack channel configuration automatically creates a log group with the name `/aws/chatbot/<configuration-name>` in `us-east-1` upon first execution, with
log data set to never expire.
The `logRetention` property can be used to set a different expiration period. A log group will be created if one does not already exist.
If the log group already exists, its expiration will be configured to the value specified in this construct (never expire, by default).
By default, CDK uses the AWS SDK retry options when interacting with the log group. The `logRetentionRetryOptions` property
allows you to customize the maximum number of retries and base backoff duration.
*Note* that, if `logRetention` is set, a [CloudFormation custom
resource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html) is added
to the stack that pre-creates the log group as part of the stack deployment, if it already doesn't exist, and sets the
correct log retention period (never expire, by default).
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import constructs
from .. import (
CfnResource as _CfnResource_9df397a6,
Duration as _Duration_4839e8c3,
IInspectable as _IInspectable_c2943556,
IResolvable as _IResolvable_da3f097b,
IResource as _IResource_c80c4260,
Resource as _Resource_45bc6135,
TreeInspector as _TreeInspector_488e0dd5,
)
from ..aws_cloudwatch import (
Metric as _Metric_e396a4dc,
MetricOptions as _MetricOptions_1788b62f,
Unit as _Unit_61bc6f70,
)
from ..aws_codestarnotifications import (
INotificationRuleTarget as _INotificationRuleTarget_faa3b79b,
NotificationRuleTargetConfig as _NotificationRuleTargetConfig_ea27e095,
)
from ..aws_iam import (
IGrantable as _IGrantable_71c4f5de,
IPrincipal as _IPrincipal_539bb2fd,
IRole as _IRole_235f5d8e,
PolicyStatement as _PolicyStatement_0fe33853,
)
from ..aws_logs import (
LogRetentionRetryOptions as _LogRetentionRetryOptions_62d80a14,
RetentionDays as _RetentionDays_070f99f0,
)
from ..aws_sns import ITopic as _ITopic_9eca4852
@jsii.implements(_IInspectable_c2943556)
class CfnSlackChannelConfiguration(
_CfnResource_9df397a6,
metaclass=jsii.JSIIMeta,
jsii_type="aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration",
):
'''A CloudFormation ``AWS::Chatbot::SlackChannelConfiguration``.
The ``AWS::Chatbot::SlackChannelConfiguration`` resource configures a Slack channel to allow users to use AWS Chatbot with AWS CloudFormation templates.
This resource requires some setup to be done in the AWS Chatbot console. To provide the required Slack workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console, then copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
:cloudformationResource: AWS::Chatbot::SlackChannelConfiguration
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html
:exampleMetadata: fixture=_generated
Example::
# The code below shows an example of how to instantiate this type.
# The values are placeholders you should change.
from aws_cdk import aws_chatbot as chatbot
cfn_slack_channel_configuration = chatbot.CfnSlackChannelConfiguration(self, "MyCfnSlackChannelConfiguration",
configuration_name="configurationName",
iam_role_arn="iamRoleArn",
slack_channel_id="slackChannelId",
slack_workspace_id="slackWorkspaceId",
# the properties below are optional
guardrail_policies=["guardrailPolicies"],
logging_level="loggingLevel",
sns_topic_arns=["snsTopicArns"],
user_role_required=False
)
'''
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
configuration_name: builtins.str,
iam_role_arn: builtins.str,
slack_channel_id: builtins.str,
slack_workspace_id: builtins.str,
guardrail_policies: typing.Optional[typing.Sequence[builtins.str]] = None,
logging_level: typing.Optional[builtins.str] = None,
sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]] = None,
user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,
) -> None:
'''Create a new ``AWS::Chatbot::SlackChannelConfiguration``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param configuration_name: The name of the configuration.
        :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .
:param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .
:param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
:param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.
:param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .
:param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .
:param user_role_required: Enables use of a user role requirement in your chat configuration.
'''
if __debug__:
type_hints = typing.get_type_hints(CfnSlackChannelConfiguration.__init__)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
props = CfnSlackChannelConfigurationProps(
configuration_name=configuration_name,
iam_role_arn=iam_role_arn,
slack_channel_id=slack_channel_id,
slack_workspace_id=slack_workspace_id,
guardrail_policies=guardrail_policies,
logging_level=logging_level,
sns_topic_arns=sns_topic_arns,
user_role_required=user_role_required,
)
jsii.create(self.__class__, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: _TreeInspector_488e0dd5) -> None:
'''Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
'''
if __debug__:
type_hints = typing.get_type_hints(CfnSlackChannelConfiguration.inspect)
check_type(argname="argument inspector", value=inspector, expected_type=type_hints["inspector"])
return typing.cast(None, jsii.invoke(self, "inspect", [inspector]))
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
if __debug__:
type_hints = typing.get_type_hints(CfnSlackChannelConfiguration._render_properties)
check_type(argname="argument props", value=props, expected_type=type_hints["props"])
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The CloudFormation resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrArn")
def attr_arn(self) -> builtins.str:
'''
:cloudformationAttribute: Arn
'''
return typing.cast(builtins.str, jsii.get(self, "attrArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "cfnProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="configurationName")
def configuration_name(self) -> builtins.str:
'''The name of the configuration.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname
'''
return typing.cast(builtins.str, jsii.get(self, "configurationName"))
@configuration_name.setter
def configuration_name(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "configuration_name").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "configurationName", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="iamRoleArn")
def iam_role_arn(self) -> builtins.str:
'''The ARN of the IAM role that defines the permissions for AWS Chatbot .
        This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn
'''
return typing.cast(builtins.str, jsii.get(self, "iamRoleArn"))
@iam_role_arn.setter
def iam_role_arn(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "iam_role_arn").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "iamRoleArn", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelId")
def slack_channel_id(self) -> builtins.str:
'''The ID of the Slack channel.
To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid
'''
return typing.cast(builtins.str, jsii.get(self, "slackChannelId"))
@slack_channel_id.setter
def slack_channel_id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "slack_channel_id").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "slackChannelId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackWorkspaceId")
def slack_workspace_id(self) -> builtins.str:
'''The ID of the Slack workspace authorized with AWS Chatbot .
To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid
'''
return typing.cast(builtins.str, jsii.get(self, "slackWorkspaceId"))
@slack_workspace_id.setter
def slack_workspace_id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "slack_workspace_id").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "slackWorkspaceId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="guardrailPolicies")
def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]:
'''The list of IAM policy ARNs that are applied as channel guardrails.
The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies
'''
return typing.cast(typing.Optional[typing.List[builtins.str]], jsii.get(self, "guardrailPolicies"))
@guardrail_policies.setter
def guardrail_policies(
self,
value: typing.Optional[typing.List[builtins.str]],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "guardrail_policies").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "guardrailPolicies", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="loggingLevel")
def logging_level(self) -> typing.Optional[builtins.str]:
'''Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.
Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel
'''
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "loggingLevel"))
@logging_level.setter
def logging_level(self, value: typing.Optional[builtins.str]) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "logging_level").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "loggingLevel", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="snsTopicArns")
def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]:
'''The ARNs of the SNS topics that deliver notifications to AWS Chatbot .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns
'''
return typing.cast(typing.Optional[typing.List[builtins.str]], jsii.get(self, "snsTopicArns"))
@sns_topic_arns.setter
def sns_topic_arns(self, value: typing.Optional[typing.List[builtins.str]]) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "sns_topic_arns").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "snsTopicArns", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="userRoleRequired")
def user_role_required(
self,
) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
'''Enables use of a user role requirement in your chat configuration.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired
'''
return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], jsii.get(self, "userRoleRequired"))
@user_role_required.setter
def user_role_required(
self,
value: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "user_role_required").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "userRoleRequired", value)
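# Illustrative sketch (assumed usage; the stack name, construct id, role ARN and
# Slack ids below are hypothetical placeholders): instantiating the L1
# CfnSlackChannelConfiguration above inside a CDK stack.
#
#   import aws_cdk as cdk
#   from aws_cdk import aws_chatbot as chatbot
#
#   app = cdk.App()
#   stack = cdk.Stack(app, "ChatOpsStack")
#   chatbot.CfnSlackChannelConfiguration(
#       stack, "SlackConfig",
#       configuration_name="my-slack-config",
#       iam_role_arn="arn:aws:iam::123456789012:role/ChatbotRole",
#       slack_channel_id="ABCBBLZZZ",
#       slack_workspace_id="T0123456789",
#       logging_level="ERROR",
#   )
#   app.synth()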
@jsii.data_type(
jsii_type="aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps",
jsii_struct_bases=[],
name_mapping={
"configuration_name": "configurationName",
"iam_role_arn": "iamRoleArn",
"slack_channel_id": "slackChannelId",
"slack_workspace_id": "slackWorkspaceId",
"guardrail_policies": "guardrailPolicies",
"logging_level": "loggingLevel",
"sns_topic_arns": "snsTopicArns",
"user_role_required": "userRoleRequired",
},
)
class CfnSlackChannelConfigurationProps:
def __init__(
self,
*,
configuration_name: builtins.str,
iam_role_arn: builtins.str,
slack_channel_id: builtins.str,
slack_workspace_id: builtins.str,
guardrail_policies: typing.Optional[typing.Sequence[builtins.str]] = None,
logging_level: typing.Optional[builtins.str] = None,
sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]] = None,
user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,
) -> None:
'''Properties for defining a ``CfnSlackChannelConfiguration``.
:param configuration_name: The name of the configuration.
:param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .
:param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .
:param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
:param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.
:param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .
:param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .
:param user_role_required: Enables use of a user role requirement in your chat configuration.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html
:exampleMetadata: fixture=_generated
Example::
# The code below shows an example of how to instantiate this type.
# The values are placeholders you should change.
from aws_cdk import aws_chatbot as chatbot
cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps(
configuration_name="configurationName",
iam_role_arn="iamRoleArn",
slack_channel_id="slackChannelId",
slack_workspace_id="slackWorkspaceId",
# the properties below are optional
guardrail_policies=["guardrailPolicies"],
logging_level="loggingLevel",
sns_topic_arns=["snsTopicArns"],
user_role_required=False
)
'''
if __debug__:
type_hints = typing.get_type_hints(CfnSlackChannelConfigurationProps.__init__)
check_type(argname="argument configuration_name", value=configuration_name, expected_type=type_hints["configuration_name"])
check_type(argname="argument iam_role_arn", value=iam_role_arn, expected_type=type_hints["iam_role_arn"])
check_type(argname="argument slack_channel_id", value=slack_channel_id, expected_type=type_hints["slack_channel_id"])
check_type(argname="argument slack_workspace_id", value=slack_workspace_id, expected_type=type_hints["slack_workspace_id"])
check_type(argname="argument guardrail_policies", value=guardrail_policies, expected_type=type_hints["guardrail_policies"])
check_type(argname="argument logging_level", value=logging_level, expected_type=type_hints["logging_level"])
check_type(argname="argument sns_topic_arns", value=sns_topic_arns, expected_type=type_hints["sns_topic_arns"])
check_type(argname="argument user_role_required", value=user_role_required, expected_type=type_hints["user_role_required"])
self._values: typing.Dict[str, typing.Any] = {
"configuration_name": configuration_name,
"iam_role_arn": iam_role_arn,
"slack_channel_id": slack_channel_id,
"slack_workspace_id": slack_workspace_id,
}
if guardrail_policies is not None:
self._values["guardrail_policies"] = guardrail_policies
if logging_level is not None:
self._values["logging_level"] = logging_level
if sns_topic_arns is not None:
self._values["sns_topic_arns"] = sns_topic_arns
if user_role_required is not None:
self._values["user_role_required"] = user_role_required
@builtins.property
def configuration_name(self) -> builtins.str:
'''The name of the configuration.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname
'''
result = self._values.get("configuration_name")
assert result is not None, "Required property 'configuration_name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def iam_role_arn(self) -> builtins.str:
'''The ARN of the IAM role that defines the permissions for AWS Chatbot .
This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn
'''
result = self._values.get("iam_role_arn")
assert result is not None, "Required property 'iam_role_arn' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def slack_channel_id(self) -> builtins.str:
'''The ID of the Slack channel.
To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid
'''
result = self._values.get("slack_channel_id")
assert result is not None, "Required property 'slack_channel_id' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def slack_workspace_id(self) -> builtins.str:
'''The ID of the Slack workspace authorized with AWS Chatbot .
To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid
'''
result = self._values.get("slack_workspace_id")
assert result is not None, "Required property 'slack_workspace_id' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]:
'''The list of IAM policy ARNs that are applied as channel guardrails.
The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies
'''
result = self._values.get("guardrail_policies")
return typing.cast(typing.Optional[typing.List[builtins.str]], result)
@builtins.property
def logging_level(self) -> typing.Optional[builtins.str]:
'''Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.
Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel
'''
result = self._values.get("logging_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]:
'''The ARNs of the SNS topics that deliver notifications to AWS Chatbot .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns
'''
result = self._values.get("sns_topic_arns")
return typing.cast(typing.Optional[typing.List[builtins.str]], result)
@builtins.property
def user_role_required(
self,
) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
'''Enables use of a user role requirement in your chat configuration.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired
'''
result = self._values.get("user_role_required")
return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnSlackChannelConfigurationProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
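# Illustrative sketch (assumed usage; all values are placeholders): the props
# class above is a plain value object, so it can be built once, its attributes
# read back, and two instances compared by value via __eq__.
#
#   props = chatbot.CfnSlackChannelConfigurationProps(
#       configuration_name="my-slack-config",
#       iam_role_arn="arn:aws:iam::123456789012:role/ChatbotRole",
#       slack_channel_id="ABCBBLZZZ",
#       slack_workspace_id="T0123456789",
#   )
#   assert props.slack_channel_id == "ABCBBLZZZ"
#   assert props == chatbot.CfnSlackChannelConfigurationProps(
#       configuration_name="my-slack-config",
#       iam_role_arn="arn:aws:iam::123456789012:role/ChatbotRole",
#       slack_channel_id="ABCBBLZZZ",
#       slack_workspace_id="T0123456789",
#   )  # value equality, not identity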
@jsii.interface(jsii_type="aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration")
class ISlackChannelConfiguration(
_IResource_c80c4260,
_IGrantable_71c4f5de,
_INotificationRuleTarget_faa3b79b,
typing_extensions.Protocol,
):
'''Represents a Slack channel configuration.'''
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationArn")
def slack_channel_configuration_arn(self) -> builtins.str:
'''The ARN of the Slack channel configuration, in the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.
:attribute: true
'''
...
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationName")
def slack_channel_configuration_name(self) -> builtins.str:
'''The name of the Slack channel configuration.
:attribute: true
'''
...
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="role")
def role(self) -> typing.Optional[_IRole_235f5d8e]:
'''The permission role of the Slack channel configuration.
:default: - A role will be created.
:attribute: true
'''
...
@jsii.member(jsii_name="addToRolePolicy")
def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:
'''Adds a statement to the IAM role.
:param statement: -
'''
...
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[_Duration_4839e8c3] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[_Unit_61bc6f70] = None,
) -> _Metric_e396a4dc:
'''Return the given named metric for this SlackChannelConfiguration.
:param metric_name: -
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
class _ISlackChannelConfigurationProxy(
jsii.proxy_for(_IResource_c80c4260), # type: ignore[misc]
jsii.proxy_for(_IGrantable_71c4f5de), # type: ignore[misc]
jsii.proxy_for(_INotificationRuleTarget_faa3b79b), # type: ignore[misc]
):
'''Represents a Slack channel configuration.'''
__jsii_type__: typing.ClassVar[str] = "aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration"
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationArn")
def slack_channel_configuration_arn(self) -> builtins.str:
'''The ARN of the Slack channel configuration, in the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.
:attribute: true
'''
return typing.cast(builtins.str, jsii.get(self, "slackChannelConfigurationArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationName")
def slack_channel_configuration_name(self) -> builtins.str:
'''The name of the Slack channel configuration.
:attribute: true
'''
return typing.cast(builtins.str, jsii.get(self, "slackChannelConfigurationName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="role")
def role(self) -> typing.Optional[_IRole_235f5d8e]:
'''The permission role of the Slack channel configuration.
:default: - A role will be created.
:attribute: true
'''
return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self, "role"))
@jsii.member(jsii_name="addToRolePolicy")
def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:
'''Adds a statement to the IAM role.
:param statement: -
'''
if __debug__:
type_hints = typing.get_type_hints(ISlackChannelConfiguration.add_to_role_policy)
check_type(argname="argument statement", value=statement, expected_type=type_hints["statement"])
return typing.cast(None, jsii.invoke(self, "addToRolePolicy", [statement]))
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[_Duration_4839e8c3] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[_Unit_61bc6f70] = None,
) -> _Metric_e396a4dc:
'''Return the given named metric for this SlackChannelConfiguration.
:param metric_name: -
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
if __debug__:
type_hints = typing.get_type_hints(ISlackChannelConfiguration.metric)
check_type(argname="argument metric_name", value=metric_name, expected_type=type_hints["metric_name"])
props = _MetricOptions_1788b62f(
account=account,
color=color,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(_Metric_e396a4dc, jsii.invoke(self, "metric", [metric_name, props]))
# Adding a "__jsii_proxy_class__(): typing.Type" function to the interface
typing.cast(typing.Any, ISlackChannelConfiguration).__jsii_proxy_class__ = lambda : _ISlackChannelConfigurationProxy
@jsii.enum(jsii_type="aws-cdk-lib.aws_chatbot.LoggingLevel")
class LoggingLevel(enum.Enum):
'''Logging levels include ERROR, INFO, or NONE.'''
ERROR = "ERROR"
'''ERROR.'''
INFO = "INFO"
'''INFO.'''
NONE = "NONE"
'''NONE.'''
@jsii.implements(ISlackChannelConfiguration)
class SlackChannelConfiguration(
_Resource_45bc6135,
metaclass=jsii.JSIIMeta,
jsii_type="aws-cdk-lib.aws_chatbot.SlackChannelConfiguration",
):
'''A new Slack channel configuration.
:exampleMetadata: infused
Example::
import aws_cdk.aws_chatbot as chatbot
# project: codebuild.Project
target = chatbot.SlackChannelConfiguration(self, "MySlackChannel",
slack_channel_configuration_name="YOUR_CHANNEL_NAME",
slack_workspace_id="YOUR_SLACK_WORKSPACE_ID",
slack_channel_id="YOUR_SLACK_CHANNEL_ID"
)
rule = project.notify_on_build_succeeded("NotifyOnBuildSucceeded", target)
'''
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
slack_channel_configuration_name: builtins.str,
slack_channel_id: builtins.str,
slack_workspace_id: builtins.str,
logging_level: typing.Optional[LoggingLevel] = None,
log_retention: typing.Optional[_RetentionDays_070f99f0] = None,
log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14] = None,
log_retention_role: typing.Optional[_IRole_235f5d8e] = None,
notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]] = None,
role: typing.Optional[_IRole_235f5d8e] = None,
) -> None:
'''
:param scope: -
:param id: -
:param slack_channel_configuration_name: The name of the Slack channel configuration.
:param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.
:param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.
:param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE
:param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE
:param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.
:param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.
:param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None
:param role: The permission role of the Slack channel configuration. Default: - A role will be created.
'''
if __debug__:
type_hints = typing.get_type_hints(SlackChannelConfiguration.__init__)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
props = SlackChannelConfigurationProps(
slack_channel_configuration_name=slack_channel_configuration_name,
slack_channel_id=slack_channel_id,
slack_workspace_id=slack_workspace_id,
logging_level=logging_level,
log_retention=log_retention,
log_retention_retry_options=log_retention_retry_options,
log_retention_role=log_retention_role,
notification_topics=notification_topics,
role=role,
)
jsii.create(self.__class__, self, [scope, id, props])
@jsii.member(jsii_name="fromSlackChannelConfigurationArn") # type: ignore[misc]
@builtins.classmethod
def from_slack_channel_configuration_arn(
cls,
scope: constructs.Construct,
id: builtins.str,
slack_channel_configuration_arn: builtins.str,
) -> ISlackChannelConfiguration:
'''Import an existing Slack channel configuration provided an ARN.
:param scope: The parent creating construct.
:param id: The construct's name.
:param slack_channel_configuration_arn: configuration ARN (i.e. arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).
:return: a reference to the existing Slack channel configuration
'''
if __debug__:
type_hints = typing.get_type_hints(SlackChannelConfiguration.from_slack_channel_configuration_arn)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
check_type(argname="argument slack_channel_configuration_arn", value=slack_channel_configuration_arn, expected_type=type_hints["slack_channel_configuration_arn"])
return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls, "fromSlackChannelConfigurationArn", [scope, id, slack_channel_configuration_arn]))
@jsii.member(jsii_name="metricAll") # type: ignore[misc]
@builtins.classmethod
def metric_all(
cls,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[_Duration_4839e8c3] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[_Unit_61bc6f70] = None,
) -> _Metric_e396a4dc:
'''Return the given named metric for All SlackChannelConfigurations.
:param metric_name: -
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
if __debug__:
type_hints = typing.get_type_hints(SlackChannelConfiguration.metric_all)
check_type(argname="argument metric_name", value=metric_name, expected_type=type_hints["metric_name"])
props = _MetricOptions_1788b62f(
account=account,
color=color,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, "metricAll", [metric_name, props]))
@jsii.member(jsii_name="addNotificationTopic")
def add_notification_topic(self, notification_topic: _ITopic_9eca4852) -> None:
'''Adds an SNS topic that delivers notifications to AWS Chatbot.
:param notification_topic: -
'''
if __debug__:
type_hints = typing.get_type_hints(SlackChannelConfiguration.add_notification_topic)
check_type(argname="argument notification_topic", value=notification_topic, expected_type=type_hints["notification_topic"])
return typing.cast(None, jsii.invoke(self, "addNotificationTopic", [notification_topic]))
@jsii.member(jsii_name="addToRolePolicy")
def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:
'''Adds extra permissions to the IAM role of the Slack channel configuration.
:param statement: -
'''
if __debug__:
type_hints = typing.get_type_hints(SlackChannelConfiguration.add_to_role_policy)
check_type(argname="argument statement", value=statement, expected_type=type_hints["statement"])
return typing.cast(None, jsii.invoke(self, "addToRolePolicy", [statement]))
@jsii.member(jsii_name="bindAsNotificationRuleTarget")
def bind_as_notification_rule_target(
self,
_scope: constructs.Construct,
) -> _NotificationRuleTargetConfig_ea27e095:
'''Returns a target configuration for a notification rule.
:param _scope: -
'''
if __debug__:
type_hints = typing.get_type_hints(SlackChannelConfiguration.bind_as_notification_rule_target)
check_type(argname="argument _scope", value=_scope, expected_type=type_hints["_scope"])
return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.invoke(self, "bindAsNotificationRuleTarget", [_scope]))
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[_Duration_4839e8c3] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[_Unit_61bc6f70] = None,
) -> _Metric_e396a4dc:
'''Return the given named metric for this SlackChannelConfiguration.
:param metric_name: -
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
if __debug__:
type_hints = typing.get_type_hints(SlackChannelConfiguration.metric)
check_type(argname="argument metric_name", value=metric_name, expected_type=type_hints["metric_name"])
props = _MetricOptions_1788b62f(
account=account,
color=color,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(_Metric_e396a4dc, jsii.invoke(self, "metric", [metric_name, props]))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="grantPrincipal")
def grant_principal(self) -> _IPrincipal_539bb2fd:
'''The principal to grant permissions to.'''
return typing.cast(_IPrincipal_539bb2fd, jsii.get(self, "grantPrincipal"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationArn")
def slack_channel_configuration_arn(self) -> builtins.str:
'''The ARN of the Slack channel configuration, in the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.'''
return typing.cast(builtins.str, jsii.get(self, "slackChannelConfigurationArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationName")
def slack_channel_configuration_name(self) -> builtins.str:
'''The name of the Slack channel configuration.'''
return typing.cast(builtins.str, jsii.get(self, "slackChannelConfigurationName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="role")
def role(self) -> typing.Optional[_IRole_235f5d8e]:
'''The permission role of the Slack channel configuration.'''
return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self, "role"))
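# Illustrative sketch (assumed usage; construct ids, the topic name and the
# metric name are placeholders): wiring the L2 SlackChannelConfiguration above
# to an SNS topic and alarming on one of its CloudWatch metrics via metric().
#
#   import aws_cdk as cdk
#   from aws_cdk import aws_chatbot as chatbot, aws_cloudwatch as cloudwatch, aws_sns as sns
#
#   stack = cdk.Stack(cdk.App(), "ChatOpsStack")
#   channel = chatbot.SlackChannelConfiguration(
#       stack, "MySlackChannel",
#       slack_channel_configuration_name="alerts",
#       slack_workspace_id="YOUR_SLACK_WORKSPACE_ID",
#       slack_channel_id="YOUR_SLACK_CHANNEL_ID",
#       logging_level=chatbot.LoggingLevel.ERROR,
#   )
#   channel.add_notification_topic(sns.Topic(stack, "AlertsTopic"))
#   cloudwatch.Alarm(
#       stack, "ThrottleAlarm",
#       metric=channel.metric("EventsThrottled", statistic="sum"),
#       threshold=1,
#       evaluation_periods=1,
#   )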
@jsii.data_type(
jsii_type="aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps",
jsii_struct_bases=[],
name_mapping={
"slack_channel_configuration_name": "slackChannelConfigurationName",
"slack_channel_id": "slackChannelId",
"slack_workspace_id": "slackWorkspaceId",
"logging_level": "loggingLevel",
"log_retention": "logRetention",
"log_retention_retry_options": "logRetentionRetryOptions",
"log_retention_role": "logRetentionRole",
"notification_topics": "notificationTopics",
"role": "role",
},
)
class SlackChannelConfigurationProps:
def __init__(
self,
*,
slack_channel_configuration_name: builtins.str,
slack_channel_id: builtins.str,
slack_workspace_id: builtins.str,
logging_level: typing.Optional[LoggingLevel] = None,
log_retention: typing.Optional[_RetentionDays_070f99f0] = None,
log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14] = None,
log_retention_role: typing.Optional[_IRole_235f5d8e] = None,
notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]] = None,
role: typing.Optional[_IRole_235f5d8e] = None,
) -> None:
'''Properties for a new Slack channel configuration.
:param slack_channel_configuration_name: The name of the Slack channel configuration.
:param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.
:param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.
:param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE
:param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE
:param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.
:param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.
:param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None
:param role: The permission role of the Slack channel configuration. Default: - A role will be created.
:exampleMetadata: infused
Example::
import aws_cdk.aws_chatbot as chatbot
# project: codebuild.Project
target = chatbot.SlackChannelConfiguration(self, "MySlackChannel",
slack_channel_configuration_name="YOUR_CHANNEL_NAME",
slack_workspace_id="YOUR_SLACK_WORKSPACE_ID",
slack_channel_id="YOUR_SLACK_CHANNEL_ID"
)
rule = project.notify_on_build_succeeded("NotifyOnBuildSucceeded", target)
'''
if isinstance(log_retention_retry_options, dict):
log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**log_retention_retry_options)
if __debug__:
type_hints = typing.get_type_hints(SlackChannelConfigurationProps.__init__)
check_type(argname="argument slack_channel_configuration_name", value=slack_channel_configuration_name, expected_type=type_hints["slack_channel_configuration_name"])
check_type(argname="argument slack_channel_id", value=slack_channel_id, expected_type=type_hints["slack_channel_id"])
check_type(argname="argument slack_workspace_id", value=slack_workspace_id, expected_type=type_hints["slack_workspace_id"])
check_type(argname="argument logging_level", value=logging_level, expected_type=type_hints["logging_level"])
check_type(argname="argument log_retention", value=log_retention, expected_type=type_hints["log_retention"])
check_type(argname="argument log_retention_retry_options", value=log_retention_retry_options, expected_type=type_hints["log_retention_retry_options"])
check_type(argname="argument log_retention_role", value=log_retention_role, expected_type=type_hints["log_retention_role"])
check_type(argname="argument notification_topics", value=notification_topics, expected_type=type_hints["notification_topics"])
check_type(argname="argument role", value=role, expected_type=type_hints["role"])
self._values: typing.Dict[str, typing.Any] = {
"slack_channel_configuration_name": slack_channel_configuration_name,
"slack_channel_id": slack_channel_id,
"slack_workspace_id": slack_workspace_id,
}
if logging_level is not None:
self._values["logging_level"] = logging_level
if log_retention is not None:
self._values["log_retention"] = log_retention
if log_retention_retry_options is not None:
self._values["log_retention_retry_options"] = log_retention_retry_options
if log_retention_role is not None:
self._values["log_retention_role"] = log_retention_role
if notification_topics is not None:
self._values["notification_topics"] = notification_topics
if role is not None:
self._values["role"] = role
@builtins.property
def slack_channel_configuration_name(self) -> builtins.str:
'''The name of the Slack channel configuration.'''
result = self._values.get("slack_channel_configuration_name")
assert result is not None, "Required property 'slack_channel_configuration_name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def slack_channel_id(self) -> builtins.str:
'''The ID of the Slack channel.
To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.
The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.
'''
result = self._values.get("slack_channel_id")
assert result is not None, "Required property 'slack_channel_id' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def slack_workspace_id(self) -> builtins.str:
'''The ID of the Slack workspace authorized with AWS Chatbot.
To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.
Then you can copy and paste the workspace ID from the console.
For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.
:see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro
'''
result = self._values.get("slack_workspace_id")
assert result is not None, "Required property 'slack_workspace_id' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def logging_level(self) -> typing.Optional[LoggingLevel]:
'''Specifies the logging level for this configuration.
This property affects the log entries pushed to Amazon CloudWatch Logs.
:default: LoggingLevel.NONE
'''
result = self._values.get("logging_level")
return typing.cast(typing.Optional[LoggingLevel], result)
@builtins.property
def log_retention(self) -> typing.Optional[_RetentionDays_070f99f0]:
'''The number of days log events are kept in CloudWatch Logs.
When updating
this property, unsetting it doesn't remove the log retention policy. To
remove the retention policy, set the value to ``INFINITE``.
:default: logs.RetentionDays.INFINITE
'''
result = self._values.get("log_retention")
return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)
@builtins.property
def log_retention_retry_options(
self,
) -> typing.Optional[_LogRetentionRetryOptions_62d80a14]:
'''When log retention is specified, a custom resource attempts to create the CloudWatch log group.
These options control the retry policy when interacting with CloudWatch APIs.
:default: - Default AWS SDK retry options.
'''
result = self._values.get("log_retention_retry_options")
return typing.cast(typing.Optional[_LogRetentionRetryOptions_62d80a14], result)
@builtins.property
def log_retention_role(self) -> typing.Optional[_IRole_235f5d8e]:
'''The IAM role for the Lambda function associated with the custom resource that sets the retention policy.
:default: - A new role is created.
'''
result = self._values.get("log_retention_role")
return typing.cast(typing.Optional[_IRole_235f5d8e], result)
@builtins.property
def notification_topics(self) -> typing.Optional[typing.List[_ITopic_9eca4852]]:
'''The SNS topics that deliver notifications to AWS Chatbot.
:default: None
'''
result = self._values.get("notification_topics")
return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]], result)
@builtins.property
def role(self) -> typing.Optional[_IRole_235f5d8e]:
'''The permission role of the Slack channel configuration.
:default: - A role will be created.
'''
result = self._values.get("role")
return typing.cast(typing.Optional[_IRole_235f5d8e], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "SlackChannelConfigurationProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"CfnSlackChannelConfiguration",
"CfnSlackChannelConfigurationProps",
"ISlackChannelConfiguration",
"LoggingLevel",
"SlackChannelConfiguration",
"SlackChannelConfigurationProps",
]
publication.publish()
log_retention_retry_options: typing.Optional[\n _LogRetentionRetryOptions_62d80a14]=None, log_retention_role:\n typing.Optional[_IRole_235f5d8e]=None, notification_topics: typing.\n Optional[typing.Sequence[_ITopic_9eca4852]]=None, role: typing.\n Optional[_IRole_235f5d8e]=None) ->None:\n \"\"\"Properties for a new Slack channel configuration.\n\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. 
Default: - A role will be created.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n if isinstance(log_retention_retry_options, dict):\n log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**\n log_retention_retry_options)\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfigurationProps\n .__init__)\n check_type(argname='argument slack_channel_configuration_name',\n value=slack_channel_configuration_name, expected_type=\n type_hints['slack_channel_configuration_name'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument log_retention', value=\n log_retention, expected_type=type_hints['log_retention'])\n check_type(argname='argument log_retention_retry_options',\n value=log_retention_retry_options, expected_type=type_hints\n ['log_retention_retry_options'])\n check_type(argname='argument log_retention_role', value=\n log_retention_role, expected_type=type_hints[\n 'log_retention_role'])\n check_type(argname='argument notification_topics', value=\n notification_topics, expected_type=type_hints[\n 'notification_topics'])\n check_type(argname='argument role', value=role, expected_type=\n type_hints['role'])\n self._values: typing.Dict[str, typing.Any] = {\n 'slack_channel_configuration_name':\n slack_channel_configuration_name, 'slack_channel_id':\n slack_channel_id, 'slack_workspace_id': slack_workspace_id}\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if log_retention is not None:\n self._values['log_retention'] = log_retention\n if log_retention_retry_options is not None:\n self._values['log_retention_retry_options'\n ] = log_retention_retry_options\n if log_retention_role is not None:\n self._values['log_retention_role'] = log_retention_role\n if notification_topics is not None:\n self._values['notification_topics'] = notification_topics\n if role is not None:\n self._values['role'] = role\n\n @builtins.property\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n result = self._values.get('slack_channel_configuration_name')\n assert result is not None, \"Required property 'slack_channel_configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.\n The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ.\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot.\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.\n Then you can copy and paste the workspace ID from the console.\n For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n\n :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[LoggingLevel]:\n \"\"\"Specifies the logging level for this configuration.\n\n This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n :default: LoggingLevel.NONE\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[LoggingLevel], result)\n\n @builtins.property\n def log_retention(self) ->typing.Optional[_RetentionDays_070f99f0]:\n \"\"\"The number of days log events are kept in CloudWatch Logs.\n\n When updating\n this property, unsetting it doesn't remove the log retention policy. To\n remove the retention policy, set the value to ``INFINITE``.\n\n :default: logs.RetentionDays.INFINITE\n \"\"\"\n result = self._values.get('log_retention')\n return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)\n\n @builtins.property\n def log_retention_retry_options(self) ->typing.Optional[\n _LogRetentionRetryOptions_62d80a14]:\n \"\"\"When log retention is specified, a custom resource attempts to create the CloudWatch log group.\n\n These options control the retry policy when interacting with CloudWatch APIs.\n\n :default: - Default AWS SDK retry options.\n \"\"\"\n result = self._values.get('log_retention_retry_options')\n return typing.cast(typing.Optional[\n _LogRetentionRetryOptions_62d80a14], result)\n\n @builtins.property\n def log_retention_role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The IAM role for the Lambda function associated with the custom resource that sets the retention policy.\n\n :default: - A new role is created.\n \"\"\"\n result = self._values.get('log_retention_role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n @builtins.property\n def notification_topics(self) ->typing.Optional[typing.List[\n _ITopic_9eca4852]]:\n \"\"\"The SNS topics that deliver notifications to AWS Chatbot.\n\n :default: None\n \"\"\"\n result = self._values.get('notification_topics')\n return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]],\n result)\n\n @builtins.property\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n \"\"\"\n result = self._values.get('role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 'SlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for 
k, v in self._values.items())\n\n\n<mask token>\n",
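The step entry above closes the L2 ``SlackChannelConfiguration`` surface from the generated bindings: the construct itself, ``from_slack_channel_configuration_arn``, the ``metric``/``metric_all`` helpers, ``add_notification_topic``, ``add_to_role_policy``, and ``SlackChannelConfigurationProps``. A minimal hedged sketch of how those members combine is given below; the stack/app names, the imported ARN, the topic name, the metric name, and the policy statement are illustrative assumptions, while the chatbot calls themselves are the ones named in the bindings above.

# Hedged sketch exercising the SlackChannelConfiguration members shown above.
# Stack name, ARN, topic name, metric name and IAM statement are placeholders.
from aws_cdk import App, Stack, Duration
from aws_cdk import aws_chatbot as chatbot
from aws_cdk import aws_iam as iam
from aws_cdk import aws_sns as sns
from constructs import Construct


class ChatOpsStack(Stack):
    def __init__(self, scope: Construct, construct_id: str) -> None:
        super().__init__(scope, construct_id)

        # Import an existing configuration by ARN (placeholder account/name).
        imported = chatbot.SlackChannelConfiguration.from_slack_channel_configuration_arn(
            self, "Imported",
            "arn:aws:chatbot::123456789012:chat-configuration/slack-channel/my-slack",
        )

        # Create a new configuration and wire an SNS topic into it.
        topic = sns.Topic(self, "AlarmTopic")
        channel = chatbot.SlackChannelConfiguration(
            self, "MySlackChannel",
            slack_channel_configuration_name="YOUR_CHANNEL_NAME",
            slack_workspace_id="YOUR_SLACK_WORKSPACE_ID",
            slack_channel_id="YOUR_SLACK_CHANNEL_ID",
            logging_level=chatbot.LoggingLevel.INFO,
        )
        channel.add_notification_topic(topic)

        # Extra permission on the configuration's role (statement is illustrative).
        channel.add_to_role_policy(iam.PolicyStatement(
            actions=["cloudwatch:GetMetricData"],
            resources=["*"],
        ))

        # Per-configuration metric, mirroring the ``metric`` signature above;
        # "ThrottleCount" is a placeholder metric name.
        throttle_metric = channel.metric(
            "ThrottleCount", period=Duration.minutes(5), statistic="Sum",
        )


app = App()
ChatOpsStack(app, "ChatOpsStack")
app.synth()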
"step-2": "<mask token>\n\n\[email protected](_IInspectable_c2943556)\nclass CfnSlackChannelConfiguration(_CfnResource_9df397a6, metaclass=jsii.\n JSIIMeta, jsii_type='aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration'\n ):\n <mask token>\n <mask token>\n <mask token>\n\n @jsii.member(jsii_name='renderProperties')\n def _render_properties(self, props: typing.Mapping[builtins.str, typing\n .Any]) ->typing.Mapping[builtins.str, typing.Any]:\n \"\"\"\n :param props: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration\n ._render_properties)\n check_type(argname='argument props', value=props, expected_type\n =type_hints['props'])\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.\n invoke(self, 'renderProperties', [props]))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @configuration_name.setter\n def configuration_name(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'configuration_name').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'configurationName', value)\n <mask token>\n <mask token>\n <mask token>\n\n @slack_channel_id.setter\n def slack_channel_id(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'slack_channel_id').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'slackChannelId', value)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\[email protected]_type(jsii_type=\n 'aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps',\n jsii_struct_bases=[], name_mapping={'configuration_name':\n 'configurationName', 'iam_role_arn': 'iamRoleArn', 'slack_channel_id':\n 'slackChannelId', 'slack_workspace_id': 'slackWorkspaceId',\n 'guardrail_policies': 'guardrailPolicies', 'logging_level':\n 'loggingLevel', 'sns_topic_arns': 'snsTopicArns', 'user_role_required':\n 'userRoleRequired'})\nclass CfnSlackChannelConfigurationProps:\n\n def __init__(self, *, configuration_name: builtins.str, iam_role_arn:\n builtins.str, slack_channel_id: builtins.str, slack_workspace_id:\n builtins.str, guardrail_policies: typing.Optional[typing.Sequence[\n builtins.str]]=None, logging_level: typing.Optional[builtins.str]=\n None, sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]\n ]=None, user_role_required: typing.Optional[typing.Union[builtins.\n bool, _IResolvable_da3f097b]]=None) ->None:\n \"\"\"Properties for defining a ``CfnSlackChannelConfiguration``.\n\n :param configuration_name: The name of the configuration.\n :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . 
To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html\n :exampleMetadata: fixture=_generated\n\n Example::\n\n # The code below shows an example of how to instantiate this type.\n # The values are placeholders you should change.\n from aws_cdk import aws_chatbot as chatbot\n \n cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps(\n configuration_name=\"configurationName\",\n iam_role_arn=\"iamRoleArn\",\n slack_channel_id=\"slackChannelId\",\n slack_workspace_id=\"slackWorkspaceId\",\n \n # the properties below are optional\n guardrail_policies=[\"guardrailPolicies\"],\n logging_level=\"loggingLevel\",\n sns_topic_arns=[\"snsTopicArns\"],\n user_role_required=False\n )\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(\n CfnSlackChannelConfigurationProps.__init__)\n check_type(argname='argument configuration_name', value=\n configuration_name, expected_type=type_hints[\n 'configuration_name'])\n check_type(argname='argument iam_role_arn', value=iam_role_arn,\n expected_type=type_hints['iam_role_arn'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument guardrail_policies', value=\n guardrail_policies, expected_type=type_hints[\n 'guardrail_policies'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument sns_topic_arns', value=\n sns_topic_arns, expected_type=type_hints['sns_topic_arns'])\n check_type(argname='argument user_role_required', value=\n user_role_required, expected_type=type_hints[\n 'user_role_required'])\n self._values: typing.Dict[str, typing.Any] = {'configuration_name':\n configuration_name, 'iam_role_arn': iam_role_arn,\n 'slack_channel_id': slack_channel_id, 'slack_workspace_id':\n slack_workspace_id}\n if guardrail_policies is not None:\n self._values['guardrail_policies'] = guardrail_policies\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if sns_topic_arns is not None:\n self._values['sns_topic_arns'] = sns_topic_arns\n if user_role_required is not None:\n self._values['user_role_required'] = user_role_required\n\n @builtins.property\n def configuration_name(self) ->builtins.str:\n \"\"\"The name of the configuration.\n\n :link: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname\n \"\"\"\n result = self._values.get('configuration_name')\n assert result is not None, \"Required property 'configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def iam_role_arn(self) ->builtins.str:\n \"\"\"The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\n This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn\n \"\"\"\n result = self._values.get('iam_role_arn')\n assert result is not None, \"Required property 'iam_role_arn' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot .\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def guardrail_policies(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The list of IAM policy ARNs that are applied as channel guardrails.\n\n The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n \"\"\"\n result = self._values.get('guardrail_policies')\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[builtins.str]:\n \"\"\"Specifies the logging level for this configuration. 
This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[builtins.str], result)\n\n @builtins.property\n def sns_topic_arns(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n \"\"\"\n result = self._values.get('sns_topic_arns')\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def user_role_required(self) ->typing.Optional[typing.Union[builtins.\n bool, _IResolvable_da3f097b]]:\n \"\"\"Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired\n \"\"\"\n result = self._values.get('user_role_required')\n return typing.cast(typing.Optional[typing.Union[builtins.bool,\n _IResolvable_da3f097b]], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 'CfnSlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for k, v in self._values.items())\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration')\nclass ISlackChannelConfiguration(_IResource_c80c4260, _IGrantable_71c4f5de,\n _INotificationRuleTarget_faa3b79b, typing_extensions.Protocol):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n ...\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n ...\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n ...\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n ...\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: 
typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n ...\n\n\nclass _ISlackChannelConfigurationProxy(jsii.proxy_for(_IResource_c80c4260),\n jsii.proxy_for(_IGrantable_71c4f5de), jsii.proxy_for(\n _INotificationRuleTarget_faa3b79b)):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n __jsii_type__: typing.ClassVar[str\n ] = 'aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration'\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. 
Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n metric)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n\n<mask token>\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.LoggingLevel')\nclass LoggingLevel(enum.Enum):\n \"\"\"Logging levels include ERROR, INFO, or NONE.\"\"\"\n ERROR = 'ERROR'\n \"\"\"ERROR.\"\"\"\n INFO = 'INFO'\n \"\"\"INFO.\"\"\"\n NONE = 'NONE'\n \"\"\"NONE.\"\"\"\n\n\[email protected](ISlackChannelConfiguration)\nclass SlackChannelConfiguration(_Resource_45bc6135, metaclass=jsii.JSIIMeta,\n jsii_type='aws-cdk-lib.aws_chatbot.SlackChannelConfiguration'):\n \"\"\"A new Slack channel configuration.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n\n def __init__(self, scope: constructs.Construct, id: builtins.str, *,\n slack_channel_configuration_name: builtins.str, slack_channel_id:\n builtins.str, slack_workspace_id: builtins.str, logging_level:\n typing.Optional[LoggingLevel]=None, log_retention: typing.Optional[\n _RetentionDays_070f99f0]=None, log_retention_retry_options: typing.\n Optional[_LogRetentionRetryOptions_62d80a14]=None,\n log_retention_role: typing.Optional[_IRole_235f5d8e]=None,\n notification_topics: typing.Optional[typing.Sequence[\n _ITopic_9eca4852]]=None, role: typing.Optional[_IRole_235f5d8e]=None\n ) ->None:\n \"\"\"\n :param scope: -\n :param id: -\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. Default: - A role will be created.\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n __init__)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n props = SlackChannelConfigurationProps(slack_channel_configuration_name\n =slack_channel_configuration_name, slack_channel_id=\n slack_channel_id, slack_workspace_id=slack_workspace_id,\n logging_level=logging_level, log_retention=log_retention,\n log_retention_retry_options=log_retention_retry_options,\n log_retention_role=log_retention_role, notification_topics=\n notification_topics, role=role)\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name='fromSlackChannelConfigurationArn')\n @builtins.classmethod\n def from_slack_channel_configuration_arn(cls, scope: constructs.\n Construct, id: builtins.str, slack_channel_configuration_arn:\n builtins.str) ->ISlackChannelConfiguration:\n \"\"\"Import an existing Slack channel configuration provided an ARN.\n\n :param scope: The parent creating construct.\n :param id: The construct's name.\n :param slack_channel_configuration_arn: configuration ARN (i.e. 
arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).\n\n :return: a reference to the existing Slack channel configuration\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n from_slack_channel_configuration_arn)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n check_type(argname='argument slack_channel_configuration_arn',\n value=slack_channel_configuration_arn, expected_type=\n type_hints['slack_channel_configuration_arn'])\n return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls,\n 'fromSlackChannelConfigurationArn', [scope, id,\n slack_channel_configuration_arn]))\n\n @jsii.member(jsii_name='metricAll')\n @builtins.classmethod\n def metric_all(cls, metric_name: builtins.str, *, account: typing.\n Optional[builtins.str]=None, color: typing.Optional[builtins.str]=\n None, dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for All SlackChannelConfigurations.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n metric_all)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, 'metricAll',\n [metric_name, props]))\n\n @jsii.member(jsii_name='addNotificationTopic')\n def add_notification_topic(self, notification_topic: _ITopic_9eca4852\n ) ->None:\n \"\"\"Adds a SNS topic that deliver notifications to AWS Chatbot.\n\n :param notification_topic: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_notification_topic)\n check_type(argname='argument notification_topic', value=\n notification_topic, expected_type=type_hints[\n 'notification_topic'])\n return typing.cast(None, jsii.invoke(self, 'addNotificationTopic',\n [notification_topic]))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds extra permission to iam-role of Slack channel configuration.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='bindAsNotificationRuleTarget')\n def bind_as_notification_rule_target(self, _scope: constructs.Construct\n ) ->_NotificationRuleTargetConfig_ea27e095:\n \"\"\"Returns a target configuration for notification rule.\n\n :param _scope: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n bind_as_notification_rule_target)\n check_type(argname='argument _scope', value=_scope,\n expected_type=type_hints['_scope'])\n return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.\n invoke(self, 'bindAsNotificationRuleTarget', [_scope]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. 
For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.metric\n )\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n @builtins.property\n @jsii.member(jsii_name='grantPrincipal')\n def grant_principal(self) ->_IPrincipal_539bb2fd:\n \"\"\"The principal to grant permissions to.\"\"\"\n return typing.cast(_IPrincipal_539bb2fd, jsii.get(self,\n 'grantPrincipal'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n\[email protected]_type(jsii_type=\n 'aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps',\n jsii_struct_bases=[], name_mapping={'slack_channel_configuration_name':\n 'slackChannelConfigurationName', 'slack_channel_id': 'slackChannelId',\n 'slack_workspace_id': 'slackWorkspaceId', 'logging_level':\n 'loggingLevel', 'log_retention': 'logRetention',\n 'log_retention_retry_options': 'logRetentionRetryOptions',\n 'log_retention_role': 'logRetentionRole', 'notification_topics':\n 'notificationTopics', 'role': 'role'})\nclass SlackChannelConfigurationProps:\n\n def __init__(self, *, slack_channel_configuration_name: builtins.str,\n slack_channel_id: builtins.str, slack_workspace_id: builtins.str,\n logging_level: typing.Optional[LoggingLevel]=None, log_retention:\n typing.Optional[_RetentionDays_070f99f0]=None,\n 
log_retention_retry_options: typing.Optional[\n _LogRetentionRetryOptions_62d80a14]=None, log_retention_role:\n typing.Optional[_IRole_235f5d8e]=None, notification_topics: typing.\n Optional[typing.Sequence[_ITopic_9eca4852]]=None, role: typing.\n Optional[_IRole_235f5d8e]=None) ->None:\n \"\"\"Properties for a new Slack channel configuration.\n\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. 
Default: - A role will be created.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n if isinstance(log_retention_retry_options, dict):\n log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**\n log_retention_retry_options)\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfigurationProps\n .__init__)\n check_type(argname='argument slack_channel_configuration_name',\n value=slack_channel_configuration_name, expected_type=\n type_hints['slack_channel_configuration_name'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument log_retention', value=\n log_retention, expected_type=type_hints['log_retention'])\n check_type(argname='argument log_retention_retry_options',\n value=log_retention_retry_options, expected_type=type_hints\n ['log_retention_retry_options'])\n check_type(argname='argument log_retention_role', value=\n log_retention_role, expected_type=type_hints[\n 'log_retention_role'])\n check_type(argname='argument notification_topics', value=\n notification_topics, expected_type=type_hints[\n 'notification_topics'])\n check_type(argname='argument role', value=role, expected_type=\n type_hints['role'])\n self._values: typing.Dict[str, typing.Any] = {\n 'slack_channel_configuration_name':\n slack_channel_configuration_name, 'slack_channel_id':\n slack_channel_id, 'slack_workspace_id': slack_workspace_id}\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if log_retention is not None:\n self._values['log_retention'] = log_retention\n if log_retention_retry_options is not None:\n self._values['log_retention_retry_options'\n ] = log_retention_retry_options\n if log_retention_role is not None:\n self._values['log_retention_role'] = log_retention_role\n if notification_topics is not None:\n self._values['notification_topics'] = notification_topics\n if role is not None:\n self._values['role'] = role\n\n @builtins.property\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n result = self._values.get('slack_channel_configuration_name')\n assert result is not None, \"Required property 'slack_channel_configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.\n The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ.\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot.\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.\n Then you can copy and paste the workspace ID from the console.\n For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n\n :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[LoggingLevel]:\n \"\"\"Specifies the logging level for this configuration.\n\n This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n :default: LoggingLevel.NONE\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[LoggingLevel], result)\n\n @builtins.property\n def log_retention(self) ->typing.Optional[_RetentionDays_070f99f0]:\n \"\"\"The number of days log events are kept in CloudWatch Logs.\n\n When updating\n this property, unsetting it doesn't remove the log retention policy. To\n remove the retention policy, set the value to ``INFINITE``.\n\n :default: logs.RetentionDays.INFINITE\n \"\"\"\n result = self._values.get('log_retention')\n return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)\n\n @builtins.property\n def log_retention_retry_options(self) ->typing.Optional[\n _LogRetentionRetryOptions_62d80a14]:\n \"\"\"When log retention is specified, a custom resource attempts to create the CloudWatch log group.\n\n These options control the retry policy when interacting with CloudWatch APIs.\n\n :default: - Default AWS SDK retry options.\n \"\"\"\n result = self._values.get('log_retention_retry_options')\n return typing.cast(typing.Optional[\n _LogRetentionRetryOptions_62d80a14], result)\n\n @builtins.property\n def log_retention_role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The IAM role for the Lambda function associated with the custom resource that sets the retention policy.\n\n :default: - A new role is created.\n \"\"\"\n result = self._values.get('log_retention_role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n @builtins.property\n def notification_topics(self) ->typing.Optional[typing.List[\n _ITopic_9eca4852]]:\n \"\"\"The SNS topics that deliver notifications to AWS Chatbot.\n\n :default: None\n \"\"\"\n result = self._values.get('notification_topics')\n return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]],\n result)\n\n @builtins.property\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n \"\"\"\n result = self._values.get('role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 'SlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for 
k, v in self._values.items())\n\n\n<mask token>\n",
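The entry above ends with the high-level SlackChannelConfigurationProps struct. As a minimal, illustrative sketch of how that construct is typically wired into a CDK app (the stack name, workspace ID and channel ID below are placeholders, not values taken from this file):

from aws_cdk import App, Stack
from aws_cdk import aws_chatbot as chatbot
from aws_cdk import aws_sns as sns
from constructs import Construct


class ChatOpsStack(Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # An SNS topic whose messages AWS Chatbot forwards to the Slack channel.
        alerts = sns.Topic(self, 'AlertsTopic')
        # The three required props mirror SlackChannelConfigurationProps above.
        chatbot.SlackChannelConfiguration(
            self, 'MySlackChannel',
            slack_channel_configuration_name='alerts',
            slack_workspace_id='T0123456789',  # placeholder workspace ID
            slack_channel_id='C0123456789',    # placeholder channel ID
            logging_level=chatbot.LoggingLevel.ERROR,
            notification_topics=[alerts],
        )


app = App()
ChatOpsStack(app, 'ChatOpsStack')
app.synth()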
"step-3": "<mask token>\n\n\[email protected](_IInspectable_c2943556)\nclass CfnSlackChannelConfiguration(_CfnResource_9df397a6, metaclass=jsii.\n JSIIMeta, jsii_type='aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration'\n ):\n <mask token>\n <mask token>\n <mask token>\n\n @jsii.member(jsii_name='renderProperties')\n def _render_properties(self, props: typing.Mapping[builtins.str, typing\n .Any]) ->typing.Mapping[builtins.str, typing.Any]:\n \"\"\"\n :param props: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration\n ._render_properties)\n check_type(argname='argument props', value=props, expected_type\n =type_hints['props'])\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.\n invoke(self, 'renderProperties', [props]))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @configuration_name.setter\n def configuration_name(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'configuration_name').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'configurationName', value)\n <mask token>\n <mask token>\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelId')\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self, 'slackChannelId'))\n\n @slack_channel_id.setter\n def slack_channel_id(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'slack_channel_id').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'slackChannelId', value)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @builtins.property\n @jsii.member(jsii_name='loggingLevel')\n def logging_level(self) ->typing.Optional[builtins.str]:\n \"\"\"Specifies the logging level for this configuration. 
This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n        Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n        \"\"\"\n        return typing.cast(typing.Optional[builtins.str], jsii.get(self,\n            'loggingLevel'))\n\n    @logging_level.setter\n    def logging_level(self, value: typing.Optional[builtins.str]) ->None:\n        if __debug__:\n            type_hints = typing.get_type_hints(getattr(\n                CfnSlackChannelConfiguration, 'logging_level').fset)\n            check_type(argname='argument value', value=value, expected_type\n                =type_hints['value'])\n        jsii.set(self, 'loggingLevel', value)\n\n    @builtins.property\n    @jsii.member(jsii_name='snsTopicArns')\n    def sns_topic_arns(self) ->typing.Optional[typing.List[builtins.str]]:\n        \"\"\"The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n        \"\"\"\n        return typing.cast(typing.Optional[typing.List[builtins.str]], jsii\n            .get(self, 'snsTopicArns'))\n\n    @sns_topic_arns.setter\n    def sns_topic_arns(self, value: typing.Optional[typing.List[builtins.str]]\n        ) ->None:\n        if __debug__:\n            type_hints = typing.get_type_hints(getattr(\n                CfnSlackChannelConfiguration, 'sns_topic_arns').fset)\n            check_type(argname='argument value', value=value, expected_type\n                =type_hints['value'])\n        jsii.set(self, 'snsTopicArns', value)\n    <mask token>\n    <mask token>\n\n\n@jsii.data_type(jsii_type=\n    'aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps',\n    jsii_struct_bases=[], name_mapping={'configuration_name':\n    'configurationName', 'iam_role_arn': 'iamRoleArn', 'slack_channel_id':\n    'slackChannelId', 'slack_workspace_id': 'slackWorkspaceId',\n    'guardrail_policies': 'guardrailPolicies', 'logging_level':\n    'loggingLevel', 'sns_topic_arns': 'snsTopicArns', 'user_role_required':\n    'userRoleRequired'})\nclass CfnSlackChannelConfigurationProps:\n\n    def __init__(self, *, configuration_name: builtins.str, iam_role_arn:\n        builtins.str, slack_channel_id: builtins.str, slack_workspace_id:\n        builtins.str, guardrail_policies: typing.Optional[typing.Sequence[\n        builtins.str]]=None, logging_level: typing.Optional[builtins.str]=\n        None, sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]\n        ]=None, user_role_required: typing.Optional[typing.Union[builtins.\n        bool, _IResolvable_da3f097b]]=None) ->None:\n        \"\"\"Properties for defining a ``CfnSlackChannelConfiguration``.\n\n        :param configuration_name: The name of the configuration.\n        :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n        :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n        :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. 
Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html\n :exampleMetadata: fixture=_generated\n\n Example::\n\n # The code below shows an example of how to instantiate this type.\n # The values are placeholders you should change.\n from aws_cdk import aws_chatbot as chatbot\n \n cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps(\n configuration_name=\"configurationName\",\n iam_role_arn=\"iamRoleArn\",\n slack_channel_id=\"slackChannelId\",\n slack_workspace_id=\"slackWorkspaceId\",\n \n # the properties below are optional\n guardrail_policies=[\"guardrailPolicies\"],\n logging_level=\"loggingLevel\",\n sns_topic_arns=[\"snsTopicArns\"],\n user_role_required=False\n )\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(\n CfnSlackChannelConfigurationProps.__init__)\n check_type(argname='argument configuration_name', value=\n configuration_name, expected_type=type_hints[\n 'configuration_name'])\n check_type(argname='argument iam_role_arn', value=iam_role_arn,\n expected_type=type_hints['iam_role_arn'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument guardrail_policies', value=\n guardrail_policies, expected_type=type_hints[\n 'guardrail_policies'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument sns_topic_arns', value=\n sns_topic_arns, expected_type=type_hints['sns_topic_arns'])\n check_type(argname='argument user_role_required', value=\n user_role_required, expected_type=type_hints[\n 'user_role_required'])\n self._values: typing.Dict[str, typing.Any] = {'configuration_name':\n configuration_name, 'iam_role_arn': iam_role_arn,\n 'slack_channel_id': slack_channel_id, 'slack_workspace_id':\n slack_workspace_id}\n if guardrail_policies is not None:\n self._values['guardrail_policies'] = guardrail_policies\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if sns_topic_arns is not None:\n self._values['sns_topic_arns'] = sns_topic_arns\n if user_role_required is not None:\n self._values['user_role_required'] = user_role_required\n\n @builtins.property\n def configuration_name(self) ->builtins.str:\n \"\"\"The name of the configuration.\n\n :link: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname\n        \"\"\"\n        result = self._values.get('configuration_name')\n        assert result is not None, \"Required property 'configuration_name' is missing\"\n        return typing.cast(builtins.str, result)\n\n    @builtins.property\n    def iam_role_arn(self) ->builtins.str:\n        \"\"\"The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\n        This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n\n        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn\n        \"\"\"\n        result = self._values.get('iam_role_arn')\n        assert result is not None, \"Required property 'iam_role_arn' is missing\"\n        return typing.cast(builtins.str, result)\n\n    @builtins.property\n    def slack_channel_id(self) ->builtins.str:\n        \"\"\"The ID of the Slack channel.\n\n        To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n\n        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n        \"\"\"\n        result = self._values.get('slack_channel_id')\n        assert result is not None, \"Required property 'slack_channel_id' is missing\"\n        return typing.cast(builtins.str, result)\n\n    @builtins.property\n    def slack_workspace_id(self) ->builtins.str:\n        \"\"\"The ID of the Slack workspace authorized with AWS Chatbot .\n\n        To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n        \"\"\"\n        result = self._values.get('slack_workspace_id')\n        assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n        return typing.cast(builtins.str, result)\n\n    @builtins.property\n    def guardrail_policies(self) ->typing.Optional[typing.List[builtins.str]]:\n        \"\"\"The list of IAM policy ARNs that are applied as channel guardrails.\n\n        The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n        \"\"\"\n        result = self._values.get('guardrail_policies')\n        return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n    @builtins.property\n    def logging_level(self) ->typing.Optional[builtins.str]:\n        \"\"\"Specifies the logging level for this configuration. 
This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[builtins.str], result)\n\n @builtins.property\n def sns_topic_arns(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n \"\"\"\n result = self._values.get('sns_topic_arns')\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def user_role_required(self) ->typing.Optional[typing.Union[builtins.\n bool, _IResolvable_da3f097b]]:\n \"\"\"Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired\n \"\"\"\n result = self._values.get('user_role_required')\n return typing.cast(typing.Optional[typing.Union[builtins.bool,\n _IResolvable_da3f097b]], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 'CfnSlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for k, v in self._values.items())\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration')\nclass ISlackChannelConfiguration(_IResource_c80c4260, _IGrantable_71c4f5de,\n _INotificationRuleTarget_faa3b79b, typing_extensions.Protocol):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n ...\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n ...\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n ...\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n ...\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: 
typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n ...\n\n\nclass _ISlackChannelConfigurationProxy(jsii.proxy_for(_IResource_c80c4260),\n jsii.proxy_for(_IGrantable_71c4f5de), jsii.proxy_for(\n _INotificationRuleTarget_faa3b79b)):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n __jsii_type__: typing.ClassVar[str\n ] = 'aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration'\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. 
Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n metric)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n\n<mask token>\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.LoggingLevel')\nclass LoggingLevel(enum.Enum):\n \"\"\"Logging levels include ERROR, INFO, or NONE.\"\"\"\n ERROR = 'ERROR'\n \"\"\"ERROR.\"\"\"\n INFO = 'INFO'\n \"\"\"INFO.\"\"\"\n NONE = 'NONE'\n \"\"\"NONE.\"\"\"\n\n\[email protected](ISlackChannelConfiguration)\nclass SlackChannelConfiguration(_Resource_45bc6135, metaclass=jsii.JSIIMeta,\n jsii_type='aws-cdk-lib.aws_chatbot.SlackChannelConfiguration'):\n \"\"\"A new Slack channel configuration.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n\n def __init__(self, scope: constructs.Construct, id: builtins.str, *,\n slack_channel_configuration_name: builtins.str, slack_channel_id:\n builtins.str, slack_workspace_id: builtins.str, logging_level:\n typing.Optional[LoggingLevel]=None, log_retention: typing.Optional[\n _RetentionDays_070f99f0]=None, log_retention_retry_options: typing.\n Optional[_LogRetentionRetryOptions_62d80a14]=None,\n log_retention_role: typing.Optional[_IRole_235f5d8e]=None,\n notification_topics: typing.Optional[typing.Sequence[\n _ITopic_9eca4852]]=None, role: typing.Optional[_IRole_235f5d8e]=None\n ) ->None:\n \"\"\"\n :param scope: -\n :param id: -\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. Default: - A role will be created.\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n __init__)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n props = SlackChannelConfigurationProps(slack_channel_configuration_name\n =slack_channel_configuration_name, slack_channel_id=\n slack_channel_id, slack_workspace_id=slack_workspace_id,\n logging_level=logging_level, log_retention=log_retention,\n log_retention_retry_options=log_retention_retry_options,\n log_retention_role=log_retention_role, notification_topics=\n notification_topics, role=role)\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name='fromSlackChannelConfigurationArn')\n @builtins.classmethod\n def from_slack_channel_configuration_arn(cls, scope: constructs.\n Construct, id: builtins.str, slack_channel_configuration_arn:\n builtins.str) ->ISlackChannelConfiguration:\n \"\"\"Import an existing Slack channel configuration provided an ARN.\n\n :param scope: The parent creating construct.\n :param id: The construct's name.\n :param slack_channel_configuration_arn: configuration ARN (i.e. 
arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).\n\n :return: a reference to the existing Slack channel configuration\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n from_slack_channel_configuration_arn)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n check_type(argname='argument slack_channel_configuration_arn',\n value=slack_channel_configuration_arn, expected_type=\n type_hints['slack_channel_configuration_arn'])\n return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls,\n 'fromSlackChannelConfigurationArn', [scope, id,\n slack_channel_configuration_arn]))\n\n @jsii.member(jsii_name='metricAll')\n @builtins.classmethod\n def metric_all(cls, metric_name: builtins.str, *, account: typing.\n Optional[builtins.str]=None, color: typing.Optional[builtins.str]=\n None, dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for All SlackChannelConfigurations.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n metric_all)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, 'metricAll',\n [metric_name, props]))\n\n @jsii.member(jsii_name='addNotificationTopic')\n def add_notification_topic(self, notification_topic: _ITopic_9eca4852\n ) ->None:\n \"\"\"Adds a SNS topic that deliver notifications to AWS Chatbot.\n\n :param notification_topic: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_notification_topic)\n check_type(argname='argument notification_topic', value=\n notification_topic, expected_type=type_hints[\n 'notification_topic'])\n return typing.cast(None, jsii.invoke(self, 'addNotificationTopic',\n [notification_topic]))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds extra permission to iam-role of Slack channel configuration.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='bindAsNotificationRuleTarget')\n def bind_as_notification_rule_target(self, _scope: constructs.Construct\n ) ->_NotificationRuleTargetConfig_ea27e095:\n \"\"\"Returns a target configuration for notification rule.\n\n :param _scope: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n bind_as_notification_rule_target)\n check_type(argname='argument _scope', value=_scope,\n expected_type=type_hints['_scope'])\n return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.\n invoke(self, 'bindAsNotificationRuleTarget', [_scope]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. 
For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.metric\n )\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n @builtins.property\n @jsii.member(jsii_name='grantPrincipal')\n def grant_principal(self) ->_IPrincipal_539bb2fd:\n \"\"\"The principal to grant permissions to.\"\"\"\n return typing.cast(_IPrincipal_539bb2fd, jsii.get(self,\n 'grantPrincipal'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n\[email protected]_type(jsii_type=\n 'aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps',\n jsii_struct_bases=[], name_mapping={'slack_channel_configuration_name':\n 'slackChannelConfigurationName', 'slack_channel_id': 'slackChannelId',\n 'slack_workspace_id': 'slackWorkspaceId', 'logging_level':\n 'loggingLevel', 'log_retention': 'logRetention',\n 'log_retention_retry_options': 'logRetentionRetryOptions',\n 'log_retention_role': 'logRetentionRole', 'notification_topics':\n 'notificationTopics', 'role': 'role'})\nclass SlackChannelConfigurationProps:\n\n def __init__(self, *, slack_channel_configuration_name: builtins.str,\n slack_channel_id: builtins.str, slack_workspace_id: builtins.str,\n logging_level: typing.Optional[LoggingLevel]=None, log_retention:\n typing.Optional[_RetentionDays_070f99f0]=None,\n 
log_retention_retry_options: typing.Optional[\n _LogRetentionRetryOptions_62d80a14]=None, log_retention_role:\n typing.Optional[_IRole_235f5d8e]=None, notification_topics: typing.\n Optional[typing.Sequence[_ITopic_9eca4852]]=None, role: typing.\n Optional[_IRole_235f5d8e]=None) ->None:\n \"\"\"Properties for a new Slack channel configuration.\n\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. 
Default: - A role will be created.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n if isinstance(log_retention_retry_options, dict):\n log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**\n log_retention_retry_options)\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfigurationProps\n .__init__)\n check_type(argname='argument slack_channel_configuration_name',\n value=slack_channel_configuration_name, expected_type=\n type_hints['slack_channel_configuration_name'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument log_retention', value=\n log_retention, expected_type=type_hints['log_retention'])\n check_type(argname='argument log_retention_retry_options',\n value=log_retention_retry_options, expected_type=type_hints\n ['log_retention_retry_options'])\n check_type(argname='argument log_retention_role', value=\n log_retention_role, expected_type=type_hints[\n 'log_retention_role'])\n check_type(argname='argument notification_topics', value=\n notification_topics, expected_type=type_hints[\n 'notification_topics'])\n check_type(argname='argument role', value=role, expected_type=\n type_hints['role'])\n self._values: typing.Dict[str, typing.Any] = {\n 'slack_channel_configuration_name':\n slack_channel_configuration_name, 'slack_channel_id':\n slack_channel_id, 'slack_workspace_id': slack_workspace_id}\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if log_retention is not None:\n self._values['log_retention'] = log_retention\n if log_retention_retry_options is not None:\n self._values['log_retention_retry_options'\n ] = log_retention_retry_options\n if log_retention_role is not None:\n self._values['log_retention_role'] = log_retention_role\n if notification_topics is not None:\n self._values['notification_topics'] = notification_topics\n if role is not None:\n self._values['role'] = role\n\n @builtins.property\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n result = self._values.get('slack_channel_configuration_name')\n assert result is not None, \"Required property 'slack_channel_configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.\n The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ.\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot.\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.\n Then you can copy and paste the workspace ID from the console.\n For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n\n :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[LoggingLevel]:\n \"\"\"Specifies the logging level for this configuration.\n\n This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n :default: LoggingLevel.NONE\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[LoggingLevel], result)\n\n @builtins.property\n def log_retention(self) ->typing.Optional[_RetentionDays_070f99f0]:\n \"\"\"The number of days log events are kept in CloudWatch Logs.\n\n When updating\n this property, unsetting it doesn't remove the log retention policy. To\n remove the retention policy, set the value to ``INFINITE``.\n\n :default: logs.RetentionDays.INFINITE\n \"\"\"\n result = self._values.get('log_retention')\n return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)\n\n @builtins.property\n def log_retention_retry_options(self) ->typing.Optional[\n _LogRetentionRetryOptions_62d80a14]:\n \"\"\"When log retention is specified, a custom resource attempts to create the CloudWatch log group.\n\n These options control the retry policy when interacting with CloudWatch APIs.\n\n :default: - Default AWS SDK retry options.\n \"\"\"\n result = self._values.get('log_retention_retry_options')\n return typing.cast(typing.Optional[\n _LogRetentionRetryOptions_62d80a14], result)\n\n @builtins.property\n def log_retention_role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The IAM role for the Lambda function associated with the custom resource that sets the retention policy.\n\n :default: - A new role is created.\n \"\"\"\n result = self._values.get('log_retention_role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n @builtins.property\n def notification_topics(self) ->typing.Optional[typing.List[\n _ITopic_9eca4852]]:\n \"\"\"The SNS topics that deliver notifications to AWS Chatbot.\n\n :default: None\n \"\"\"\n result = self._values.get('notification_topics')\n return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]],\n result)\n\n @builtins.property\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n \"\"\"\n result = self._values.get('role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 'SlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for 
k, v in self._values.items())\n\n\n<mask token>\n",
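The next step repeats the low-level CfnSlackChannelConfiguration resource with fewer members masked. As a rough sketch, assuming aws-cdk-lib v2, of how that L1 resource maps onto a stack (the account ID, role name and Slack IDs are placeholders, not values from this file):

from aws_cdk import App, Stack
from aws_cdk import aws_chatbot as chatbot
from constructs import Construct


class CfnChatOpsStack(Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # The four required properties of CfnSlackChannelConfigurationProps must be
        # supplied; logging_level and the remaining optional properties may be omitted.
        chatbot.CfnSlackChannelConfiguration(
            self, 'CfnSlackChannel',
            configuration_name='alerts',
            iam_role_arn='arn:aws:iam::123456789012:role/chatbot-role',  # placeholder
            slack_channel_id='C0123456789',    # placeholder
            slack_workspace_id='T0123456789',  # placeholder
            logging_level='ERROR',             # one of ERROR | INFO | NONE
        )


app = App()
CfnChatOpsStack(app, 'CfnChatOpsStack')
app.synth()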
"step-4": "<mask token>\n\n\[email protected](_IInspectable_c2943556)\nclass CfnSlackChannelConfiguration(_CfnResource_9df397a6, metaclass=jsii.\n JSIIMeta, jsii_type='aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration'\n ):\n <mask token>\n\n def __init__(self, scope: constructs.Construct, id: builtins.str, *,\n configuration_name: builtins.str, iam_role_arn: builtins.str,\n slack_channel_id: builtins.str, slack_workspace_id: builtins.str,\n guardrail_policies: typing.Optional[typing.Sequence[builtins.str]]=\n None, logging_level: typing.Optional[builtins.str]=None,\n sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]]=None,\n user_role_required: typing.Optional[typing.Union[builtins.bool,\n _IResolvable_da3f097b]]=None) ->None:\n \"\"\"Create a new ``AWS::Chatbot::SlackChannelConfiguration``.\n\n :param scope: - scope in which this resource is defined.\n :param id: - scoped id of the resource.\n :param configuration_name: The name of the configuration.\n :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. 
Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration\n .__init__)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n props = CfnSlackChannelConfigurationProps(configuration_name=\n configuration_name, iam_role_arn=iam_role_arn, slack_channel_id\n =slack_channel_id, slack_workspace_id=slack_workspace_id,\n guardrail_policies=guardrail_policies, logging_level=\n logging_level, sns_topic_arns=sns_topic_arns,\n user_role_required=user_role_required)\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name='inspect')\n def inspect(self, inspector: _TreeInspector_488e0dd5) ->None:\n \"\"\"Examines the CloudFormation resource and discloses attributes.\n\n :param inspector: - tree inspector to collect and process attributes.\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration\n .inspect)\n check_type(argname='argument inspector', value=inspector,\n expected_type=type_hints['inspector'])\n return typing.cast(None, jsii.invoke(self, 'inspect', [inspector]))\n\n @jsii.member(jsii_name='renderProperties')\n def _render_properties(self, props: typing.Mapping[builtins.str, typing\n .Any]) ->typing.Mapping[builtins.str, typing.Any]:\n \"\"\"\n :param props: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration\n ._render_properties)\n check_type(argname='argument props', value=props, expected_type\n =type_hints['props'])\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.\n invoke(self, 'renderProperties', [props]))\n\n @jsii.python.classproperty\n @jsii.member(jsii_name='CFN_RESOURCE_TYPE_NAME')\n def CFN_RESOURCE_TYPE_NAME(cls) ->builtins.str:\n \"\"\"The CloudFormation resource type name for this resource class.\"\"\"\n return typing.cast(builtins.str, jsii.sget(cls,\n 'CFN_RESOURCE_TYPE_NAME'))\n <mask token>\n\n @builtins.property\n @jsii.member(jsii_name='cfnProperties')\n def _cfn_properties(self) ->typing.Mapping[builtins.str, typing.Any]:\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.\n get(self, 'cfnProperties'))\n <mask token>\n\n @configuration_name.setter\n def configuration_name(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'configuration_name').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'configurationName', value)\n <mask token>\n <mask token>\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelId')\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. 
For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self, 'slackChannelId'))\n\n @slack_channel_id.setter\n def slack_channel_id(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'slack_channel_id').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'slackChannelId', value)\n\n @builtins.property\n @jsii.member(jsii_name='slackWorkspaceId')\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot .\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self, 'slackWorkspaceId'))\n <mask token>\n\n @builtins.property\n @jsii.member(jsii_name='guardrailPolicies')\n def guardrail_policies(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The list of IAM policy ARNs that are applied as channel guardrails.\n\n The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n \"\"\"\n return typing.cast(typing.Optional[typing.List[builtins.str]], jsii\n .get(self, 'guardrailPolicies'))\n\n @guardrail_policies.setter\n def guardrail_policies(self, value: typing.Optional[typing.List[\n builtins.str]]) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'guardrail_policies').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'guardrailPolicies', value)\n\n @builtins.property\n @jsii.member(jsii_name='loggingLevel')\n def logging_level(self) ->typing.Optional[builtins.str]:\n \"\"\"Specifies the logging level for this configuration. 
This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n \"\"\"\n return typing.cast(typing.Optional[builtins.str], jsii.get(self,\n 'loggingLevel'))\n\n @logging_level.setter\n def logging_level(self, value: typing.Optional[builtins.str]) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'logging_level').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'loggingLevel', value)\n\n @builtins.property\n @jsii.member(jsii_name='snsTopicArns')\n def sns_topic_arns(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n \"\"\"\n return typing.cast(typing.Optional[typing.List[builtins.str]], jsii\n .get(self, 'snsTopicArns'))\n\n @sns_topic_arns.setter\n def sns_topic_arns(self, value: typing.Optional[typing.List[builtins.str]]\n ) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'sns_topic_arns').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'snsTopicArns', value)\n\n @builtins.property\n @jsii.member(jsii_name='userRoleRequired')\n def user_role_required(self) ->typing.Optional[typing.Union[builtins.\n bool, _IResolvable_da3f097b]]:\n \"\"\"Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired\n \"\"\"\n return typing.cast(typing.Optional[typing.Union[builtins.bool,\n _IResolvable_da3f097b]], jsii.get(self, 'userRoleRequired'))\n\n @user_role_required.setter\n def user_role_required(self, value: typing.Optional[typing.Union[\n builtins.bool, _IResolvable_da3f097b]]) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'user_role_required').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'userRoleRequired', value)\n\n\[email protected]_type(jsii_type=\n 'aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps',\n jsii_struct_bases=[], name_mapping={'configuration_name':\n 'configurationName', 'iam_role_arn': 'iamRoleArn', 'slack_channel_id':\n 'slackChannelId', 'slack_workspace_id': 'slackWorkspaceId',\n 'guardrail_policies': 'guardrailPolicies', 'logging_level':\n 'loggingLevel', 'sns_topic_arns': 'snsTopicArns', 'user_role_required':\n 'userRoleRequired'})\nclass CfnSlackChannelConfigurationProps:\n\n def __init__(self, *, configuration_name: builtins.str, iam_role_arn:\n builtins.str, slack_channel_id: builtins.str, slack_workspace_id:\n builtins.str, guardrail_policies: typing.Optional[typing.Sequence[\n builtins.str]]=None, logging_level: typing.Optional[builtins.str]=\n None, sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]\n ]=None, user_role_required: typing.Optional[typing.Union[builtins.\n bool, _IResolvable_da3f097b]]=None) ->None:\n 
\"\"\"Properties for defining a ``CfnSlackChannelConfiguration``.\n\n :param configuration_name: The name of the configuration.\n :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html\n :exampleMetadata: fixture=_generated\n\n Example::\n\n # The code below shows an example of how to instantiate this type.\n # The values are placeholders you should change.\n from aws_cdk import aws_chatbot as chatbot\n \n cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps(\n configuration_name=\"configurationName\",\n iam_role_arn=\"iamRoleArn\",\n slack_channel_id=\"slackChannelId\",\n slack_workspace_id=\"slackWorkspaceId\",\n \n # the properties below are optional\n guardrail_policies=[\"guardrailPolicies\"],\n logging_level=\"loggingLevel\",\n sns_topic_arns=[\"snsTopicArns\"],\n user_role_required=False\n )\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(\n CfnSlackChannelConfigurationProps.__init__)\n check_type(argname='argument configuration_name', value=\n configuration_name, expected_type=type_hints[\n 'configuration_name'])\n check_type(argname='argument iam_role_arn', value=iam_role_arn,\n expected_type=type_hints['iam_role_arn'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument guardrail_policies', value=\n guardrail_policies, expected_type=type_hints[\n 'guardrail_policies'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument sns_topic_arns', value=\n sns_topic_arns, expected_type=type_hints['sns_topic_arns'])\n check_type(argname='argument user_role_required', 
value=\n user_role_required, expected_type=type_hints[\n 'user_role_required'])\n self._values: typing.Dict[str, typing.Any] = {'configuration_name':\n configuration_name, 'iam_role_arn': iam_role_arn,\n 'slack_channel_id': slack_channel_id, 'slack_workspace_id':\n slack_workspace_id}\n if guardrail_policies is not None:\n self._values['guardrail_policies'] = guardrail_policies\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if sns_topic_arns is not None:\n self._values['sns_topic_arns'] = sns_topic_arns\n if user_role_required is not None:\n self._values['user_role_required'] = user_role_required\n\n @builtins.property\n def configuration_name(self) ->builtins.str:\n \"\"\"The name of the configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname\n \"\"\"\n result = self._values.get('configuration_name')\n assert result is not None, \"Required property 'configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def iam_role_arn(self) ->builtins.str:\n \"\"\"The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\n This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn\n \"\"\"\n result = self._values.get('iam_role_arn')\n assert result is not None, \"Required property 'iam_role_arn' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot .\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def guardrail_policies(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The list of IAM policy ARNs that are applied as channel guardrails.\n\n The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n \"\"\"\n result = self._values.get('guardrail_policies')\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[builtins.str]:\n \"\"\"Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[builtins.str], result)\n\n @builtins.property\n def sns_topic_arns(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n \"\"\"\n result = self._values.get('sns_topic_arns')\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def user_role_required(self) ->typing.Optional[typing.Union[builtins.\n bool, _IResolvable_da3f097b]]:\n \"\"\"Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired\n \"\"\"\n result = self._values.get('user_role_required')\n return typing.cast(typing.Optional[typing.Union[builtins.bool,\n _IResolvable_da3f097b]], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 'CfnSlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for k, v in self._values.items())\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration')\nclass ISlackChannelConfiguration(_IResource_c80c4260, _IGrantable_71c4f5de,\n _INotificationRuleTarget_faa3b79b, typing_extensions.Protocol):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration 
In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n ...\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n ...\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n ...\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n ...\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n ...\n\n\nclass _ISlackChannelConfigurationProxy(jsii.proxy_for(_IResource_c80c4260),\n jsii.proxy_for(_IGrantable_71c4f5de), jsii.proxy_for(\n _INotificationRuleTarget_faa3b79b)):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n __jsii_type__: typing.ClassVar[str\n ] = 'aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration'\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. 
Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n metric)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n\n<mask token>\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.LoggingLevel')\nclass LoggingLevel(enum.Enum):\n \"\"\"Logging levels include ERROR, INFO, or NONE.\"\"\"\n ERROR = 'ERROR'\n \"\"\"ERROR.\"\"\"\n INFO = 'INFO'\n \"\"\"INFO.\"\"\"\n NONE = 'NONE'\n \"\"\"NONE.\"\"\"\n\n\[email protected](ISlackChannelConfiguration)\nclass SlackChannelConfiguration(_Resource_45bc6135, metaclass=jsii.JSIIMeta,\n jsii_type='aws-cdk-lib.aws_chatbot.SlackChannelConfiguration'):\n \"\"\"A new Slack channel configuration.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n\n def __init__(self, scope: constructs.Construct, id: builtins.str, *,\n slack_channel_configuration_name: builtins.str, slack_channel_id:\n builtins.str, slack_workspace_id: builtins.str, logging_level:\n typing.Optional[LoggingLevel]=None, log_retention: typing.Optional[\n _RetentionDays_070f99f0]=None, log_retention_retry_options: typing.\n Optional[_LogRetentionRetryOptions_62d80a14]=None,\n log_retention_role: typing.Optional[_IRole_235f5d8e]=None,\n notification_topics: typing.Optional[typing.Sequence[\n _ITopic_9eca4852]]=None, role: typing.Optional[_IRole_235f5d8e]=None\n ) ->None:\n \"\"\"\n :param scope: -\n :param id: -\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. Default: - A role will be created.\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n __init__)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n props = SlackChannelConfigurationProps(slack_channel_configuration_name\n =slack_channel_configuration_name, slack_channel_id=\n slack_channel_id, slack_workspace_id=slack_workspace_id,\n logging_level=logging_level, log_retention=log_retention,\n log_retention_retry_options=log_retention_retry_options,\n log_retention_role=log_retention_role, notification_topics=\n notification_topics, role=role)\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name='fromSlackChannelConfigurationArn')\n @builtins.classmethod\n def from_slack_channel_configuration_arn(cls, scope: constructs.\n Construct, id: builtins.str, slack_channel_configuration_arn:\n builtins.str) ->ISlackChannelConfiguration:\n \"\"\"Import an existing Slack channel configuration provided an ARN.\n\n :param scope: The parent creating construct.\n :param id: The construct's name.\n :param slack_channel_configuration_arn: configuration ARN (i.e. 
arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).\n\n :return: a reference to the existing Slack channel configuration\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n from_slack_channel_configuration_arn)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n check_type(argname='argument slack_channel_configuration_arn',\n value=slack_channel_configuration_arn, expected_type=\n type_hints['slack_channel_configuration_arn'])\n return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls,\n 'fromSlackChannelConfigurationArn', [scope, id,\n slack_channel_configuration_arn]))\n\n @jsii.member(jsii_name='metricAll')\n @builtins.classmethod\n def metric_all(cls, metric_name: builtins.str, *, account: typing.\n Optional[builtins.str]=None, color: typing.Optional[builtins.str]=\n None, dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for All SlackChannelConfigurations.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n metric_all)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, 'metricAll',\n [metric_name, props]))\n\n @jsii.member(jsii_name='addNotificationTopic')\n def add_notification_topic(self, notification_topic: _ITopic_9eca4852\n ) ->None:\n \"\"\"Adds a SNS topic that deliver notifications to AWS Chatbot.\n\n :param notification_topic: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_notification_topic)\n check_type(argname='argument notification_topic', value=\n notification_topic, expected_type=type_hints[\n 'notification_topic'])\n return typing.cast(None, jsii.invoke(self, 'addNotificationTopic',\n [notification_topic]))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds extra permission to iam-role of Slack channel configuration.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='bindAsNotificationRuleTarget')\n def bind_as_notification_rule_target(self, _scope: constructs.Construct\n ) ->_NotificationRuleTargetConfig_ea27e095:\n \"\"\"Returns a target configuration for notification rule.\n\n :param _scope: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n bind_as_notification_rule_target)\n check_type(argname='argument _scope', value=_scope,\n expected_type=type_hints['_scope'])\n return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.\n invoke(self, 'bindAsNotificationRuleTarget', [_scope]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. 
For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.metric\n )\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n @builtins.property\n @jsii.member(jsii_name='grantPrincipal')\n def grant_principal(self) ->_IPrincipal_539bb2fd:\n \"\"\"The principal to grant permissions to.\"\"\"\n return typing.cast(_IPrincipal_539bb2fd, jsii.get(self,\n 'grantPrincipal'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n\[email protected]_type(jsii_type=\n 'aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps',\n jsii_struct_bases=[], name_mapping={'slack_channel_configuration_name':\n 'slackChannelConfigurationName', 'slack_channel_id': 'slackChannelId',\n 'slack_workspace_id': 'slackWorkspaceId', 'logging_level':\n 'loggingLevel', 'log_retention': 'logRetention',\n 'log_retention_retry_options': 'logRetentionRetryOptions',\n 'log_retention_role': 'logRetentionRole', 'notification_topics':\n 'notificationTopics', 'role': 'role'})\nclass SlackChannelConfigurationProps:\n\n def __init__(self, *, slack_channel_configuration_name: builtins.str,\n slack_channel_id: builtins.str, slack_workspace_id: builtins.str,\n logging_level: typing.Optional[LoggingLevel]=None, log_retention:\n typing.Optional[_RetentionDays_070f99f0]=None,\n 
log_retention_retry_options: typing.Optional[\n _LogRetentionRetryOptions_62d80a14]=None, log_retention_role:\n typing.Optional[_IRole_235f5d8e]=None, notification_topics: typing.\n Optional[typing.Sequence[_ITopic_9eca4852]]=None, role: typing.\n Optional[_IRole_235f5d8e]=None) ->None:\n \"\"\"Properties for a new Slack channel configuration.\n\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. 
Default: - A role will be created.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n if isinstance(log_retention_retry_options, dict):\n log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**\n log_retention_retry_options)\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfigurationProps\n .__init__)\n check_type(argname='argument slack_channel_configuration_name',\n value=slack_channel_configuration_name, expected_type=\n type_hints['slack_channel_configuration_name'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument log_retention', value=\n log_retention, expected_type=type_hints['log_retention'])\n check_type(argname='argument log_retention_retry_options',\n value=log_retention_retry_options, expected_type=type_hints\n ['log_retention_retry_options'])\n check_type(argname='argument log_retention_role', value=\n log_retention_role, expected_type=type_hints[\n 'log_retention_role'])\n check_type(argname='argument notification_topics', value=\n notification_topics, expected_type=type_hints[\n 'notification_topics'])\n check_type(argname='argument role', value=role, expected_type=\n type_hints['role'])\n self._values: typing.Dict[str, typing.Any] = {\n 'slack_channel_configuration_name':\n slack_channel_configuration_name, 'slack_channel_id':\n slack_channel_id, 'slack_workspace_id': slack_workspace_id}\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if log_retention is not None:\n self._values['log_retention'] = log_retention\n if log_retention_retry_options is not None:\n self._values['log_retention_retry_options'\n ] = log_retention_retry_options\n if log_retention_role is not None:\n self._values['log_retention_role'] = log_retention_role\n if notification_topics is not None:\n self._values['notification_topics'] = notification_topics\n if role is not None:\n self._values['role'] = role\n\n @builtins.property\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n result = self._values.get('slack_channel_configuration_name')\n assert result is not None, \"Required property 'slack_channel_configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.\n The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ.\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot.\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.\n Then you can copy and paste the workspace ID from the console.\n For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n\n :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[LoggingLevel]:\n \"\"\"Specifies the logging level for this configuration.\n\n This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n :default: LoggingLevel.NONE\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[LoggingLevel], result)\n\n @builtins.property\n def log_retention(self) ->typing.Optional[_RetentionDays_070f99f0]:\n \"\"\"The number of days log events are kept in CloudWatch Logs.\n\n When updating\n this property, unsetting it doesn't remove the log retention policy. To\n remove the retention policy, set the value to ``INFINITE``.\n\n :default: logs.RetentionDays.INFINITE\n \"\"\"\n result = self._values.get('log_retention')\n return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)\n\n @builtins.property\n def log_retention_retry_options(self) ->typing.Optional[\n _LogRetentionRetryOptions_62d80a14]:\n \"\"\"When log retention is specified, a custom resource attempts to create the CloudWatch log group.\n\n These options control the retry policy when interacting with CloudWatch APIs.\n\n :default: - Default AWS SDK retry options.\n \"\"\"\n result = self._values.get('log_retention_retry_options')\n return typing.cast(typing.Optional[\n _LogRetentionRetryOptions_62d80a14], result)\n\n @builtins.property\n def log_retention_role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The IAM role for the Lambda function associated with the custom resource that sets the retention policy.\n\n :default: - A new role is created.\n \"\"\"\n result = self._values.get('log_retention_role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n @builtins.property\n def notification_topics(self) ->typing.Optional[typing.List[\n _ITopic_9eca4852]]:\n \"\"\"The SNS topics that deliver notifications to AWS Chatbot.\n\n :default: None\n \"\"\"\n result = self._values.get('notification_topics')\n return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]],\n result)\n\n @builtins.property\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n \"\"\"\n result = self._values.get('role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 'SlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for 
k, v in self._values.items())\n\n\n<mask token>\n",
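For reference alongside the generated bindings above, here is a minimal sketch of the import-and-monitor flow those docstrings describe: importing an existing Slack channel configuration by ARN via `SlackChannelConfiguration.from_slack_channel_configuration_arn` and alarming on one of its metrics via `metric()`. The construct ids, the ARN, the metric name, and the alarm threshold are placeholder values (in particular, "MessageDeliveryFailure" is assumed to be a valid AWS Chatbot CloudWatch metric name and should be checked against the service documentation); as in the examples embedded above, `self` is assumed to be a Stack or other Construct.

```python
import aws_cdk.aws_chatbot as chatbot

# Import an existing Slack channel configuration by its ARN (placeholder ARN shown).
imported = chatbot.SlackChannelConfiguration.from_slack_channel_configuration_arn(
    self, "ImportedSlackChannel",
    "arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack",
)

# Create an alarm on a metric of the imported configuration. The metric name is a
# placeholder/assumption; threshold and evaluation_periods are illustrative values.
imported.metric("MessageDeliveryFailure").create_alarm(
    self, "SlackDeliveryFailures",
    threshold=1,
    evaluation_periods=1,
)
```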
"step-5": "'''\n# AWS::Chatbot Construct Library\n\nAWS Chatbot is an AWS service that enables DevOps and software development teams to use Slack chat rooms to monitor and respond to operational events in their AWS Cloud. AWS Chatbot processes AWS service notifications from Amazon Simple Notification Service (Amazon SNS), and forwards them to Slack chat rooms so teams can analyze and act on them immediately, regardless of location.\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n```python\nimport aws_cdk.aws_chatbot as chatbot\nimport aws_cdk.aws_sns as sns\nimport aws_cdk.aws_iam as iam\n\n\nslack_channel = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n)\n\nslack_channel.add_to_role_policy(iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n actions=[\"s3:GetObject\"\n ],\n resources=[\"arn:aws:s3:::abc/xyz/123.txt\"]\n))\n\nslack_channel.add_notification_topic(sns.Topic(self, \"MyTopic\"))\n```\n\n## Log Group\n\nSlack channel configuration automatically create a log group with the name `/aws/chatbot/<configuration-name>` in `us-east-1` upon first execution with\nlog data set to never expire.\n\nThe `logRetention` property can be used to set a different expiration period. A log group will be created if not already exists.\nIf the log group already exists, it's expiration will be configured to the value specified in this construct (never expire, by default).\n\nBy default, CDK uses the AWS SDK retry options when interacting with the log group. The `logRetentionRetryOptions` property\nallows you to customize the maximum number of retries and base backoff duration.\n\n*Note* that, if `logRetention` is set, a [CloudFormation custom\nresource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html) is added\nto the stack that pre-creates the log group as part of the stack deployment, if it already doesn't exist, and sets the\ncorrect log retention period (never expire, by default).\n'''\nimport abc\nimport builtins\nimport datetime\nimport enum\nimport typing\n\nimport jsii\nimport publication\nimport typing_extensions\n\nfrom typeguard import check_type\n\nfrom .._jsii import *\n\nimport constructs\nfrom .. 
import (\n CfnResource as _CfnResource_9df397a6,\n Duration as _Duration_4839e8c3,\n IInspectable as _IInspectable_c2943556,\n IResolvable as _IResolvable_da3f097b,\n IResource as _IResource_c80c4260,\n Resource as _Resource_45bc6135,\n TreeInspector as _TreeInspector_488e0dd5,\n)\nfrom ..aws_cloudwatch import (\n Metric as _Metric_e396a4dc,\n MetricOptions as _MetricOptions_1788b62f,\n Unit as _Unit_61bc6f70,\n)\nfrom ..aws_codestarnotifications import (\n INotificationRuleTarget as _INotificationRuleTarget_faa3b79b,\n NotificationRuleTargetConfig as _NotificationRuleTargetConfig_ea27e095,\n)\nfrom ..aws_iam import (\n IGrantable as _IGrantable_71c4f5de,\n IPrincipal as _IPrincipal_539bb2fd,\n IRole as _IRole_235f5d8e,\n PolicyStatement as _PolicyStatement_0fe33853,\n)\nfrom ..aws_logs import (\n LogRetentionRetryOptions as _LogRetentionRetryOptions_62d80a14,\n RetentionDays as _RetentionDays_070f99f0,\n)\nfrom ..aws_sns import ITopic as _ITopic_9eca4852\n\n\[email protected](_IInspectable_c2943556)\nclass CfnSlackChannelConfiguration(\n _CfnResource_9df397a6,\n metaclass=jsii.JSIIMeta,\n jsii_type=\"aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration\",\n):\n '''A CloudFormation ``AWS::Chatbot::SlackChannelConfiguration``.\n\n The ``AWS::Chatbot::SlackChannelConfiguration`` resource configures a Slack channel to allow users to use AWS Chatbot with AWS CloudFormation templates.\n\n This resource requires some setup to be done in the AWS Chatbot console. To provide the required Slack workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console, then copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :cloudformationResource: AWS::Chatbot::SlackChannelConfiguration\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html\n :exampleMetadata: fixture=_generated\n\n Example::\n\n # The code below shows an example of how to instantiate this type.\n # The values are placeholders you should change.\n from aws_cdk import aws_chatbot as chatbot\n \n cfn_slack_channel_configuration = chatbot.CfnSlackChannelConfiguration(self, \"MyCfnSlackChannelConfiguration\",\n configuration_name=\"configurationName\",\n iam_role_arn=\"iamRoleArn\",\n slack_channel_id=\"slackChannelId\",\n slack_workspace_id=\"slackWorkspaceId\",\n \n # the properties below are optional\n guardrail_policies=[\"guardrailPolicies\"],\n logging_level=\"loggingLevel\",\n sns_topic_arns=[\"snsTopicArns\"],\n user_role_required=False\n )\n '''\n\n def __init__(\n self,\n scope: constructs.Construct,\n id: builtins.str,\n *,\n configuration_name: builtins.str,\n iam_role_arn: builtins.str,\n slack_channel_id: builtins.str,\n slack_workspace_id: builtins.str,\n guardrail_policies: typing.Optional[typing.Sequence[builtins.str]] = None,\n logging_level: typing.Optional[builtins.str] = None,\n sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]] = None,\n user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,\n ) -> None:\n '''Create a new ``AWS::Chatbot::SlackChannelConfiguration``.\n\n :param scope: - scope in which this resource is defined.\n :param id: - scoped id of the resource.\n :param configuration_name: The name of the configuration.\n :param iam_role_arn: The ARN of the IAM role that 
defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n '''\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration.__init__)\n check_type(argname=\"argument scope\", value=scope, expected_type=type_hints[\"scope\"])\n check_type(argname=\"argument id\", value=id, expected_type=type_hints[\"id\"])\n props = CfnSlackChannelConfigurationProps(\n configuration_name=configuration_name,\n iam_role_arn=iam_role_arn,\n slack_channel_id=slack_channel_id,\n slack_workspace_id=slack_workspace_id,\n guardrail_policies=guardrail_policies,\n logging_level=logging_level,\n sns_topic_arns=sns_topic_arns,\n user_role_required=user_role_required,\n )\n\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name=\"inspect\")\n def inspect(self, inspector: _TreeInspector_488e0dd5) -> None:\n '''Examines the CloudFormation resource and discloses attributes.\n\n :param inspector: - tree inspector to collect and process attributes.\n '''\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration.inspect)\n check_type(argname=\"argument inspector\", value=inspector, expected_type=type_hints[\"inspector\"])\n return typing.cast(None, jsii.invoke(self, \"inspect\", [inspector]))\n\n @jsii.member(jsii_name=\"renderProperties\")\n def _render_properties(\n self,\n props: typing.Mapping[builtins.str, typing.Any],\n ) -> typing.Mapping[builtins.str, typing.Any]:\n '''\n :param props: -\n '''\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration._render_properties)\n check_type(argname=\"argument props\", value=props, expected_type=type_hints[\"props\"])\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, \"renderProperties\", [props]))\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"CFN_RESOURCE_TYPE_NAME\")\n def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:\n '''The CloudFormation resource type name for this resource class.'''\n return typing.cast(builtins.str, 
jsii.sget(cls, \"CFN_RESOURCE_TYPE_NAME\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"attrArn\")\n def attr_arn(self) -> builtins.str:\n '''\n :cloudformationAttribute: Arn\n '''\n return typing.cast(builtins.str, jsii.get(self, \"attrArn\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"cfnProperties\")\n def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, \"cfnProperties\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"configurationName\")\n def configuration_name(self) -> builtins.str:\n '''The name of the configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname\n '''\n return typing.cast(builtins.str, jsii.get(self, \"configurationName\"))\n\n @configuration_name.setter\n def configuration_name(self, value: builtins.str) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"configuration_name\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"configurationName\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"iamRoleArn\")\n def iam_role_arn(self) -> builtins.str:\n '''The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\n This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn\n '''\n return typing.cast(builtins.str, jsii.get(self, \"iamRoleArn\"))\n\n @iam_role_arn.setter\n def iam_role_arn(self, value: builtins.str) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"iam_role_arn\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"iamRoleArn\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelId\")\n def slack_channel_id(self) -> builtins.str:\n '''The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. 
For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n '''\n return typing.cast(builtins.str, jsii.get(self, \"slackChannelId\"))\n\n @slack_channel_id.setter\n def slack_channel_id(self, value: builtins.str) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"slack_channel_id\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"slackChannelId\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackWorkspaceId\")\n def slack_workspace_id(self) -> builtins.str:\n '''The ID of the Slack workspace authorized with AWS Chatbot .\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n '''\n return typing.cast(builtins.str, jsii.get(self, \"slackWorkspaceId\"))\n\n @slack_workspace_id.setter\n def slack_workspace_id(self, value: builtins.str) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"slack_workspace_id\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"slackWorkspaceId\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"guardrailPolicies\")\n def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]:\n '''The list of IAM policy ARNs that are applied as channel guardrails.\n\n The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n '''\n return typing.cast(typing.Optional[typing.List[builtins.str]], jsii.get(self, \"guardrailPolicies\"))\n\n @guardrail_policies.setter\n def guardrail_policies(\n self,\n value: typing.Optional[typing.List[builtins.str]],\n ) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"guardrail_policies\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"guardrailPolicies\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"loggingLevel\")\n def logging_level(self) -> typing.Optional[builtins.str]:\n '''Specifies the logging level for this configuration. 
This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n '''\n return typing.cast(typing.Optional[builtins.str], jsii.get(self, \"loggingLevel\"))\n\n @logging_level.setter\n def logging_level(self, value: typing.Optional[builtins.str]) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"logging_level\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"loggingLevel\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"snsTopicArns\")\n def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]:\n '''The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n '''\n return typing.cast(typing.Optional[typing.List[builtins.str]], jsii.get(self, \"snsTopicArns\"))\n\n @sns_topic_arns.setter\n def sns_topic_arns(self, value: typing.Optional[typing.List[builtins.str]]) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"sns_topic_arns\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"snsTopicArns\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"userRoleRequired\")\n def user_role_required(\n self,\n ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:\n '''Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired\n '''\n return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], jsii.get(self, \"userRoleRequired\"))\n\n @user_role_required.setter\n def user_role_required(\n self,\n value: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]],\n ) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"user_role_required\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"userRoleRequired\", value)\n\n\[email protected]_type(\n jsii_type=\"aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps\",\n jsii_struct_bases=[],\n name_mapping={\n \"configuration_name\": \"configurationName\",\n \"iam_role_arn\": \"iamRoleArn\",\n \"slack_channel_id\": \"slackChannelId\",\n \"slack_workspace_id\": \"slackWorkspaceId\",\n \"guardrail_policies\": \"guardrailPolicies\",\n \"logging_level\": \"loggingLevel\",\n \"sns_topic_arns\": \"snsTopicArns\",\n \"user_role_required\": \"userRoleRequired\",\n },\n)\nclass CfnSlackChannelConfigurationProps:\n def __init__(\n self,\n *,\n configuration_name: builtins.str,\n iam_role_arn: builtins.str,\n slack_channel_id: builtins.str,\n slack_workspace_id: builtins.str,\n guardrail_policies: typing.Optional[typing.Sequence[builtins.str]] = None,\n logging_level: typing.Optional[builtins.str] = None,\n sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]] = None,\n 
user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,\n ) -> None:\n '''Properties for defining a ``CfnSlackChannelConfiguration``.\n\n :param configuration_name: The name of the configuration.\n :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html\n :exampleMetadata: fixture=_generated\n\n Example::\n\n # The code below shows an example of how to instantiate this type.\n # The values are placeholders you should change.\n from aws_cdk import aws_chatbot as chatbot\n \n cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps(\n configuration_name=\"configurationName\",\n iam_role_arn=\"iamRoleArn\",\n slack_channel_id=\"slackChannelId\",\n slack_workspace_id=\"slackWorkspaceId\",\n \n # the properties below are optional\n guardrail_policies=[\"guardrailPolicies\"],\n logging_level=\"loggingLevel\",\n sns_topic_arns=[\"snsTopicArns\"],\n user_role_required=False\n )\n '''\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfigurationProps.__init__)\n check_type(argname=\"argument configuration_name\", value=configuration_name, expected_type=type_hints[\"configuration_name\"])\n check_type(argname=\"argument iam_role_arn\", value=iam_role_arn, expected_type=type_hints[\"iam_role_arn\"])\n check_type(argname=\"argument slack_channel_id\", value=slack_channel_id, expected_type=type_hints[\"slack_channel_id\"])\n check_type(argname=\"argument slack_workspace_id\", value=slack_workspace_id, expected_type=type_hints[\"slack_workspace_id\"])\n check_type(argname=\"argument guardrail_policies\", value=guardrail_policies, expected_type=type_hints[\"guardrail_policies\"])\n check_type(argname=\"argument logging_level\", value=logging_level, expected_type=type_hints[\"logging_level\"])\n check_type(argname=\"argument sns_topic_arns\", value=sns_topic_arns, 
expected_type=type_hints[\"sns_topic_arns\"])\n check_type(argname=\"argument user_role_required\", value=user_role_required, expected_type=type_hints[\"user_role_required\"])\n self._values: typing.Dict[str, typing.Any] = {\n \"configuration_name\": configuration_name,\n \"iam_role_arn\": iam_role_arn,\n \"slack_channel_id\": slack_channel_id,\n \"slack_workspace_id\": slack_workspace_id,\n }\n if guardrail_policies is not None:\n self._values[\"guardrail_policies\"] = guardrail_policies\n if logging_level is not None:\n self._values[\"logging_level\"] = logging_level\n if sns_topic_arns is not None:\n self._values[\"sns_topic_arns\"] = sns_topic_arns\n if user_role_required is not None:\n self._values[\"user_role_required\"] = user_role_required\n\n @builtins.property\n def configuration_name(self) -> builtins.str:\n '''The name of the configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname\n '''\n result = self._values.get(\"configuration_name\")\n assert result is not None, \"Required property 'configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def iam_role_arn(self) -> builtins.str:\n '''The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\n This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn\n '''\n result = self._values.get(\"iam_role_arn\")\n assert result is not None, \"Required property 'iam_role_arn' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) -> builtins.str:\n '''The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n '''\n result = self._values.get(\"slack_channel_id\")\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) -> builtins.str:\n '''The ID of the Slack workspace authorized with AWS Chatbot .\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n '''\n result = self._values.get(\"slack_workspace_id\")\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]:\n '''The list of IAM policy ARNs that are applied as channel guardrails.\n\n The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n '''\n result = self._values.get(\"guardrail_policies\")\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def logging_level(self) -> typing.Optional[builtins.str]:\n '''Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n '''\n result = self._values.get(\"logging_level\")\n return typing.cast(typing.Optional[builtins.str], result)\n\n @builtins.property\n def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]:\n '''The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n '''\n result = self._values.get(\"sns_topic_arns\")\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def user_role_required(\n self,\n ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:\n '''Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired\n '''\n result = self._values.get(\"user_role_required\")\n return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"CfnSlackChannelConfigurationProps(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\[email protected](jsii_type=\"aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration\")\nclass ISlackChannelConfiguration(\n _IResource_c80c4260,\n _IGrantable_71c4f5de,\n _INotificationRuleTarget_faa3b79b,\n typing_extensions.Protocol,\n):\n '''Represents a Slack channel configuration.'''\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationArn\")\n def slack_channel_configuration_arn(self) -> builtins.str:\n '''The ARN of 
the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n '''\n ...\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationName\")\n def slack_channel_configuration_name(self) -> builtins.str:\n '''The name of Slack channel configuration.\n\n :attribute: true\n '''\n ...\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"role\")\n def role(self) -> typing.Optional[_IRole_235f5d8e]:\n '''The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n '''\n ...\n\n @jsii.member(jsii_name=\"addToRolePolicy\")\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:\n '''Adds a statement to the IAM role.\n\n :param statement: -\n '''\n ...\n\n @jsii.member(jsii_name=\"metric\")\n def metric(\n self,\n metric_name: builtins.str,\n *,\n account: typing.Optional[builtins.str] = None,\n color: typing.Optional[builtins.str] = None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,\n label: typing.Optional[builtins.str] = None,\n period: typing.Optional[_Duration_4839e8c3] = None,\n region: typing.Optional[builtins.str] = None,\n statistic: typing.Optional[builtins.str] = None,\n unit: typing.Optional[_Unit_61bc6f70] = None,\n ) -> _Metric_e396a4dc:\n '''Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n '''\n ...\n\n\nclass _ISlackChannelConfigurationProxy(\n jsii.proxy_for(_IResource_c80c4260), # type: ignore[misc]\n jsii.proxy_for(_IGrantable_71c4f5de), # type: ignore[misc]\n jsii.proxy_for(_INotificationRuleTarget_faa3b79b), # type: ignore[misc]\n):\n '''Represents a Slack channel configuration.'''\n\n __jsii_type__: typing.ClassVar[str] = \"aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration\"\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationArn\")\n def slack_channel_configuration_arn(self) -> builtins.str:\n '''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n '''\n return typing.cast(builtins.str, jsii.get(self, \"slackChannelConfigurationArn\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationName\")\n def slack_channel_configuration_name(self) -> builtins.str:\n '''The name of Slack channel configuration.\n\n :attribute: true\n '''\n return typing.cast(builtins.str, jsii.get(self, \"slackChannelConfigurationName\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"role\")\n def role(self) -> typing.Optional[_IRole_235f5d8e]:\n '''The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n '''\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self, \"role\"))\n\n @jsii.member(jsii_name=\"addToRolePolicy\")\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:\n '''Adds a statement to the IAM role.\n\n :param statement: -\n '''\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.add_to_role_policy)\n check_type(argname=\"argument statement\", value=statement, expected_type=type_hints[\"statement\"])\n return typing.cast(None, jsii.invoke(self, \"addToRolePolicy\", [statement]))\n\n @jsii.member(jsii_name=\"metric\")\n def metric(\n self,\n metric_name: builtins.str,\n *,\n account: typing.Optional[builtins.str] = None,\n color: typing.Optional[builtins.str] = None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,\n label: typing.Optional[builtins.str] = None,\n period: typing.Optional[_Duration_4839e8c3] = None,\n region: typing.Optional[builtins.str] = None,\n statistic: typing.Optional[builtins.str] = None,\n unit: typing.Optional[_Unit_61bc6f70] = None,\n ) -> _Metric_e396a4dc:\n '''Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. 
Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n '''\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.metric)\n check_type(argname=\"argument metric_name\", value=metric_name, expected_type=type_hints[\"metric_name\"])\n props = _MetricOptions_1788b62f(\n account=account,\n color=color,\n dimensions_map=dimensions_map,\n label=label,\n period=period,\n region=region,\n statistic=statistic,\n unit=unit,\n )\n\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, \"metric\", [metric_name, props]))\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the interface\ntyping.cast(typing.Any, ISlackChannelConfiguration).__jsii_proxy_class__ = lambda : _ISlackChannelConfigurationProxy\n\n\[email protected](jsii_type=\"aws-cdk-lib.aws_chatbot.LoggingLevel\")\nclass LoggingLevel(enum.Enum):\n '''Logging levels include ERROR, INFO, or NONE.'''\n\n ERROR = \"ERROR\"\n '''ERROR.'''\n INFO = \"INFO\"\n '''INFO.'''\n NONE = \"NONE\"\n '''NONE.'''\n\n\[email protected](ISlackChannelConfiguration)\nclass SlackChannelConfiguration(\n _Resource_45bc6135,\n metaclass=jsii.JSIIMeta,\n jsii_type=\"aws-cdk-lib.aws_chatbot.SlackChannelConfiguration\",\n):\n '''A new Slack channel configuration.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n '''\n\n def __init__(\n self,\n scope: constructs.Construct,\n id: builtins.str,\n *,\n slack_channel_configuration_name: builtins.str,\n slack_channel_id: builtins.str,\n slack_workspace_id: builtins.str,\n logging_level: typing.Optional[LoggingLevel] = None,\n log_retention: typing.Optional[_RetentionDays_070f99f0] = None,\n log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14] = None,\n log_retention_role: typing.Optional[_IRole_235f5d8e] = None,\n notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]] = None,\n role: typing.Optional[_IRole_235f5d8e] = None,\n ) -> None:\n '''\n :param scope: -\n :param id: -\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. Default: - A role will be created.\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.__init__)\n check_type(argname=\"argument scope\", value=scope, expected_type=type_hints[\"scope\"])\n check_type(argname=\"argument id\", value=id, expected_type=type_hints[\"id\"])\n props = SlackChannelConfigurationProps(\n slack_channel_configuration_name=slack_channel_configuration_name,\n slack_channel_id=slack_channel_id,\n slack_workspace_id=slack_workspace_id,\n logging_level=logging_level,\n log_retention=log_retention,\n log_retention_retry_options=log_retention_retry_options,\n log_retention_role=log_retention_role,\n notification_topics=notification_topics,\n role=role,\n )\n\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name=\"fromSlackChannelConfigurationArn\") # type: ignore[misc]\n @builtins.classmethod\n def from_slack_channel_configuration_arn(\n cls,\n scope: constructs.Construct,\n id: builtins.str,\n slack_channel_configuration_arn: builtins.str,\n ) -> ISlackChannelConfiguration:\n '''Import an existing Slack channel configuration provided an ARN.\n\n :param scope: The parent creating construct.\n :param id: The construct's name.\n :param slack_channel_configuration_arn: configuration ARN (i.e. 
arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).\n\n :return: a reference to the existing Slack channel configuration\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.from_slack_channel_configuration_arn)\n check_type(argname=\"argument scope\", value=scope, expected_type=type_hints[\"scope\"])\n check_type(argname=\"argument id\", value=id, expected_type=type_hints[\"id\"])\n check_type(argname=\"argument slack_channel_configuration_arn\", value=slack_channel_configuration_arn, expected_type=type_hints[\"slack_channel_configuration_arn\"])\n return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls, \"fromSlackChannelConfigurationArn\", [scope, id, slack_channel_configuration_arn]))\n\n @jsii.member(jsii_name=\"metricAll\") # type: ignore[misc]\n @builtins.classmethod\n def metric_all(\n cls,\n metric_name: builtins.str,\n *,\n account: typing.Optional[builtins.str] = None,\n color: typing.Optional[builtins.str] = None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,\n label: typing.Optional[builtins.str] = None,\n period: typing.Optional[_Duration_4839e8c3] = None,\n region: typing.Optional[builtins.str] = None,\n statistic: typing.Optional[builtins.str] = None,\n unit: typing.Optional[_Unit_61bc6f70] = None,\n ) -> _Metric_e396a4dc:\n '''Return the given named metric for All SlackChannelConfigurations.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.metric_all)\n check_type(argname=\"argument metric_name\", value=metric_name, expected_type=type_hints[\"metric_name\"])\n props = _MetricOptions_1788b62f(\n account=account,\n color=color,\n dimensions_map=dimensions_map,\n label=label,\n period=period,\n region=region,\n statistic=statistic,\n unit=unit,\n )\n\n return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, \"metricAll\", [metric_name, props]))\n\n @jsii.member(jsii_name=\"addNotificationTopic\")\n def add_notification_topic(self, notification_topic: _ITopic_9eca4852) -> None:\n '''Adds a SNS topic that deliver notifications to AWS Chatbot.\n\n :param notification_topic: -\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.add_notification_topic)\n check_type(argname=\"argument notification_topic\", value=notification_topic, expected_type=type_hints[\"notification_topic\"])\n return typing.cast(None, jsii.invoke(self, \"addNotificationTopic\", [notification_topic]))\n\n @jsii.member(jsii_name=\"addToRolePolicy\")\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:\n '''Adds extra permission to iam-role of Slack channel configuration.\n\n :param statement: -\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.add_to_role_policy)\n check_type(argname=\"argument statement\", value=statement, expected_type=type_hints[\"statement\"])\n return typing.cast(None, jsii.invoke(self, \"addToRolePolicy\", [statement]))\n\n @jsii.member(jsii_name=\"bindAsNotificationRuleTarget\")\n def bind_as_notification_rule_target(\n self,\n _scope: constructs.Construct,\n ) -> _NotificationRuleTargetConfig_ea27e095:\n '''Returns a target configuration for notification rule.\n\n :param _scope: -\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.bind_as_notification_rule_target)\n check_type(argname=\"argument _scope\", value=_scope, expected_type=type_hints[\"_scope\"])\n return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.invoke(self, \"bindAsNotificationRuleTarget\", [_scope]))\n\n @jsii.member(jsii_name=\"metric\")\n def metric(\n self,\n metric_name: builtins.str,\n *,\n account: typing.Optional[builtins.str] = None,\n color: typing.Optional[builtins.str] = None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,\n label: typing.Optional[builtins.str] = None,\n period: typing.Optional[_Duration_4839e8c3] = None,\n region: typing.Optional[builtins.str] = None,\n statistic: typing.Optional[builtins.str] = None,\n unit: typing.Optional[_Unit_61bc6f70] = None,\n ) -> _Metric_e396a4dc:\n '''Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. 
For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.metric)\n check_type(argname=\"argument metric_name\", value=metric_name, expected_type=type_hints[\"metric_name\"])\n props = _MetricOptions_1788b62f(\n account=account,\n color=color,\n dimensions_map=dimensions_map,\n label=label,\n period=period,\n region=region,\n statistic=statistic,\n unit=unit,\n )\n\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, \"metric\", [metric_name, props]))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"grantPrincipal\")\n def grant_principal(self) -> _IPrincipal_539bb2fd:\n '''The principal to grant permissions to.'''\n return typing.cast(_IPrincipal_539bb2fd, jsii.get(self, \"grantPrincipal\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationArn\")\n def slack_channel_configuration_arn(self) -> builtins.str:\n '''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.'''\n return typing.cast(builtins.str, jsii.get(self, \"slackChannelConfigurationArn\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationName\")\n def slack_channel_configuration_name(self) -> builtins.str:\n '''The name of Slack channel configuration.'''\n return typing.cast(builtins.str, jsii.get(self, \"slackChannelConfigurationName\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"role\")\n def role(self) -> typing.Optional[_IRole_235f5d8e]:\n '''The permission role of Slack channel configuration.'''\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self, \"role\"))\n\n\[email protected]_type(\n jsii_type=\"aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps\",\n jsii_struct_bases=[],\n name_mapping={\n \"slack_channel_configuration_name\": \"slackChannelConfigurationName\",\n \"slack_channel_id\": \"slackChannelId\",\n \"slack_workspace_id\": \"slackWorkspaceId\",\n \"logging_level\": \"loggingLevel\",\n \"log_retention\": \"logRetention\",\n \"log_retention_retry_options\": \"logRetentionRetryOptions\",\n \"log_retention_role\": \"logRetentionRole\",\n \"notification_topics\": \"notificationTopics\",\n \"role\": \"role\",\n },\n)\nclass SlackChannelConfigurationProps:\n def __init__(\n self,\n *,\n slack_channel_configuration_name: builtins.str,\n slack_channel_id: builtins.str,\n 
slack_workspace_id: builtins.str,\n logging_level: typing.Optional[LoggingLevel] = None,\n log_retention: typing.Optional[_RetentionDays_070f99f0] = None,\n log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14] = None,\n log_retention_role: typing.Optional[_IRole_235f5d8e] = None,\n notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]] = None,\n role: typing.Optional[_IRole_235f5d8e] = None,\n ) -> None:\n '''Properties for a new Slack channel configuration.\n\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. 
Default: - A role will be created.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n '''\n if isinstance(log_retention_retry_options, dict):\n log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**log_retention_retry_options)\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfigurationProps.__init__)\n check_type(argname=\"argument slack_channel_configuration_name\", value=slack_channel_configuration_name, expected_type=type_hints[\"slack_channel_configuration_name\"])\n check_type(argname=\"argument slack_channel_id\", value=slack_channel_id, expected_type=type_hints[\"slack_channel_id\"])\n check_type(argname=\"argument slack_workspace_id\", value=slack_workspace_id, expected_type=type_hints[\"slack_workspace_id\"])\n check_type(argname=\"argument logging_level\", value=logging_level, expected_type=type_hints[\"logging_level\"])\n check_type(argname=\"argument log_retention\", value=log_retention, expected_type=type_hints[\"log_retention\"])\n check_type(argname=\"argument log_retention_retry_options\", value=log_retention_retry_options, expected_type=type_hints[\"log_retention_retry_options\"])\n check_type(argname=\"argument log_retention_role\", value=log_retention_role, expected_type=type_hints[\"log_retention_role\"])\n check_type(argname=\"argument notification_topics\", value=notification_topics, expected_type=type_hints[\"notification_topics\"])\n check_type(argname=\"argument role\", value=role, expected_type=type_hints[\"role\"])\n self._values: typing.Dict[str, typing.Any] = {\n \"slack_channel_configuration_name\": slack_channel_configuration_name,\n \"slack_channel_id\": slack_channel_id,\n \"slack_workspace_id\": slack_workspace_id,\n }\n if logging_level is not None:\n self._values[\"logging_level\"] = logging_level\n if log_retention is not None:\n self._values[\"log_retention\"] = log_retention\n if log_retention_retry_options is not None:\n self._values[\"log_retention_retry_options\"] = log_retention_retry_options\n if log_retention_role is not None:\n self._values[\"log_retention_role\"] = log_retention_role\n if notification_topics is not None:\n self._values[\"notification_topics\"] = notification_topics\n if role is not None:\n self._values[\"role\"] = role\n\n @builtins.property\n def slack_channel_configuration_name(self) -> builtins.str:\n '''The name of Slack channel configuration.'''\n result = self._values.get(\"slack_channel_configuration_name\")\n assert result is not None, \"Required property 'slack_channel_configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) -> builtins.str:\n '''The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.\n The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ.\n '''\n result = self._values.get(\"slack_channel_id\")\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) -> builtins.str:\n '''The ID of the Slack workspace authorized with AWS Chatbot.\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.\n Then you can copy and paste the workspace ID from the console.\n For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n\n :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro\n '''\n result = self._values.get(\"slack_workspace_id\")\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def logging_level(self) -> typing.Optional[LoggingLevel]:\n '''Specifies the logging level for this configuration.\n\n This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n :default: LoggingLevel.NONE\n '''\n result = self._values.get(\"logging_level\")\n return typing.cast(typing.Optional[LoggingLevel], result)\n\n @builtins.property\n def log_retention(self) -> typing.Optional[_RetentionDays_070f99f0]:\n '''The number of days log events are kept in CloudWatch Logs.\n\n When updating\n this property, unsetting it doesn't remove the log retention policy. To\n remove the retention policy, set the value to ``INFINITE``.\n\n :default: logs.RetentionDays.INFINITE\n '''\n result = self._values.get(\"log_retention\")\n return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)\n\n @builtins.property\n def log_retention_retry_options(\n self,\n ) -> typing.Optional[_LogRetentionRetryOptions_62d80a14]:\n '''When log retention is specified, a custom resource attempts to create the CloudWatch log group.\n\n These options control the retry policy when interacting with CloudWatch APIs.\n\n :default: - Default AWS SDK retry options.\n '''\n result = self._values.get(\"log_retention_retry_options\")\n return typing.cast(typing.Optional[_LogRetentionRetryOptions_62d80a14], result)\n\n @builtins.property\n def log_retention_role(self) -> typing.Optional[_IRole_235f5d8e]:\n '''The IAM role for the Lambda function associated with the custom resource that sets the retention policy.\n\n :default: - A new role is created.\n '''\n result = self._values.get(\"log_retention_role\")\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n @builtins.property\n def notification_topics(self) -> typing.Optional[typing.List[_ITopic_9eca4852]]:\n '''The SNS topics that deliver notifications to AWS Chatbot.\n\n :default: None\n '''\n result = self._values.get(\"notification_topics\")\n return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]], result)\n\n @builtins.property\n def role(self) -> typing.Optional[_IRole_235f5d8e]:\n '''The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n '''\n result = self._values.get(\"role\")\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"SlackChannelConfigurationProps(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in 
self._values.items()\n )\n\n\n__all__ = [\n \"CfnSlackChannelConfiguration\",\n \"CfnSlackChannelConfigurationProps\",\n \"ISlackChannelConfiguration\",\n \"LoggingLevel\",\n \"SlackChannelConfiguration\",\n \"SlackChannelConfigurationProps\",\n]\n\npublication.publish()\n",
"step-ids": [
39,
61,
66,
75,
85
]
}
|
[
39,
61,
66,
75,
85
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-31 07:54
from __future__ import unicode_literals
import codenerix.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('codenerix_products', '0005_remove_product_name'),
]
operations = [
migrations.CreateModel(
name='BrandTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),
('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),
('public', models.BooleanField(default=False, verbose_name='Public')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BrandTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),
('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),
('public', models.BooleanField(default=False, verbose_name='Public')),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='brand',
name='name',
),
migrations.RemoveField(
model_name='brand',
name='slug',
),
migrations.AddField(
model_name='brandtextes',
name='brand',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='es', to='codenerix_products.Brand'),
),
migrations.AddField(
model_name='brandtexten',
name='brand',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='en', to='codenerix_products.Brand'),
),
]
|
normal
|
{
"blob_id": "0aed35827e6579f7a9434d252d0b9150ab24adf9",
"index": 4573,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('codenerix_products', '0005_remove_product_name')]\n operations = [migrations.CreateModel(name='BrandTextEN', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created', models.DateTimeField(\n auto_now_add=True, verbose_name='Created')), ('updated', models.\n DateTimeField(auto_now=True, verbose_name='Updated')), (\n 'meta_title', models.CharField(blank=True, max_length=70, null=True,\n verbose_name='Meta Title')), ('meta_description', models.CharField(\n blank=True, max_length=70, null=True, verbose_name=\n 'Meta Description')), ('description_short', codenerix.fields.\n WysiwygAngularField(blank=True, null=True, verbose_name=\n 'Description short')), ('description_long', codenerix.fields.\n WysiwygAngularField(blank=True, null=True, verbose_name=\n 'Description long')), ('slug', models.CharField(max_length=250,\n unique=True, verbose_name='Slug')), ('name', models.CharField(blank\n =True, max_length=250, null=True, verbose_name='Name')), ('public',\n models.BooleanField(default=False, verbose_name='Public'))],\n options={'abstract': False}), migrations.CreateModel(name=\n 'BrandTextES', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('created',\n models.DateTimeField(auto_now_add=True, verbose_name='Created')), (\n 'updated', models.DateTimeField(auto_now=True, verbose_name=\n 'Updated')), ('meta_title', models.CharField(blank=True, max_length\n =70, null=True, verbose_name='Meta Title')), ('meta_description',\n models.CharField(blank=True, max_length=70, null=True, verbose_name\n ='Meta Description')), ('description_short', codenerix.fields.\n WysiwygAngularField(blank=True, null=True, verbose_name=\n 'Description short')), ('description_long', codenerix.fields.\n WysiwygAngularField(blank=True, null=True, verbose_name=\n 'Description long')), ('slug', models.CharField(max_length=250,\n unique=True, verbose_name='Slug')), ('name', models.CharField(blank\n =True, max_length=250, null=True, verbose_name='Name')), ('public',\n models.BooleanField(default=False, verbose_name='Public'))],\n options={'abstract': False}), migrations.RemoveField(model_name=\n 'brand', name='name'), migrations.RemoveField(model_name='brand',\n name='slug'), migrations.AddField(model_name='brandtextes', name=\n 'brand', field=models.OneToOneField(on_delete=django.db.models.\n deletion.CASCADE, related_name='es', to='codenerix_products.Brand')\n ), migrations.AddField(model_name='brandtexten', name='brand',\n field=models.OneToOneField(on_delete=django.db.models.deletion.\n CASCADE, related_name='en', to='codenerix_products.Brand'))]\n",
"step-4": "from __future__ import unicode_literals\nimport codenerix.fields\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('codenerix_products', '0005_remove_product_name')]\n operations = [migrations.CreateModel(name='BrandTextEN', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created', models.DateTimeField(\n auto_now_add=True, verbose_name='Created')), ('updated', models.\n DateTimeField(auto_now=True, verbose_name='Updated')), (\n 'meta_title', models.CharField(blank=True, max_length=70, null=True,\n verbose_name='Meta Title')), ('meta_description', models.CharField(\n blank=True, max_length=70, null=True, verbose_name=\n 'Meta Description')), ('description_short', codenerix.fields.\n WysiwygAngularField(blank=True, null=True, verbose_name=\n 'Description short')), ('description_long', codenerix.fields.\n WysiwygAngularField(blank=True, null=True, verbose_name=\n 'Description long')), ('slug', models.CharField(max_length=250,\n unique=True, verbose_name='Slug')), ('name', models.CharField(blank\n =True, max_length=250, null=True, verbose_name='Name')), ('public',\n models.BooleanField(default=False, verbose_name='Public'))],\n options={'abstract': False}), migrations.CreateModel(name=\n 'BrandTextES', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('created',\n models.DateTimeField(auto_now_add=True, verbose_name='Created')), (\n 'updated', models.DateTimeField(auto_now=True, verbose_name=\n 'Updated')), ('meta_title', models.CharField(blank=True, max_length\n =70, null=True, verbose_name='Meta Title')), ('meta_description',\n models.CharField(blank=True, max_length=70, null=True, verbose_name\n ='Meta Description')), ('description_short', codenerix.fields.\n WysiwygAngularField(blank=True, null=True, verbose_name=\n 'Description short')), ('description_long', codenerix.fields.\n WysiwygAngularField(blank=True, null=True, verbose_name=\n 'Description long')), ('slug', models.CharField(max_length=250,\n unique=True, verbose_name='Slug')), ('name', models.CharField(blank\n =True, max_length=250, null=True, verbose_name='Name')), ('public',\n models.BooleanField(default=False, verbose_name='Public'))],\n options={'abstract': False}), migrations.RemoveField(model_name=\n 'brand', name='name'), migrations.RemoveField(model_name='brand',\n name='slug'), migrations.AddField(model_name='brandtextes', name=\n 'brand', field=models.OneToOneField(on_delete=django.db.models.\n deletion.CASCADE, related_name='es', to='codenerix_products.Brand')\n ), migrations.AddField(model_name='brandtexten', name='brand',\n field=models.OneToOneField(on_delete=django.db.models.deletion.\n CASCADE, related_name='en', to='codenerix_products.Brand'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-03-31 07:54\nfrom __future__ import unicode_literals\n\nimport codenerix.fields\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('codenerix_products', '0005_remove_product_name'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BrandTextEN',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),\n ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),\n ('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),\n ('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),\n ('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),\n ('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),\n ('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),\n ('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),\n ('public', models.BooleanField(default=False, verbose_name='Public')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='BrandTextES',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),\n ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),\n ('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),\n ('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),\n ('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),\n ('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),\n ('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),\n ('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),\n ('public', models.BooleanField(default=False, verbose_name='Public')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.RemoveField(\n model_name='brand',\n name='name',\n ),\n migrations.RemoveField(\n model_name='brand',\n name='slug',\n ),\n migrations.AddField(\n model_name='brandtextes',\n name='brand',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='es', to='codenerix_products.Brand'),\n ),\n migrations.AddField(\n model_name='brandtexten',\n name='brand',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='en', to='codenerix_products.Brand'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class BrandReg(BrandRegBasic):
def __init__(self, base_folder, log_instance, input_lst=None):
super(BrandReg, self).__init__(base_folder, log_instance)
input_file = base_folder + '/dp_brands_result.txt'
if not os.path.exists(input_file):
raise Exception('%s does not exist!' % input_file)
output_file = base_folder + '/dp_brands_result.txt.brandreg'
self._input_p = input_file
self._input_lst = input_lst
self._output_p = output_file
def _brand_exchange(self, ori_brand):
if ori_brand in self.exchange_brand_pair:
return self.exchange_brand_pair[ori_brand]
else:
return ori_brand
def brand_reg(self):
stp1_lst = []
idx = 0
        if self._input_lst is not None and len(self._input_lst) > 0:
self.logger.info('增量数据处理')
for line in self._input_lst:
idx += 1
if idx % 10000 == 0:
self.logger.info(idx)
line = line.strip()
r = self.brand_rewrite(line)
if r is None:
continue
stp1_lst.append(r)
elif os.path.exists(self._input_p):
f_input = open(self._input_p)
for line in f_input:
idx += 1
if idx % 100000 == 0:
self.logger.info(idx)
line = line.strip()
r = self.brand_rewrite(line)
if r is None:
continue
stp1_lst.append(r)
f_input.close()
else:
raise Exception('输入增量数据为空!!!')
if len(stp1_lst) < 1:
raise Exception('增量数据处理后数据为空!!!')
with open(self._output_p, 'w') as f3:
f3.write('\n'.join(stp1_lst))
f3.flush()
def _real_brand_reg(self, s_name):
tmp_brand = None
"""
attention: 这一步可能出现问题,
比如:东方骆驼,骆驼,
在real_brand.txt文件中,如果【骆驼】出现在【东方骆驼】前面,
那么将导致【东方骆驼】变为【骆驼】
"""
for r_b in self.real_brand_set:
lst5 = s_name.split(r_b)
if len(lst5) > 1:
tmp_brand = r_b
break
return tmp_brand
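    # The ordering issue noted in _real_brand_reg above could be avoided by
    # matching the longest brand first; a minimal sketch (an assumption, not
    # part of the original pipeline) would be:
    #
    #     for r_b in sorted(self.real_brand_set, key=len, reverse=True):
    #         if r_b in s_name:
    #             return r_b
    #     return None
    #
    # so that 东方骆驼 is matched before its substring 骆驼.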
def brand_rewrite(self, line):
line = line.strip()
if line == '':
self.logger.info('empty string!!')
return None
lst1 = line.split('\x01')
if len(lst1) == 3:
s_id, ori_name, s_brand = lst1
s_brand = s_brand.strip()
else:
self.logger.info('brand_rewrite error data: %s' % line)
return None
s_name = tool.s_name_dealing(ori_name)
if len(self.real_brand_set) > 0:
if s_brand not in self.real_brand_set:
ex_brand = self._real_brand_reg(s_name)
                tmp_brand = ex_brand if ex_brand is not None else s_brand
else:
tmp_brand = s_brand
else:
tmp_brand = s_brand
r_brand = self._brand_exchange(tmp_brand)
if r_brand in self.del_brand_dict:
r_brand = s_name
return '\x01'.join([s_id, ori_name, r_brand])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BrandRegBasic(object):
def __init__(self, base_folder, log_instance):
        if not os.path.exists(base_folder):
            raise Exception('%s does not exist!' % base_folder)
        self._real_brand_p = base_folder + '/real_brand.txt'
        if not os.path.exists(self._real_brand_p):
            raise Exception('%s does not exist!' % self._real_brand_p)
        self._error_p = base_folder + '/error.txt'
        if not os.path.exists(self._error_p):
            raise Exception('%s does not exist!' % self._error_p)
        self._word_dict_p = base_folder + '/word_dict.txt'
        if not os.path.exists(self._word_dict_p):
            raise Exception('%s does not exist!' % self._word_dict_p)
        self._del_brand_p = base_folder + '/del_brand.txt'
        if not os.path.exists(self._del_brand_p):
            raise Exception('%s does not exist!' % self._del_brand_p)
self.logger = log_instance
self.logger.info('get_real_brand')
self.real_brand_set = self._get_real_brand()
self.logger.info('get_exchange_brand_pair')
self.exchange_brand_pair = self._get_exchange_brand_pair()
self.logger.info('get_del_brand')
self.del_brand_dict = self._get_del_brand()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _brand_pair_checking(self, exchange_dict):
s1 = set(list(exchange_dict.keys()))
s2 = set(list(exchange_dict.values()))
s3 = s1 & s2
if len(s3) > 0:
self.logger.error(
'exchang-brand-pair has error, error brands is: %s' % '\t'.
join(list(s3)))
return False, s3
else:
return True, None
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class BrandReg(BrandRegBasic):
def __init__(self, base_folder, log_instance, input_lst=None):
super(BrandReg, self).__init__(base_folder, log_instance)
input_file = base_folder + '/dp_brands_result.txt'
if not os.path.exists(input_file):
raise Exception('%s does not exist!' % input_file)
output_file = base_folder + '/dp_brands_result.txt.brandreg'
self._input_p = input_file
self._input_lst = input_lst
self._output_p = output_file
def _brand_exchange(self, ori_brand):
if ori_brand in self.exchange_brand_pair:
return self.exchange_brand_pair[ori_brand]
else:
return ori_brand
def brand_reg(self):
stp1_lst = []
idx = 0
if self._input_lst != None and len(self._input_lst) > 0:
self.logger.info('增量数据处理')
for line in self._input_lst:
idx += 1
if idx % 10000 == 0:
self.logger.info(idx)
line = line.strip()
r = self.brand_rewrite(line)
if r is None:
continue
stp1_lst.append(r)
elif os.path.exists(self._input_p):
f_input = open(self._input_p)
for line in f_input:
idx += 1
if idx % 100000 == 0:
self.logger.info(idx)
line = line.strip()
r = self.brand_rewrite(line)
if r is None:
continue
stp1_lst.append(r)
f_input.close()
else:
raise Exception('输入增量数据为空!!!')
if len(stp1_lst) < 1:
raise Exception('增量数据处理后数据为空!!!')
with open(self._output_p, 'w') as f3:
f3.write('\n'.join(stp1_lst))
f3.flush()
def _real_brand_reg(self, s_name):
tmp_brand = None
"""
        attention: this step can go wrong.
        For example, with the brands 东方骆驼 and 骆驼:
        if 骆驼 appears before 东方骆驼 in real_brand.txt,
        then 东方骆驼 will be rewritten as 骆驼.
"""
for r_b in self.real_brand_set:
lst5 = s_name.split(r_b)
if len(lst5) > 1:
tmp_brand = r_b
break
return tmp_brand
def brand_rewrite(self, line):
line = line.strip()
if line == '':
self.logger.info('empty string!!')
return None
lst1 = line.split('\x01')
if len(lst1) == 3:
s_id, ori_name, s_brand = lst1
s_brand = s_brand.strip()
else:
self.logger.info('brand_rewrite error data: %s' % line)
return None
s_name = tool.s_name_dealing(ori_name)
if len(self.real_brand_set) > 0:
if s_brand not in self.real_brand_set:
ex_brand = self._real_brand_reg(s_name)
tmp_brand = ex_brand if ex_brand != None else s_brand
else:
tmp_brand = s_brand
else:
tmp_brand = s_brand
r_brand = self._brand_exchange(tmp_brand)
if r_brand in self.del_brand_dict:
r_brand = s_name
return '\x01'.join([s_id, ori_name, r_brand])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BrandRegBasic(object):
def __init__(self, base_folder, log_instance):
if not os.path.exists(base_folder):
raise Exception('%s does not exists!' % base_folder)
self._real_brand_p = base_folder + '/real_brand.txt'
if not os.path.exists(self._real_brand_p):
raise Exception('%s does not exists!' % self._real_brand_p)
self._error_p = base_folder + '/error.txt'
if not os.path.exists(self._error_p):
raise Exception('%s does not exists!' % self._error_p)
self._word_dict_p = base_folder + '/word_dict.txt'
if not os.path.exists(self._word_dict_p):
raise Exception('%s does not exists!' % self._word_dict_p)
self._del_brand_p = base_folder + '/del_brand.txt'
if not os.path.exists(self._del_brand_p):
raise Exception('%s does not exists!' % self._del_brand_p)
self.logger = log_instance
self.logger.info('get_real_brand')
self.real_brand_set = self._get_real_brand()
self.logger.info('get_exchange_brand_pair')
self.exchange_brand_pair = self._get_exchange_brand_pair()
self.logger.info('get_del_brand')
self.del_brand_dict = self._get_del_brand()
<|reserved_special_token_0|>
def _brand_pair_correction(self, exchange_dict, conflict_brand_set):
tmp_dict = {}
for k, v in exchange_dict.items():
if k in conflict_brand_set:
right_brand = exchange_dict[k]
for k1, v1 in exchange_dict.items():
if v1 == k:
tmp_dict[k1] = right_brand
exchange_dict_ext = {}
for k2, v2 in exchange_dict.items():
if k2 == v2:
continue
if k2 in conflict_brand_set:
continue
if k2 in tmp_dict:
exchange_dict_ext[k2] = tmp_dict[k2]
else:
exchange_dict_ext[k2] = v2
return exchange_dict_ext
def _brand_pair_checking(self, exchange_dict):
s1 = set(list(exchange_dict.keys()))
s2 = set(list(exchange_dict.values()))
s3 = s1 & s2
if len(s3) > 0:
self.logger.error(
'exchang-brand-pair has error, error brands is: %s' % '\t'.
join(list(s3)))
return False, s3
else:
return True, None
def _get_exchange_brand_pair(self):
exchange_dict = {}
def _line_deal(line):
line = line.strip()
if line == '':
return
lst1 = line.split('|')
if len(lst1) != 2:
self.logger.info('wrong brand pair: %s' % line)
return
lst1 = [z.strip() for z in lst1]
if lst1[0] != lst1[1]:
exchange_dict[lst1[0]] = lst1[1]
if not os.path.exists(self._error_p):
self.logger.info('%s does not exist!' % self._real_brand_p)
else:
with open(self._error_p) as f1:
for line in f1:
_line_deal(line)
self.logger.info('len of exchang_brand_pair: %s' % len(
exchange_dict))
if not os.path.exists(self._word_dict_p):
self.logger.info('%s does not exist!' % self._real_brand_p)
else:
with open(self._word_dict_p) as f1:
for line in f1:
_line_deal(line)
self.logger.info('len of exchang_brand_pair: %s' % len(
exchange_dict))
chk_flag, conflict_brand_set = self._brand_pair_checking(exchange_dict)
if not chk_flag:
err_s = 'exchang-brand-pair error: %s' % '\t'.join(list(
conflict_brand_set))
self.logger.error(err_s)
raise Exception(err_s)
return exchange_dict
def _get_del_brand(self):
if not os.path.exists(self._del_brand_p):
raise Exception('%s does not exist!' % self._real_brand_p)
del_dict = {}
with open(self._del_brand_p) as f1:
for line in f1:
line = line.strip()
if line == '':
continue
del_dict[line] = 0
self.logger.info('len of del_brand: %s' % len(del_dict))
return del_dict
class BrandReg(BrandRegBasic):
def __init__(self, base_folder, log_instance, input_lst=None):
super(BrandReg, self).__init__(base_folder, log_instance)
input_file = base_folder + '/dp_brands_result.txt'
if not os.path.exists(input_file):
raise Exception('%s does not exist!' % input_file)
output_file = base_folder + '/dp_brands_result.txt.brandreg'
self._input_p = input_file
self._input_lst = input_lst
self._output_p = output_file
def _brand_exchange(self, ori_brand):
if ori_brand in self.exchange_brand_pair:
return self.exchange_brand_pair[ori_brand]
else:
return ori_brand
def brand_reg(self):
stp1_lst = []
idx = 0
if self._input_lst != None and len(self._input_lst) > 0:
self.logger.info('增量数据处理')
for line in self._input_lst:
idx += 1
if idx % 10000 == 0:
self.logger.info(idx)
line = line.strip()
r = self.brand_rewrite(line)
if r is None:
continue
stp1_lst.append(r)
elif os.path.exists(self._input_p):
f_input = open(self._input_p)
for line in f_input:
idx += 1
if idx % 100000 == 0:
self.logger.info(idx)
line = line.strip()
r = self.brand_rewrite(line)
if r is None:
continue
stp1_lst.append(r)
f_input.close()
else:
raise Exception('输入增量数据为空!!!')
if len(stp1_lst) < 1:
raise Exception('增量数据处理后数据为空!!!')
with open(self._output_p, 'w') as f3:
f3.write('\n'.join(stp1_lst))
f3.flush()
def _real_brand_reg(self, s_name):
tmp_brand = None
"""
        attention: this step can go wrong.
        For example, with the brands 东方骆驼 and 骆驼:
        if 骆驼 appears before 东方骆驼 in real_brand.txt,
        then 东方骆驼 will be rewritten as 骆驼.
"""
for r_b in self.real_brand_set:
lst5 = s_name.split(r_b)
if len(lst5) > 1:
tmp_brand = r_b
break
return tmp_brand
def brand_rewrite(self, line):
line = line.strip()
if line == '':
self.logger.info('empty string!!')
return None
lst1 = line.split('\x01')
if len(lst1) == 3:
s_id, ori_name, s_brand = lst1
s_brand = s_brand.strip()
else:
self.logger.info('brand_rewrite error data: %s' % line)
return None
s_name = tool.s_name_dealing(ori_name)
if len(self.real_brand_set) > 0:
if s_brand not in self.real_brand_set:
ex_brand = self._real_brand_reg(s_name)
tmp_brand = ex_brand if ex_brand != None else s_brand
else:
tmp_brand = s_brand
else:
tmp_brand = s_brand
r_brand = self._brand_exchange(tmp_brand)
if r_brand in self.del_brand_dict:
r_brand = s_name
return '\x01'.join([s_id, ori_name, r_brand])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BrandRegBasic(object):
def __init__(self, base_folder, log_instance):
if not os.path.exists(base_folder):
raise Exception('%s does not exists!' % base_folder)
self._real_brand_p = base_folder + '/real_brand.txt'
if not os.path.exists(self._real_brand_p):
raise Exception('%s does not exists!' % self._real_brand_p)
self._error_p = base_folder + '/error.txt'
if not os.path.exists(self._error_p):
raise Exception('%s does not exists!' % self._error_p)
self._word_dict_p = base_folder + '/word_dict.txt'
if not os.path.exists(self._word_dict_p):
raise Exception('%s does not exists!' % self._word_dict_p)
self._del_brand_p = base_folder + '/del_brand.txt'
if not os.path.exists(self._del_brand_p):
raise Exception('%s does not exists!' % self._del_brand_p)
self.logger = log_instance
self.logger.info('get_real_brand')
self.real_brand_set = self._get_real_brand()
self.logger.info('get_exchange_brand_pair')
self.exchange_brand_pair = self._get_exchange_brand_pair()
self.logger.info('get_del_brand')
self.del_brand_dict = self._get_del_brand()
def _get_real_brand(self):
if not os.path.exists(self._real_brand_p):
raise Exception('%s does not exist!' % self._real_brand_p)
real_brand_set = set()
with open(self._real_brand_p) as f1:
for line in f1:
line = line.strip()
if line == '':
continue
real_brand_set.add(line)
self.logger.info('len of real_brand: %s' % len(real_brand_set))
return real_brand_set
def _brand_pair_correction(self, exchange_dict, conflict_brand_set):
tmp_dict = {}
for k, v in exchange_dict.items():
if k in conflict_brand_set:
right_brand = exchange_dict[k]
for k1, v1 in exchange_dict.items():
if v1 == k:
tmp_dict[k1] = right_brand
exchange_dict_ext = {}
for k2, v2 in exchange_dict.items():
if k2 == v2:
continue
if k2 in conflict_brand_set:
continue
if k2 in tmp_dict:
exchange_dict_ext[k2] = tmp_dict[k2]
else:
exchange_dict_ext[k2] = v2
return exchange_dict_ext
def _brand_pair_checking(self, exchange_dict):
s1 = set(list(exchange_dict.keys()))
s2 = set(list(exchange_dict.values()))
s3 = s1 & s2
if len(s3) > 0:
self.logger.error(
'exchang-brand-pair has error, error brands is: %s' % '\t'.
join(list(s3)))
return False, s3
else:
return True, None
def _get_exchange_brand_pair(self):
exchange_dict = {}
def _line_deal(line):
line = line.strip()
if line == '':
return
lst1 = line.split('|')
if len(lst1) != 2:
self.logger.info('wrong brand pair: %s' % line)
return
lst1 = [z.strip() for z in lst1]
if lst1[0] != lst1[1]:
exchange_dict[lst1[0]] = lst1[1]
if not os.path.exists(self._error_p):
self.logger.info('%s does not exist!' % self._real_brand_p)
else:
with open(self._error_p) as f1:
for line in f1:
_line_deal(line)
self.logger.info('len of exchang_brand_pair: %s' % len(
exchange_dict))
if not os.path.exists(self._word_dict_p):
self.logger.info('%s does not exist!' % self._real_brand_p)
else:
with open(self._word_dict_p) as f1:
for line in f1:
_line_deal(line)
self.logger.info('len of exchang_brand_pair: %s' % len(
exchange_dict))
chk_flag, conflict_brand_set = self._brand_pair_checking(exchange_dict)
if not chk_flag:
err_s = 'exchang-brand-pair error: %s' % '\t'.join(list(
conflict_brand_set))
self.logger.error(err_s)
raise Exception(err_s)
return exchange_dict
def _get_del_brand(self):
if not os.path.exists(self._del_brand_p):
raise Exception('%s does not exist!' % self._real_brand_p)
del_dict = {}
with open(self._del_brand_p) as f1:
for line in f1:
line = line.strip()
if line == '':
continue
del_dict[line] = 0
self.logger.info('len of del_brand: %s' % len(del_dict))
return del_dict
class BrandReg(BrandRegBasic):
def __init__(self, base_folder, log_instance, input_lst=None):
super(BrandReg, self).__init__(base_folder, log_instance)
input_file = base_folder + '/dp_brands_result.txt'
if not os.path.exists(input_file):
raise Exception('%s does not exist!' % input_file)
output_file = base_folder + '/dp_brands_result.txt.brandreg'
self._input_p = input_file
self._input_lst = input_lst
self._output_p = output_file
def _brand_exchange(self, ori_brand):
if ori_brand in self.exchange_brand_pair:
return self.exchange_brand_pair[ori_brand]
else:
return ori_brand
def brand_reg(self):
stp1_lst = []
idx = 0
if self._input_lst != None and len(self._input_lst) > 0:
self.logger.info('增量数据处理')
for line in self._input_lst:
idx += 1
if idx % 10000 == 0:
self.logger.info(idx)
line = line.strip()
r = self.brand_rewrite(line)
if r is None:
continue
stp1_lst.append(r)
elif os.path.exists(self._input_p):
f_input = open(self._input_p)
for line in f_input:
idx += 1
if idx % 100000 == 0:
self.logger.info(idx)
line = line.strip()
r = self.brand_rewrite(line)
if r is None:
continue
stp1_lst.append(r)
f_input.close()
else:
raise Exception('输入增量数据为空!!!')
if len(stp1_lst) < 1:
raise Exception('增量数据处理后数据为空!!!')
with open(self._output_p, 'w') as f3:
f3.write('\n'.join(stp1_lst))
f3.flush()
def _real_brand_reg(self, s_name):
tmp_brand = None
"""
        attention: this step can go wrong.
        For example, with the brands 东方骆驼 and 骆驼:
        if 骆驼 appears before 东方骆驼 in real_brand.txt,
        then 东方骆驼 will be rewritten as 骆驼.
"""
for r_b in self.real_brand_set:
lst5 = s_name.split(r_b)
if len(lst5) > 1:
tmp_brand = r_b
break
return tmp_brand
def brand_rewrite(self, line):
line = line.strip()
if line == '':
self.logger.info('empty string!!')
return None
lst1 = line.split('\x01')
if len(lst1) == 3:
s_id, ori_name, s_brand = lst1
s_brand = s_brand.strip()
else:
self.logger.info('brand_rewrite error data: %s' % line)
return None
s_name = tool.s_name_dealing(ori_name)
if len(self.real_brand_set) > 0:
if s_brand not in self.real_brand_set:
ex_brand = self._real_brand_reg(s_name)
tmp_brand = ex_brand if ex_brand != None else s_brand
else:
tmp_brand = s_brand
else:
tmp_brand = s_brand
r_brand = self._brand_exchange(tmp_brand)
if r_brand in self.del_brand_dict:
r_brand = s_name
return '\x01'.join([s_id, ori_name, r_brand])
<|reserved_special_token_1|>
#!/usr/bin/env python3
#coding=utf-8
import sys
import os
import tool
class BrandRegBasic(object):
def __init__(self, base_folder, log_instance):
if not os.path.exists(base_folder):
raise Exception("%s does not exists!" % base_folder)
self._real_brand_p = base_folder + "/real_brand.txt"
if not os.path.exists(self._real_brand_p):
raise Exception("%s does not exists!" % self._real_brand_p)
        # Note: word_dict.txt and error.txt serve the same purpose;
        # both hold brand rewrites and share the same data format.
self._error_p = base_folder + '/error.txt'
if not os.path.exists(self._error_p):
raise Exception("%s does not exists!" % self._error_p)
self._word_dict_p = base_folder + '/word_dict.txt'
if not os.path.exists(self._word_dict_p):
raise Exception("%s does not exists!" % self._word_dict_p)
self._del_brand_p = base_folder + '/del_brand.txt'
if not os.path.exists(self._del_brand_p):
raise Exception("%s does not exists!" % self._del_brand_p)
self.logger = log_instance
self.logger.info("get_real_brand")
self.real_brand_set = self._get_real_brand()
self.logger.info("get_exchange_brand_pair")
self.exchange_brand_pair = self._get_exchange_brand_pair()
self.logger.info("get_del_brand")
self.del_brand_dict = self._get_del_brand()
    # get the set of real brands from the real_brand file
def _get_real_brand(self):
        # determine the brand based on real_brand
if not os.path.exists(self._real_brand_p):
raise Exception("%s does not exist!" % self._real_brand_p)
real_brand_set = set()
with open(self._real_brand_p) as f1:
for line in f1:
line = line.strip()
if line == "": continue
real_brand_set.add(line)
self.logger.info("len of real_brand: %s" % len(real_brand_set))
return real_brand_set
    # not used
def _brand_pair_correction(self, exchange_dict, conflict_brand_set):
        # Tips: a chain like {1:2, 2:3, 3:4} will be handled incorrectly
tmp_dict = {}
for k, v in exchange_dict.items():
if k in conflict_brand_set:
right_brand = exchange_dict[k]
for k1, v1 in exchange_dict.items():
if v1 == k:
tmp_dict[k1] = right_brand
exchange_dict_ext = {}
for k2, v2 in exchange_dict.items():
if k2 == v2: continue
if k2 in conflict_brand_set: continue
if k2 in tmp_dict:
exchange_dict_ext[k2] = tmp_dict[k2]
else:
exchange_dict_ext[k2] = v2
return exchange_dict_ext
def _brand_pair_checking(self, exchange_dict):
s1 = set(list(exchange_dict.keys()))
s2 = set(list(exchange_dict.values()))
s3 = s1 & s2
if len(s3) > 0:
self.logger.error("exchang-brand-pair has error, error brands is: %s" % "\t".join(list(s3)))
return False, s3
else:
return True, None
def _get_exchange_brand_pair(self):
exchange_dict = {}
def _line_deal(line):
line = line.strip()
if line == "": return
lst1 = line.split("|")
if len(lst1) != 2:
self.logger.info("wrong brand pair: %s" % line)
return
lst1 = [z.strip() for z in lst1]
if lst1[0] != lst1[1]:
exchange_dict[lst1[0]] = lst1[1]
        # use the brand-determination results plus error.txt to get the sname values that need correcting
if not os.path.exists(self._error_p):
self.logger.info("%s does not exist!" % self._real_brand_p)
else:
with open(self._error_p) as f1:
for line in f1:
_line_deal(line)
self.logger.info("len of exchang_brand_pair: %s" % len(exchange_dict))
if not os.path.exists(self._word_dict_p):
self.logger.info("%s does not exist!" % self._real_brand_p)
else:
with open(self._word_dict_p) as f1:
for line in f1:
_line_deal(line)
self.logger.info("len of exchang_brand_pair: %s" % len(exchange_dict))
        # brand-pair checking
chk_flag, conflict_brand_set = self._brand_pair_checking(exchange_dict)
if not chk_flag:
err_s = "exchang-brand-pair error: %s" % "\t".join(list(conflict_brand_set))
self.logger.error(err_s)
raise Exception(err_s)
return exchange_dict
def _get_del_brand(self):
if not os.path.exists(self._del_brand_p):
raise Exception("%s does not exist!" % self._real_brand_p)
del_dict = {}
with open(self._del_brand_p) as f1:
for line in f1:
line = line.strip()
if line == "": continue
del_dict[line] = 0
self.logger.info("len of del_brand: %s" % len(del_dict))
return del_dict
class BrandReg(BrandRegBasic):
def __init__(self, base_folder, log_instance, input_lst=None):
super(BrandReg, self).__init__(base_folder, log_instance)
input_file = base_folder + "/dp_brands_result.txt"
if not os.path.exists(input_file):
raise Exception("%s does not exist!" % input_file)
output_file = base_folder + "/dp_brands_result.txt.brandreg"
self._input_p = input_file
self._input_lst = input_lst
self._output_p = output_file
def _brand_exchange(self, ori_brand):
if ori_brand in self.exchange_brand_pair:
return self.exchange_brand_pair[ori_brand]
else:
return ori_brand
def brand_reg(self):
stp1_lst = []
idx = 0
if self._input_lst != None and len(self._input_lst) > 0:
self.logger.info("增量数据处理")
for line in self._input_lst:
idx += 1
if idx % 10000 == 0: self.logger.info(idx)
line = line.strip()
r = self.brand_rewrite(line)
if r is None: continue
stp1_lst.append(r)
elif os.path.exists(self._input_p):
f_input = open(self._input_p)
for line in f_input:
idx += 1
if idx % 100000 == 0: self.logger.info(idx)
line = line.strip()
r = self.brand_rewrite(line)
if r is None: continue
stp1_lst.append(r)
f_input.close()
else:
raise Exception("输入增量数据为空!!!")
if len(stp1_lst) < 1:
raise Exception("增量数据处理后数据为空!!!")
with open(self._output_p, 'w') as f3:
f3.write("\n".join(stp1_lst))
f3.flush()
def _real_brand_reg(self, s_name):
tmp_brand = None
"""
        attention: this step can go wrong.
        For example, with the brands 东方骆驼 and 骆驼:
        if 骆驼 appears before 东方骆驼 in real_brand.txt,
        then 东方骆驼 will be rewritten as 骆驼.
"""
for r_b in self.real_brand_set:
lst5 = s_name.split(r_b)
if len(lst5) > 1:
tmp_brand = r_b
break
return tmp_brand
def brand_rewrite(self, line):
line = line.strip()
if line == "":
self.logger.info("empty string!!")
return None
lst1 = line.split("\x01")
if len(lst1) == 3:
            s_id, ori_name, s_brand = lst1 # unpack the relevant fields
s_brand = s_brand.strip()
else:
self.logger.info("brand_rewrite error data: %s" % line)
return None
s_name = tool.s_name_dealing(ori_name)
if len(self.real_brand_set) > 0:
if s_brand not in self.real_brand_set:
                ex_brand = self._real_brand_reg(s_name) # matching step: the given brand is not in the real-brand set, so match the processed name against the set to normalize the brand
                tmp_brand = ex_brand if ex_brand != None else s_brand # use the matched brand if one was found, otherwise keep the original
else:
                tmp_brand = s_brand # the brand is already in the real-brand set, use it as-is
else:
            tmp_brand = s_brand # no real-brand set available, use the brand as-is
        # brand correction
r_brand = self._brand_exchange(tmp_brand)
        # bad-brand check
if r_brand in self.del_brand_dict:
r_brand = s_name
return "\x01".join([s_id, ori_name, r_brand]) #拼接后返回结果
|
flexible
|
{
"blob_id": "845d1251497df61dd2c23241016a049c695ad940",
"index": 9193,
"step-1": "<mask token>\n\n\nclass BrandReg(BrandRegBasic):\n\n def __init__(self, base_folder, log_instance, input_lst=None):\n super(BrandReg, self).__init__(base_folder, log_instance)\n input_file = base_folder + '/dp_brands_result.txt'\n if not os.path.exists(input_file):\n raise Exception('%s does not exist!' % input_file)\n output_file = base_folder + '/dp_brands_result.txt.brandreg'\n self._input_p = input_file\n self._input_lst = input_lst\n self._output_p = output_file\n\n def _brand_exchange(self, ori_brand):\n if ori_brand in self.exchange_brand_pair:\n return self.exchange_brand_pair[ori_brand]\n else:\n return ori_brand\n\n def brand_reg(self):\n stp1_lst = []\n idx = 0\n if self._input_lst != None and len(self._input_lst) > 0:\n self.logger.info('增量数据处理')\n for line in self._input_lst:\n idx += 1\n if idx % 10000 == 0:\n self.logger.info(idx)\n line = line.strip()\n r = self.brand_rewrite(line)\n if r is None:\n continue\n stp1_lst.append(r)\n elif os.path.exists(self._input_p):\n f_input = open(self._input_p)\n for line in f_input:\n idx += 1\n if idx % 100000 == 0:\n self.logger.info(idx)\n line = line.strip()\n r = self.brand_rewrite(line)\n if r is None:\n continue\n stp1_lst.append(r)\n f_input.close()\n else:\n raise Exception('输入增量数据为空!!!')\n if len(stp1_lst) < 1:\n raise Exception('增量数据处理后数据为空!!!')\n with open(self._output_p, 'w') as f3:\n f3.write('\\n'.join(stp1_lst))\n f3.flush()\n\n def _real_brand_reg(self, s_name):\n tmp_brand = None\n \"\"\"\n attention: 这一步可能出现问题, \n 比如:东方骆驼,骆驼, \n 在real_brand.txt文件中,如果【骆驼】出现在【东方骆驼】前面,\n 那么将导致【东方骆驼】变为【骆驼】\n \"\"\"\n for r_b in self.real_brand_set:\n lst5 = s_name.split(r_b)\n if len(lst5) > 1:\n tmp_brand = r_b\n break\n return tmp_brand\n\n def brand_rewrite(self, line):\n line = line.strip()\n if line == '':\n self.logger.info('empty string!!')\n return None\n lst1 = line.split('\\x01')\n if len(lst1) == 3:\n s_id, ori_name, s_brand = lst1\n s_brand = s_brand.strip()\n else:\n self.logger.info('brand_rewrite error data: %s' % line)\n return None\n s_name = tool.s_name_dealing(ori_name)\n if len(self.real_brand_set) > 0:\n if s_brand not in self.real_brand_set:\n ex_brand = self._real_brand_reg(s_name)\n tmp_brand = ex_brand if ex_brand != None else s_brand\n else:\n tmp_brand = s_brand\n else:\n tmp_brand = s_brand\n r_brand = self._brand_exchange(tmp_brand)\n if r_brand in self.del_brand_dict:\n r_brand = s_name\n return '\\x01'.join([s_id, ori_name, r_brand])\n",
"step-2": "<mask token>\n\n\nclass BrandRegBasic(object):\n\n def __init__(self, base_folder, log_instance):\n if not os.path.exists(base_folder):\n raise Exception('%s does not exists!' % base_folder)\n self._real_brand_p = base_folder + '/real_brand.txt'\n if not os.path.exists(self._real_brand_p):\n raise Exception('%s does not exists!' % self._real_brand_p)\n self._error_p = base_folder + '/error.txt'\n if not os.path.exists(self._error_p):\n raise Exception('%s does not exists!' % self._error_p)\n self._word_dict_p = base_folder + '/word_dict.txt'\n if not os.path.exists(self._word_dict_p):\n raise Exception('%s does not exists!' % self._word_dict_p)\n self._del_brand_p = base_folder + '/del_brand.txt'\n if not os.path.exists(self._del_brand_p):\n raise Exception('%s does not exists!' % self._del_brand_p)\n self.logger = log_instance\n self.logger.info('get_real_brand')\n self.real_brand_set = self._get_real_brand()\n self.logger.info('get_exchange_brand_pair')\n self.exchange_brand_pair = self._get_exchange_brand_pair()\n self.logger.info('get_del_brand')\n self.del_brand_dict = self._get_del_brand()\n <mask token>\n <mask token>\n\n def _brand_pair_checking(self, exchange_dict):\n s1 = set(list(exchange_dict.keys()))\n s2 = set(list(exchange_dict.values()))\n s3 = s1 & s2\n if len(s3) > 0:\n self.logger.error(\n 'exchang-brand-pair has error, error brands is: %s' % '\\t'.\n join(list(s3)))\n return False, s3\n else:\n return True, None\n <mask token>\n <mask token>\n\n\nclass BrandReg(BrandRegBasic):\n\n def __init__(self, base_folder, log_instance, input_lst=None):\n super(BrandReg, self).__init__(base_folder, log_instance)\n input_file = base_folder + '/dp_brands_result.txt'\n if not os.path.exists(input_file):\n raise Exception('%s does not exist!' 
% input_file)\n output_file = base_folder + '/dp_brands_result.txt.brandreg'\n self._input_p = input_file\n self._input_lst = input_lst\n self._output_p = output_file\n\n def _brand_exchange(self, ori_brand):\n if ori_brand in self.exchange_brand_pair:\n return self.exchange_brand_pair[ori_brand]\n else:\n return ori_brand\n\n def brand_reg(self):\n stp1_lst = []\n idx = 0\n if self._input_lst != None and len(self._input_lst) > 0:\n self.logger.info('增量数据处理')\n for line in self._input_lst:\n idx += 1\n if idx % 10000 == 0:\n self.logger.info(idx)\n line = line.strip()\n r = self.brand_rewrite(line)\n if r is None:\n continue\n stp1_lst.append(r)\n elif os.path.exists(self._input_p):\n f_input = open(self._input_p)\n for line in f_input:\n idx += 1\n if idx % 100000 == 0:\n self.logger.info(idx)\n line = line.strip()\n r = self.brand_rewrite(line)\n if r is None:\n continue\n stp1_lst.append(r)\n f_input.close()\n else:\n raise Exception('输入增量数据为空!!!')\n if len(stp1_lst) < 1:\n raise Exception('增量数据处理后数据为空!!!')\n with open(self._output_p, 'w') as f3:\n f3.write('\\n'.join(stp1_lst))\n f3.flush()\n\n def _real_brand_reg(self, s_name):\n tmp_brand = None\n \"\"\"\n attention: 这一步可能出现问题, \n 比如:东方骆驼,骆驼, \n 在real_brand.txt文件中,如果【骆驼】出现在【东方骆驼】前面,\n 那么将导致【东方骆驼】变为【骆驼】\n \"\"\"\n for r_b in self.real_brand_set:\n lst5 = s_name.split(r_b)\n if len(lst5) > 1:\n tmp_brand = r_b\n break\n return tmp_brand\n\n def brand_rewrite(self, line):\n line = line.strip()\n if line == '':\n self.logger.info('empty string!!')\n return None\n lst1 = line.split('\\x01')\n if len(lst1) == 3:\n s_id, ori_name, s_brand = lst1\n s_brand = s_brand.strip()\n else:\n self.logger.info('brand_rewrite error data: %s' % line)\n return None\n s_name = tool.s_name_dealing(ori_name)\n if len(self.real_brand_set) > 0:\n if s_brand not in self.real_brand_set:\n ex_brand = self._real_brand_reg(s_name)\n tmp_brand = ex_brand if ex_brand != None else s_brand\n else:\n tmp_brand = s_brand\n else:\n tmp_brand = s_brand\n r_brand = self._brand_exchange(tmp_brand)\n if r_brand in self.del_brand_dict:\n r_brand = s_name\n return '\\x01'.join([s_id, ori_name, r_brand])\n",
"step-3": "<mask token>\n\n\nclass BrandRegBasic(object):\n\n def __init__(self, base_folder, log_instance):\n if not os.path.exists(base_folder):\n raise Exception('%s does not exists!' % base_folder)\n self._real_brand_p = base_folder + '/real_brand.txt'\n if not os.path.exists(self._real_brand_p):\n raise Exception('%s does not exists!' % self._real_brand_p)\n self._error_p = base_folder + '/error.txt'\n if not os.path.exists(self._error_p):\n raise Exception('%s does not exists!' % self._error_p)\n self._word_dict_p = base_folder + '/word_dict.txt'\n if not os.path.exists(self._word_dict_p):\n raise Exception('%s does not exists!' % self._word_dict_p)\n self._del_brand_p = base_folder + '/del_brand.txt'\n if not os.path.exists(self._del_brand_p):\n raise Exception('%s does not exists!' % self._del_brand_p)\n self.logger = log_instance\n self.logger.info('get_real_brand')\n self.real_brand_set = self._get_real_brand()\n self.logger.info('get_exchange_brand_pair')\n self.exchange_brand_pair = self._get_exchange_brand_pair()\n self.logger.info('get_del_brand')\n self.del_brand_dict = self._get_del_brand()\n <mask token>\n\n def _brand_pair_correction(self, exchange_dict, conflict_brand_set):\n tmp_dict = {}\n for k, v in exchange_dict.items():\n if k in conflict_brand_set:\n right_brand = exchange_dict[k]\n for k1, v1 in exchange_dict.items():\n if v1 == k:\n tmp_dict[k1] = right_brand\n exchange_dict_ext = {}\n for k2, v2 in exchange_dict.items():\n if k2 == v2:\n continue\n if k2 in conflict_brand_set:\n continue\n if k2 in tmp_dict:\n exchange_dict_ext[k2] = tmp_dict[k2]\n else:\n exchange_dict_ext[k2] = v2\n return exchange_dict_ext\n\n def _brand_pair_checking(self, exchange_dict):\n s1 = set(list(exchange_dict.keys()))\n s2 = set(list(exchange_dict.values()))\n s3 = s1 & s2\n if len(s3) > 0:\n self.logger.error(\n 'exchang-brand-pair has error, error brands is: %s' % '\\t'.\n join(list(s3)))\n return False, s3\n else:\n return True, None\n\n def _get_exchange_brand_pair(self):\n exchange_dict = {}\n\n def _line_deal(line):\n line = line.strip()\n if line == '':\n return\n lst1 = line.split('|')\n if len(lst1) != 2:\n self.logger.info('wrong brand pair: %s' % line)\n return\n lst1 = [z.strip() for z in lst1]\n if lst1[0] != lst1[1]:\n exchange_dict[lst1[0]] = lst1[1]\n if not os.path.exists(self._error_p):\n self.logger.info('%s does not exist!' % self._real_brand_p)\n else:\n with open(self._error_p) as f1:\n for line in f1:\n _line_deal(line)\n self.logger.info('len of exchang_brand_pair: %s' % len(\n exchange_dict))\n if not os.path.exists(self._word_dict_p):\n self.logger.info('%s does not exist!' % self._real_brand_p)\n else:\n with open(self._word_dict_p) as f1:\n for line in f1:\n _line_deal(line)\n self.logger.info('len of exchang_brand_pair: %s' % len(\n exchange_dict))\n chk_flag, conflict_brand_set = self._brand_pair_checking(exchange_dict)\n if not chk_flag:\n err_s = 'exchang-brand-pair error: %s' % '\\t'.join(list(\n conflict_brand_set))\n self.logger.error(err_s)\n raise Exception(err_s)\n return exchange_dict\n\n def _get_del_brand(self):\n if not os.path.exists(self._del_brand_p):\n raise Exception('%s does not exist!' 
% self._real_brand_p)\n del_dict = {}\n with open(self._del_brand_p) as f1:\n for line in f1:\n line = line.strip()\n if line == '':\n continue\n del_dict[line] = 0\n self.logger.info('len of del_brand: %s' % len(del_dict))\n return del_dict\n\n\nclass BrandReg(BrandRegBasic):\n\n def __init__(self, base_folder, log_instance, input_lst=None):\n super(BrandReg, self).__init__(base_folder, log_instance)\n input_file = base_folder + '/dp_brands_result.txt'\n if not os.path.exists(input_file):\n raise Exception('%s does not exist!' % input_file)\n output_file = base_folder + '/dp_brands_result.txt.brandreg'\n self._input_p = input_file\n self._input_lst = input_lst\n self._output_p = output_file\n\n def _brand_exchange(self, ori_brand):\n if ori_brand in self.exchange_brand_pair:\n return self.exchange_brand_pair[ori_brand]\n else:\n return ori_brand\n\n def brand_reg(self):\n stp1_lst = []\n idx = 0\n if self._input_lst != None and len(self._input_lst) > 0:\n self.logger.info('增量数据处理')\n for line in self._input_lst:\n idx += 1\n if idx % 10000 == 0:\n self.logger.info(idx)\n line = line.strip()\n r = self.brand_rewrite(line)\n if r is None:\n continue\n stp1_lst.append(r)\n elif os.path.exists(self._input_p):\n f_input = open(self._input_p)\n for line in f_input:\n idx += 1\n if idx % 100000 == 0:\n self.logger.info(idx)\n line = line.strip()\n r = self.brand_rewrite(line)\n if r is None:\n continue\n stp1_lst.append(r)\n f_input.close()\n else:\n raise Exception('输入增量数据为空!!!')\n if len(stp1_lst) < 1:\n raise Exception('增量数据处理后数据为空!!!')\n with open(self._output_p, 'w') as f3:\n f3.write('\\n'.join(stp1_lst))\n f3.flush()\n\n def _real_brand_reg(self, s_name):\n tmp_brand = None\n \"\"\"\n attention: 这一步可能出现问题, \n 比如:东方骆驼,骆驼, \n 在real_brand.txt文件中,如果【骆驼】出现在【东方骆驼】前面,\n 那么将导致【东方骆驼】变为【骆驼】\n \"\"\"\n for r_b in self.real_brand_set:\n lst5 = s_name.split(r_b)\n if len(lst5) > 1:\n tmp_brand = r_b\n break\n return tmp_brand\n\n def brand_rewrite(self, line):\n line = line.strip()\n if line == '':\n self.logger.info('empty string!!')\n return None\n lst1 = line.split('\\x01')\n if len(lst1) == 3:\n s_id, ori_name, s_brand = lst1\n s_brand = s_brand.strip()\n else:\n self.logger.info('brand_rewrite error data: %s' % line)\n return None\n s_name = tool.s_name_dealing(ori_name)\n if len(self.real_brand_set) > 0:\n if s_brand not in self.real_brand_set:\n ex_brand = self._real_brand_reg(s_name)\n tmp_brand = ex_brand if ex_brand != None else s_brand\n else:\n tmp_brand = s_brand\n else:\n tmp_brand = s_brand\n r_brand = self._brand_exchange(tmp_brand)\n if r_brand in self.del_brand_dict:\n r_brand = s_name\n return '\\x01'.join([s_id, ori_name, r_brand])\n",
"step-4": "<mask token>\n\n\nclass BrandRegBasic(object):\n\n def __init__(self, base_folder, log_instance):\n if not os.path.exists(base_folder):\n raise Exception('%s does not exists!' % base_folder)\n self._real_brand_p = base_folder + '/real_brand.txt'\n if not os.path.exists(self._real_brand_p):\n raise Exception('%s does not exists!' % self._real_brand_p)\n self._error_p = base_folder + '/error.txt'\n if not os.path.exists(self._error_p):\n raise Exception('%s does not exists!' % self._error_p)\n self._word_dict_p = base_folder + '/word_dict.txt'\n if not os.path.exists(self._word_dict_p):\n raise Exception('%s does not exists!' % self._word_dict_p)\n self._del_brand_p = base_folder + '/del_brand.txt'\n if not os.path.exists(self._del_brand_p):\n raise Exception('%s does not exists!' % self._del_brand_p)\n self.logger = log_instance\n self.logger.info('get_real_brand')\n self.real_brand_set = self._get_real_brand()\n self.logger.info('get_exchange_brand_pair')\n self.exchange_brand_pair = self._get_exchange_brand_pair()\n self.logger.info('get_del_brand')\n self.del_brand_dict = self._get_del_brand()\n\n def _get_real_brand(self):\n if not os.path.exists(self._real_brand_p):\n raise Exception('%s does not exist!' % self._real_brand_p)\n real_brand_set = set()\n with open(self._real_brand_p) as f1:\n for line in f1:\n line = line.strip()\n if line == '':\n continue\n real_brand_set.add(line)\n self.logger.info('len of real_brand: %s' % len(real_brand_set))\n return real_brand_set\n\n def _brand_pair_correction(self, exchange_dict, conflict_brand_set):\n tmp_dict = {}\n for k, v in exchange_dict.items():\n if k in conflict_brand_set:\n right_brand = exchange_dict[k]\n for k1, v1 in exchange_dict.items():\n if v1 == k:\n tmp_dict[k1] = right_brand\n exchange_dict_ext = {}\n for k2, v2 in exchange_dict.items():\n if k2 == v2:\n continue\n if k2 in conflict_brand_set:\n continue\n if k2 in tmp_dict:\n exchange_dict_ext[k2] = tmp_dict[k2]\n else:\n exchange_dict_ext[k2] = v2\n return exchange_dict_ext\n\n def _brand_pair_checking(self, exchange_dict):\n s1 = set(list(exchange_dict.keys()))\n s2 = set(list(exchange_dict.values()))\n s3 = s1 & s2\n if len(s3) > 0:\n self.logger.error(\n 'exchang-brand-pair has error, error brands is: %s' % '\\t'.\n join(list(s3)))\n return False, s3\n else:\n return True, None\n\n def _get_exchange_brand_pair(self):\n exchange_dict = {}\n\n def _line_deal(line):\n line = line.strip()\n if line == '':\n return\n lst1 = line.split('|')\n if len(lst1) != 2:\n self.logger.info('wrong brand pair: %s' % line)\n return\n lst1 = [z.strip() for z in lst1]\n if lst1[0] != lst1[1]:\n exchange_dict[lst1[0]] = lst1[1]\n if not os.path.exists(self._error_p):\n self.logger.info('%s does not exist!' % self._real_brand_p)\n else:\n with open(self._error_p) as f1:\n for line in f1:\n _line_deal(line)\n self.logger.info('len of exchang_brand_pair: %s' % len(\n exchange_dict))\n if not os.path.exists(self._word_dict_p):\n self.logger.info('%s does not exist!' 
% self._real_brand_p)\n else:\n with open(self._word_dict_p) as f1:\n for line in f1:\n _line_deal(line)\n self.logger.info('len of exchang_brand_pair: %s' % len(\n exchange_dict))\n chk_flag, conflict_brand_set = self._brand_pair_checking(exchange_dict)\n if not chk_flag:\n err_s = 'exchang-brand-pair error: %s' % '\\t'.join(list(\n conflict_brand_set))\n self.logger.error(err_s)\n raise Exception(err_s)\n return exchange_dict\n\n def _get_del_brand(self):\n if not os.path.exists(self._del_brand_p):\n raise Exception('%s does not exist!' % self._real_brand_p)\n del_dict = {}\n with open(self._del_brand_p) as f1:\n for line in f1:\n line = line.strip()\n if line == '':\n continue\n del_dict[line] = 0\n self.logger.info('len of del_brand: %s' % len(del_dict))\n return del_dict\n\n\nclass BrandReg(BrandRegBasic):\n\n def __init__(self, base_folder, log_instance, input_lst=None):\n super(BrandReg, self).__init__(base_folder, log_instance)\n input_file = base_folder + '/dp_brands_result.txt'\n if not os.path.exists(input_file):\n raise Exception('%s does not exist!' % input_file)\n output_file = base_folder + '/dp_brands_result.txt.brandreg'\n self._input_p = input_file\n self._input_lst = input_lst\n self._output_p = output_file\n\n def _brand_exchange(self, ori_brand):\n if ori_brand in self.exchange_brand_pair:\n return self.exchange_brand_pair[ori_brand]\n else:\n return ori_brand\n\n def brand_reg(self):\n stp1_lst = []\n idx = 0\n if self._input_lst != None and len(self._input_lst) > 0:\n self.logger.info('增量数据处理')\n for line in self._input_lst:\n idx += 1\n if idx % 10000 == 0:\n self.logger.info(idx)\n line = line.strip()\n r = self.brand_rewrite(line)\n if r is None:\n continue\n stp1_lst.append(r)\n elif os.path.exists(self._input_p):\n f_input = open(self._input_p)\n for line in f_input:\n idx += 1\n if idx % 100000 == 0:\n self.logger.info(idx)\n line = line.strip()\n r = self.brand_rewrite(line)\n if r is None:\n continue\n stp1_lst.append(r)\n f_input.close()\n else:\n raise Exception('输入增量数据为空!!!')\n if len(stp1_lst) < 1:\n raise Exception('增量数据处理后数据为空!!!')\n with open(self._output_p, 'w') as f3:\n f3.write('\\n'.join(stp1_lst))\n f3.flush()\n\n def _real_brand_reg(self, s_name):\n tmp_brand = None\n \"\"\"\n attention: 这一步可能出现问题, \n 比如:东方骆驼,骆驼, \n 在real_brand.txt文件中,如果【骆驼】出现在【东方骆驼】前面,\n 那么将导致【东方骆驼】变为【骆驼】\n \"\"\"\n for r_b in self.real_brand_set:\n lst5 = s_name.split(r_b)\n if len(lst5) > 1:\n tmp_brand = r_b\n break\n return tmp_brand\n\n def brand_rewrite(self, line):\n line = line.strip()\n if line == '':\n self.logger.info('empty string!!')\n return None\n lst1 = line.split('\\x01')\n if len(lst1) == 3:\n s_id, ori_name, s_brand = lst1\n s_brand = s_brand.strip()\n else:\n self.logger.info('brand_rewrite error data: %s' % line)\n return None\n s_name = tool.s_name_dealing(ori_name)\n if len(self.real_brand_set) > 0:\n if s_brand not in self.real_brand_set:\n ex_brand = self._real_brand_reg(s_name)\n tmp_brand = ex_brand if ex_brand != None else s_brand\n else:\n tmp_brand = s_brand\n else:\n tmp_brand = s_brand\n r_brand = self._brand_exchange(tmp_brand)\n if r_brand in self.del_brand_dict:\n r_brand = s_name\n return '\\x01'.join([s_id, ori_name, r_brand])\n",
"step-5": "#!/usr/bin/env python3\n#coding=utf-8\n\nimport sys\nimport os\nimport tool\n\nclass BrandRegBasic(object):\n def __init__(self, base_folder, log_instance):\n if not os.path.exists(base_folder):\n raise Exception(\"%s does not exists!\" % base_folder)\n self._real_brand_p = base_folder + \"/real_brand.txt\"\n if not os.path.exists(self._real_brand_p):\n raise Exception(\"%s does not exists!\" % self._real_brand_p)\n # 注:word_dict.txt和error.txt是一样的功能\n # 都是品牌改写,数据格式也一样\n self._error_p = base_folder + '/error.txt'\n if not os.path.exists(self._error_p):\n raise Exception(\"%s does not exists!\" % self._error_p)\n self._word_dict_p = base_folder + '/word_dict.txt'\n if not os.path.exists(self._word_dict_p):\n raise Exception(\"%s does not exists!\" % self._word_dict_p)\n self._del_brand_p = base_folder + '/del_brand.txt'\n if not os.path.exists(self._del_brand_p):\n raise Exception(\"%s does not exists!\" % self._del_brand_p)\n self.logger = log_instance\n self.logger.info(\"get_real_brand\")\n self.real_brand_set = self._get_real_brand()\n self.logger.info(\"get_exchange_brand_pair\")\n self.exchange_brand_pair = self._get_exchange_brand_pair()\n self.logger.info(\"get_del_brand\")\n self.del_brand_dict = self._get_del_brand()\n\n #通过真实品牌这个文件获取到真实品牌的元组\n def _get_real_brand(self):\n # 根据real_brand进行品牌确定\n if not os.path.exists(self._real_brand_p):\n raise Exception(\"%s does not exist!\" % self._real_brand_p)\n\n real_brand_set = set()\n with open(self._real_brand_p) as f1:\n for line in f1:\n line = line.strip()\n if line == \"\": continue\n real_brand_set.add(line)\n\n self.logger.info(\"len of real_brand: %s\" % len(real_brand_set))\n return real_brand_set\n\n # no-using\n def _brand_pair_correction(self, exchange_dict, conflict_brand_set):\n # Tips: {1:2, 2:3, 3:4}这种情况会有错误\n\n tmp_dict = {}\n for k, v in exchange_dict.items():\n if k in conflict_brand_set:\n right_brand = exchange_dict[k]\n for k1, v1 in exchange_dict.items():\n if v1 == k:\n tmp_dict[k1] = right_brand\n\n exchange_dict_ext = {}\n for k2, v2 in exchange_dict.items():\n if k2 == v2: continue\n if k2 in conflict_brand_set: continue\n if k2 in tmp_dict:\n exchange_dict_ext[k2] = tmp_dict[k2]\n else:\n exchange_dict_ext[k2] = v2\n\n return exchange_dict_ext\n\n def _brand_pair_checking(self, exchange_dict):\n s1 = set(list(exchange_dict.keys()))\n s2 = set(list(exchange_dict.values()))\n s3 = s1 & s2\n if len(s3) > 0:\n self.logger.error(\"exchang-brand-pair has error, error brands is: %s\" % \"\\t\".join(list(s3)))\n return False, s3\n else:\n return True, None\n\n def _get_exchange_brand_pair(self):\n exchange_dict = {}\n def _line_deal(line):\n line = line.strip()\n if line == \"\": return\n lst1 = line.split(\"|\")\n if len(lst1) != 2:\n self.logger.info(\"wrong brand pair: %s\" % line)\n return\n lst1 = [z.strip() for z in lst1]\n if lst1[0] != lst1[1]:\n exchange_dict[lst1[0]] = lst1[1]\n\n # 根据品牌确定的结果+error.txt获得需要修正的sname结果\n if not os.path.exists(self._error_p):\n self.logger.info(\"%s does not exist!\" % self._real_brand_p)\n else:\n with open(self._error_p) as f1:\n for line in f1:\n _line_deal(line)\n self.logger.info(\"len of exchang_brand_pair: %s\" % len(exchange_dict))\n\n if not os.path.exists(self._word_dict_p):\n self.logger.info(\"%s does not exist!\" % self._real_brand_p)\n else:\n with open(self._word_dict_p) as f1:\n for line in f1:\n _line_deal(line)\n self.logger.info(\"len of exchang_brand_pair: %s\" % len(exchange_dict))\n\n # 品牌对检测\n chk_flag, conflict_brand_set = 
self._brand_pair_checking(exchange_dict)\n if not chk_flag:\n err_s = \"exchang-brand-pair error: %s\" % \"\\t\".join(list(conflict_brand_set))\n self.logger.error(err_s)\n raise Exception(err_s)\n \n return exchange_dict\n\n def _get_del_brand(self):\n if not os.path.exists(self._del_brand_p):\n raise Exception(\"%s does not exist!\" % self._real_brand_p)\n\n del_dict = {}\n with open(self._del_brand_p) as f1:\n for line in f1:\n line = line.strip()\n if line == \"\": continue\n del_dict[line] = 0\n self.logger.info(\"len of del_brand: %s\" % len(del_dict))\n return del_dict\n\nclass BrandReg(BrandRegBasic):\n def __init__(self, base_folder, log_instance, input_lst=None):\n super(BrandReg, self).__init__(base_folder, log_instance)\n input_file = base_folder + \"/dp_brands_result.txt\"\n if not os.path.exists(input_file):\n raise Exception(\"%s does not exist!\" % input_file)\n\n output_file = base_folder + \"/dp_brands_result.txt.brandreg\"\n self._input_p = input_file\n self._input_lst = input_lst\n self._output_p = output_file\n\n def _brand_exchange(self, ori_brand):\n if ori_brand in self.exchange_brand_pair:\n return self.exchange_brand_pair[ori_brand]\n else:\n return ori_brand\n\n def brand_reg(self):\n stp1_lst = []\n idx = 0\n if self._input_lst != None and len(self._input_lst) > 0:\n self.logger.info(\"增量数据处理\")\n for line in self._input_lst:\n idx += 1\n if idx % 10000 == 0: self.logger.info(idx)\n line = line.strip()\n r = self.brand_rewrite(line)\n if r is None: continue\n stp1_lst.append(r)\n elif os.path.exists(self._input_p):\n f_input = open(self._input_p)\n for line in f_input:\n idx += 1\n if idx % 100000 == 0: self.logger.info(idx)\n line = line.strip()\n r = self.brand_rewrite(line)\n if r is None: continue\n stp1_lst.append(r)\n\n f_input.close()\n else:\n raise Exception(\"输入增量数据为空!!!\")\n\n if len(stp1_lst) < 1:\n raise Exception(\"增量数据处理后数据为空!!!\")\n\n with open(self._output_p, 'w') as f3:\n f3.write(\"\\n\".join(stp1_lst))\n f3.flush()\n\n def _real_brand_reg(self, s_name):\n tmp_brand = None\n \"\"\"\n attention: 这一步可能出现问题, \n 比如:东方骆驼,骆驼, \n 在real_brand.txt文件中,如果【骆驼】出现在【东方骆驼】前面,\n 那么将导致【东方骆驼】变为【骆驼】\n \"\"\"\n for r_b in self.real_brand_set:\n lst5 = s_name.split(r_b)\n if len(lst5) > 1:\n tmp_brand = r_b\n break\n\n return tmp_brand\n\n def brand_rewrite(self, line):\n line = line.strip()\n if line == \"\":\n self.logger.info(\"empty string!!\")\n return None\n lst1 = line.split(\"\\x01\")\n if len(lst1) == 3:\n s_id, ori_name, s_brand = lst1 #取到相关的数据\n s_brand = s_brand.strip()\n else:\n self.logger.info(\"brand_rewrite error data: %s\" % line)\n return None\n\n s_name = tool.s_name_dealing(ori_name)\n if len(self.real_brand_set) > 0:\n if s_brand not in self.real_brand_set:\n ex_brand = self._real_brand_reg(s_name) #匹配过程。如果取到的数据当中没有在数据集中找到相同的品牌,则对这种数据处理一下,在一个数据集中去匹配,进行品牌的归并\n tmp_brand = ex_brand if ex_brand != None else s_brand #如果对处理过的品牌就赋值给tmp_brand,否则直接赋值\n else:\n tmp_brand = s_brand #如果在数据集中找到了直接赋值\n else:\n tmp_brand = s_brand #如果没有数据集就直接赋值\n # brand 修正\n r_brand = self._brand_exchange(tmp_brand)\n # 错误品牌检测\n if r_brand in self.del_brand_dict:\n r_brand = s_name\n\n return \"\\x01\".join([s_id, ori_name, r_brand]) #拼接后返回结果\n\n\n\n\n\n\n",
"step-ids": [
6,
9,
12,
13,
15
]
}
|
[
6,
9,
12,
13,
15
] |
"""
-*- coding:utf-8 -*-
@ Time : 14:05
@ Name : handle_ini_file.py
@ Author : xiaoyin_ing
@ Email : [email protected]
@ Software : PyCharm
...
"""
from configparser import ConfigParser
from Common.handle_path import conf_dir
import os
class HandleConfig(ConfigParser):
def __init__(self, ini_file_neme):
super().__init__()
self.ini_file_neme = ini_file_neme
def red_conf__(self):
file_path = os.path.join(conf_dir, self.ini_file_neme)
self.read(file_path, encoding="utf-8")
red_conf = HandleConfig("xiaoyin.ini")
red_conf.red_conf__()
# attributes used by the logging module
log_data_list = [red_conf.get("log", "log_name"), red_conf.get("log", "log_level"), red_conf.getboolean("log", "file")]
# print(log_data_list)
|
normal
|
{
"blob_id": "01e60123ad87d9ff49812fe3a6f5d55bc85921c5",
"index": 4071,
"step-1": "<mask token>\n\n\nclass HandleConfig(ConfigParser):\n\n def __init__(self, ini_file_neme):\n super().__init__()\n self.ini_file_neme = ini_file_neme\n\n def red_conf__(self):\n file_path = os.path.join(conf_dir, self.ini_file_neme)\n self.read(file_path, encoding='utf-8')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass HandleConfig(ConfigParser):\n\n def __init__(self, ini_file_neme):\n super().__init__()\n self.ini_file_neme = ini_file_neme\n\n def red_conf__(self):\n file_path = os.path.join(conf_dir, self.ini_file_neme)\n self.read(file_path, encoding='utf-8')\n\n\n<mask token>\nred_conf.red_conf__()\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass HandleConfig(ConfigParser):\n\n def __init__(self, ini_file_neme):\n super().__init__()\n self.ini_file_neme = ini_file_neme\n\n def red_conf__(self):\n file_path = os.path.join(conf_dir, self.ini_file_neme)\n self.read(file_path, encoding='utf-8')\n\n\nred_conf = HandleConfig('xiaoyin.ini')\nred_conf.red_conf__()\nlog_data_list = [red_conf.get('log', 'log_name'), red_conf.get('log',\n 'log_level'), red_conf.getboolean('log', 'file')]\n",
"step-4": "<mask token>\nfrom configparser import ConfigParser\nfrom Common.handle_path import conf_dir\nimport os\n\n\nclass HandleConfig(ConfigParser):\n\n def __init__(self, ini_file_neme):\n super().__init__()\n self.ini_file_neme = ini_file_neme\n\n def red_conf__(self):\n file_path = os.path.join(conf_dir, self.ini_file_neme)\n self.read(file_path, encoding='utf-8')\n\n\nred_conf = HandleConfig('xiaoyin.ini')\nred_conf.red_conf__()\nlog_data_list = [red_conf.get('log', 'log_name'), red_conf.get('log',\n 'log_level'), red_conf.getboolean('log', 'file')]\n",
"step-5": "\"\"\"\n-*- coding:utf-8 -*-\n@ Time : 14:05\n@ Name : handle_ini_file.py\n@ Author : xiaoyin_ing\n@ Email : [email protected]\n@ Software : PyCharm\n ...\n \n\"\"\"\nfrom configparser import ConfigParser\nfrom Common.handle_path import conf_dir\nimport os\n\n\nclass HandleConfig(ConfigParser):\n def __init__(self, ini_file_neme):\n super().__init__()\n self.ini_file_neme = ini_file_neme\n\n def red_conf__(self):\n file_path = os.path.join(conf_dir, self.ini_file_neme)\n self.read(file_path, encoding=\"utf-8\")\n\n\nred_conf = HandleConfig(\"xiaoyin.ini\")\nred_conf.red_conf__()\n\n# 日志模块用到的属性\nlog_data_list = [red_conf.get(\"log\", \"log_name\"), red_conf.get(\"log\", \"log_level\"), red_conf.getboolean(\"log\", \"file\")]\n# print(log_data_list)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url = (
'https://archive.fantasysports.yahoo.com/nfl/2017/189499?lhst=sched#lhstsched'
)
html = requests.get(url).content
df_list = pandas.read_html(html)
<|reserved_special_token_1|>
import pandas
import requests
import lxml
url = (
'https://archive.fantasysports.yahoo.com/nfl/2017/189499?lhst=sched#lhstsched'
)
html = requests.get(url).content
df_list = pandas.read_html(html)
<|reserved_special_token_1|>
# Import packages
import pandas
import requests
import lxml
# Get page content
url = "https://archive.fantasysports.yahoo.com/nfl/2017/189499?lhst=sched#lhstsched"
html = requests.get(url).content
df_list = pandas.read_html(html)
# Pull relevant URLs
|
flexible
|
{
"blob_id": "d46035699bee1ad9a75ea251c2c3ab8817d6a740",
"index": 4343,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurl = (\n 'https://archive.fantasysports.yahoo.com/nfl/2017/189499?lhst=sched#lhstsched'\n )\nhtml = requests.get(url).content\ndf_list = pandas.read_html(html)\n",
"step-3": "import pandas\nimport requests\nimport lxml\nurl = (\n 'https://archive.fantasysports.yahoo.com/nfl/2017/189499?lhst=sched#lhstsched'\n )\nhtml = requests.get(url).content\ndf_list = pandas.read_html(html)\n",
"step-4": "# Import packages\nimport pandas\nimport requests\nimport lxml\n\n# Get page content\nurl = \"https://archive.fantasysports.yahoo.com/nfl/2017/189499?lhst=sched#lhstsched\"\nhtml = requests.get(url).content\ndf_list = pandas.read_html(html)\n\n# Pull relevant URLs\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class CurationLists(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def pseudo_tree(self, gids, out_tree):
"""Create pseudo-tree with the specified genome IDs."""
pseudo_tree = '('
pseudo_tree += ','.join(gids)
pseudo_tree += ');'
fout = open(out_tree, 'w')
fout.write(pseudo_tree)
fout.close()
<|reserved_special_token_0|>
def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,
gtdb_decorate_table):
"""Polyphyletic and rogue GTDB representatives."""
self.logger.info(
'Identifying polyphyletic and rogue GTDB representatives.')
poly_taxa_count = 0
poly_gids = set()
rogue_gids = set()
with open(gtdb_decorate_table) as f:
f.readline()
for line in f:
tokens = line.split('\t')
taxon = tokens[0]
fmeasure = float(tokens[2])
rogue_in = tokens[7].strip()
rogue_out = tokens[8].strip()
if fmeasure < 1.0:
poly_taxa_count += 1
poly_gids.update(taxa_gid_map[taxon])
if rogue_in:
for gid in rogue_in.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
if rogue_out:
for gid in rogue_out.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
self.logger.info(
' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'
.format(poly_taxa_count, len(poly_gids)))
self.logger.info(' - identified {:,} rogue GTDB representatives.'.
format(len(rogue_gids)))
self.logger.info(
'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'
)
out_file = os.path.join(self.output_dir,
f'gids_poly_taxa.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in poly_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))
self.logger.info(
'Creating curation lists and pseudo-trees of rogue GTDB representatives.'
)
out_file = os.path.join(self.output_dir,
f'gids_rogues.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in rogue_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))
def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,
gtdb_prev_sp_clusters, gtdb_decorate_table):
"""Create curation lists and pseudo-trees."""
self.logger.info('Identifying taxonomic assignment of genomes.')
taxa_gid_map = defaultdict(set)
domain_gids = set()
for line in open(gtdb_init_taxonomy):
tokens = line.strip().split('\t')
gid = canonical_gid(tokens[0])
taxa = [t.strip() for t in tokens[1].split(';')]
for taxon in taxa:
taxa_gid_map[taxon].add(gid)
domain_gids.add(gid)
self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))
)
self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters
)
self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,
gtdb_decorate_table)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CurationLists(object):
<|reserved_special_token_0|>
def __init__(self, domain, output_dir):
"""Initialization."""
self.domain = domain
self.output_dir = output_dir
self.logger = logging.getLogger('timestamp')
def pseudo_tree(self, gids, out_tree):
"""Create pseudo-tree with the specified genome IDs."""
pseudo_tree = '('
pseudo_tree += ','.join(gids)
pseudo_tree += ');'
fout = open(out_tree, 'w')
fout.write(pseudo_tree)
fout.close()
<|reserved_special_token_0|>
def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,
gtdb_decorate_table):
"""Polyphyletic and rogue GTDB representatives."""
self.logger.info(
'Identifying polyphyletic and rogue GTDB representatives.')
poly_taxa_count = 0
poly_gids = set()
rogue_gids = set()
with open(gtdb_decorate_table) as f:
f.readline()
for line in f:
tokens = line.split('\t')
taxon = tokens[0]
fmeasure = float(tokens[2])
rogue_in = tokens[7].strip()
rogue_out = tokens[8].strip()
if fmeasure < 1.0:
poly_taxa_count += 1
poly_gids.update(taxa_gid_map[taxon])
if rogue_in:
for gid in rogue_in.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
if rogue_out:
for gid in rogue_out.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
self.logger.info(
' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'
.format(poly_taxa_count, len(poly_gids)))
self.logger.info(' - identified {:,} rogue GTDB representatives.'.
format(len(rogue_gids)))
self.logger.info(
'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'
)
out_file = os.path.join(self.output_dir,
f'gids_poly_taxa.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in poly_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))
self.logger.info(
'Creating curation lists and pseudo-trees of rogue GTDB representatives.'
)
out_file = os.path.join(self.output_dir,
f'gids_rogues.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in rogue_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))
def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,
gtdb_prev_sp_clusters, gtdb_decorate_table):
"""Create curation lists and pseudo-trees."""
self.logger.info('Identifying taxonomic assignment of genomes.')
taxa_gid_map = defaultdict(set)
domain_gids = set()
for line in open(gtdb_init_taxonomy):
tokens = line.strip().split('\t')
gid = canonical_gid(tokens[0])
taxa = [t.strip() for t in tokens[1].split(';')]
for taxon in taxa:
taxa_gid_map[taxon].add(gid)
domain_gids.add(gid)
self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))
)
self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters
)
self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,
gtdb_decorate_table)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CurationLists(object):
<|reserved_special_token_0|>
def __init__(self, domain, output_dir):
"""Initialization."""
self.domain = domain
self.output_dir = output_dir
self.logger = logging.getLogger('timestamp')
def pseudo_tree(self, gids, out_tree):
"""Create pseudo-tree with the specified genome IDs."""
pseudo_tree = '('
pseudo_tree += ','.join(gids)
pseudo_tree += ');'
fout = open(out_tree, 'w')
fout.write(pseudo_tree)
fout.close()
def new_gtdb_reps(self, domain_gids, gtdb_sp_clusters,
gtdb_prev_sp_clusters):
"""New GTDB representatives."""
self.logger.info('Identifying previous GTDB representatives.')
prev_rids = set()
with open(gtdb_prev_sp_clusters) as f:
f.readline()
for line in f:
tokens = line.strip().split('\t')
rid = canonical_gid(tokens[0])
prev_rids.add(rid)
self.logger.info(' - identified {:,} previous GTDB representatives.'
.format(len(prev_rids)))
self.logger.info('Identifying current GTDB representatives.')
cur_rids = set()
with open(gtdb_sp_clusters) as f:
f.readline()
for line in f:
tokens = line.strip().split('\t')
rid = canonical_gid(tokens[0])
cur_rids.add(rid)
self.logger.info(' - identified {:,} current GTDB representatives.'
.format(len(cur_rids)))
self.logger.info(
'Creating curation list and pseudo-tree of new GTDB representatives.'
)
out_file = os.path.join(self.output_dir,
f'gids_new_reps.{self.domain}.lst')
fout = open(out_file, 'w')
new_rids = set()
for rid in cur_rids:
if rid in domain_gids and rid not in prev_rids:
fout.write('{}\n'.format(rid))
new_rids.add(rid)
fout.close()
self.logger.info(' - identified {:,} new GTDB representatives.'.
format(len(new_rids)))
self.pseudo_tree(new_rids, out_file.replace('.lst', '.tree'))
def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,
gtdb_decorate_table):
"""Polyphyletic and rogue GTDB representatives."""
self.logger.info(
'Identifying polyphyletic and rogue GTDB representatives.')
poly_taxa_count = 0
poly_gids = set()
rogue_gids = set()
with open(gtdb_decorate_table) as f:
f.readline()
for line in f:
tokens = line.split('\t')
taxon = tokens[0]
fmeasure = float(tokens[2])
rogue_in = tokens[7].strip()
rogue_out = tokens[8].strip()
if fmeasure < 1.0:
poly_taxa_count += 1
poly_gids.update(taxa_gid_map[taxon])
if rogue_in:
for gid in rogue_in.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
if rogue_out:
for gid in rogue_out.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
self.logger.info(
' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'
.format(poly_taxa_count, len(poly_gids)))
self.logger.info(' - identified {:,} rogue GTDB representatives.'.
format(len(rogue_gids)))
self.logger.info(
'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'
)
out_file = os.path.join(self.output_dir,
f'gids_poly_taxa.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in poly_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))
self.logger.info(
'Creating curation lists and pseudo-trees of rogue GTDB representatives.'
)
out_file = os.path.join(self.output_dir,
f'gids_rogues.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in rogue_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))
def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,
gtdb_prev_sp_clusters, gtdb_decorate_table):
"""Create curation lists and pseudo-trees."""
self.logger.info('Identifying taxonomic assignment of genomes.')
taxa_gid_map = defaultdict(set)
domain_gids = set()
for line in open(gtdb_init_taxonomy):
tokens = line.strip().split('\t')
gid = canonical_gid(tokens[0])
taxa = [t.strip() for t in tokens[1].split(';')]
for taxon in taxa:
taxa_gid_map[taxon].add(gid)
domain_gids.add(gid)
self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))
)
self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters
)
self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,
gtdb_decorate_table)
<|reserved_special_token_1|>
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2020'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '0.0.1'
__maintainer__ = 'Donovan Parks'
__email__ = '[email protected]'
__status__ = 'Development'
import sys
import argparse
import re
import datetime
import os
import logging
import time
import math
from collections import defaultdict, namedtuple
from biolib.common import canonical_gid
class CurationLists(object):
"""Lists and pseudo-trees for new representatives, polyphyletic taxa, rogue genomes, and genomes with modified NCBI names."""
def __init__(self, domain, output_dir):
"""Initialization."""
self.domain = domain
self.output_dir = output_dir
self.logger = logging.getLogger('timestamp')
def pseudo_tree(self, gids, out_tree):
"""Create pseudo-tree with the specified genome IDs."""
pseudo_tree = '('
pseudo_tree += ','.join(gids)
pseudo_tree += ');'
fout = open(out_tree, 'w')
fout.write(pseudo_tree)
fout.close()
def new_gtdb_reps(self, domain_gids, gtdb_sp_clusters,
gtdb_prev_sp_clusters):
"""New GTDB representatives."""
self.logger.info('Identifying previous GTDB representatives.')
prev_rids = set()
with open(gtdb_prev_sp_clusters) as f:
f.readline()
for line in f:
tokens = line.strip().split('\t')
rid = canonical_gid(tokens[0])
prev_rids.add(rid)
self.logger.info(' - identified {:,} previous GTDB representatives.'
.format(len(prev_rids)))
self.logger.info('Identifying current GTDB representatives.')
cur_rids = set()
with open(gtdb_sp_clusters) as f:
f.readline()
for line in f:
tokens = line.strip().split('\t')
rid = canonical_gid(tokens[0])
cur_rids.add(rid)
self.logger.info(' - identified {:,} current GTDB representatives.'
.format(len(cur_rids)))
self.logger.info(
'Creating curation list and pseudo-tree of new GTDB representatives.'
)
out_file = os.path.join(self.output_dir,
f'gids_new_reps.{self.domain}.lst')
fout = open(out_file, 'w')
new_rids = set()
for rid in cur_rids:
if rid in domain_gids and rid not in prev_rids:
fout.write('{}\n'.format(rid))
new_rids.add(rid)
fout.close()
self.logger.info(' - identified {:,} new GTDB representatives.'.
format(len(new_rids)))
self.pseudo_tree(new_rids, out_file.replace('.lst', '.tree'))
def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,
gtdb_decorate_table):
"""Polyphyletic and rogue GTDB representatives."""
self.logger.info(
'Identifying polyphyletic and rogue GTDB representatives.')
poly_taxa_count = 0
poly_gids = set()
rogue_gids = set()
with open(gtdb_decorate_table) as f:
f.readline()
for line in f:
tokens = line.split('\t')
taxon = tokens[0]
fmeasure = float(tokens[2])
rogue_in = tokens[7].strip()
rogue_out = tokens[8].strip()
if fmeasure < 1.0:
poly_taxa_count += 1
poly_gids.update(taxa_gid_map[taxon])
if rogue_in:
for gid in rogue_in.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
if rogue_out:
for gid in rogue_out.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
self.logger.info(
' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'
.format(poly_taxa_count, len(poly_gids)))
self.logger.info(' - identified {:,} rogue GTDB representatives.'.
format(len(rogue_gids)))
self.logger.info(
'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'
)
out_file = os.path.join(self.output_dir,
f'gids_poly_taxa.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in poly_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))
self.logger.info(
'Creating curation lists and pseudo-trees of rogue GTDB representatives.'
)
out_file = os.path.join(self.output_dir,
f'gids_rogues.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in rogue_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))
def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,
gtdb_prev_sp_clusters, gtdb_decorate_table):
"""Create curation lists and pseudo-trees."""
self.logger.info('Identifying taxonomic assignment of genomes.')
taxa_gid_map = defaultdict(set)
domain_gids = set()
for line in open(gtdb_init_taxonomy):
tokens = line.strip().split('\t')
gid = canonical_gid(tokens[0])
taxa = [t.strip() for t in tokens[1].split(';')]
for taxon in taxa:
taxa_gid_map[taxon].add(gid)
domain_gids.add(gid)
self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))
)
self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters
)
self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,
gtdb_decorate_table)
<|reserved_special_token_1|>
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2020'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '0.0.1'
__maintainer__ = 'Donovan Parks'
__email__ = '[email protected]'
__status__ = 'Development'
import sys
import argparse
import re
import datetime
import os
import logging
import time
import math
from collections import defaultdict, namedtuple
from biolib.common import canonical_gid
class CurationLists(object):
"""Lists and pseudo-trees for new representatives, polyphyletic taxa, rogue genomes, and genomes with modified NCBI names."""
def __init__(self, domain, output_dir):
"""Initialization."""
self.domain = domain
self.output_dir = output_dir
self.logger = logging.getLogger('timestamp')
def pseudo_tree(self, gids, out_tree):
"""Create pseudo-tree with the specified genome IDs."""
pseudo_tree = '('
pseudo_tree += ','.join(gids)
pseudo_tree += ');'
fout = open(out_tree, 'w')
fout.write(pseudo_tree)
fout.close()
def new_gtdb_reps(self,
domain_gids,
gtdb_sp_clusters,
gtdb_prev_sp_clusters):
"""New GTDB representatives."""
self.logger.info('Identifying previous GTDB representatives.')
prev_rids = set()
with open(gtdb_prev_sp_clusters) as f:
f.readline()
for line in f:
tokens = line.strip().split('\t')
rid = canonical_gid(tokens[0])
prev_rids.add(rid)
self.logger.info(' - identified {:,} previous GTDB representatives.'.format(
len(prev_rids)))
self.logger.info('Identifying current GTDB representatives.')
cur_rids = set()
with open(gtdb_sp_clusters) as f:
f.readline()
for line in f:
tokens = line.strip().split('\t')
rid = canonical_gid(tokens[0])
cur_rids.add(rid)
self.logger.info(' - identified {:,} current GTDB representatives.'.format(
len(cur_rids)))
self.logger.info('Creating curation list and pseudo-tree of new GTDB representatives.')
out_file = os.path.join(self.output_dir, f'gids_new_reps.{self.domain}.lst')
fout = open(out_file, 'w')
new_rids = set()
for rid in cur_rids:
if rid in domain_gids and rid not in prev_rids:
fout.write('{}\n'.format(rid))
new_rids.add(rid)
fout.close()
self.logger.info(' - identified {:,} new GTDB representatives.'.format(
len(new_rids)))
self.pseudo_tree(new_rids, out_file.replace('.lst', '.tree'))
def poly_rogue_gtdb_reps(self,
domain_gids,
taxa_gid_map,
gtdb_decorate_table):
"""Polyphyletic and rogue GTDB representatives."""
self.logger.info('Identifying polyphyletic and rogue GTDB representatives.')
poly_taxa_count = 0
poly_gids = set()
rogue_gids = set()
with open(gtdb_decorate_table) as f:
f.readline()
for line in f:
tokens = line.split('\t')
taxon = tokens[0]
fmeasure = float(tokens[2])
rogue_in = tokens[7].strip()
rogue_out = tokens[8].strip()
if fmeasure < 1.0:
poly_taxa_count += 1
poly_gids.update(taxa_gid_map[taxon])
if rogue_in:
for gid in rogue_in.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
if rogue_out:
for gid in rogue_out.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
self.logger.info(' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'.format(
poly_taxa_count,
len(poly_gids)))
self.logger.info(' - identified {:,} rogue GTDB representatives.'.format(
len(rogue_gids)))
self.logger.info('Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.')
out_file = os.path.join(self.output_dir, f'gids_poly_taxa.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in poly_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))
self.logger.info('Creating curation lists and pseudo-trees of rogue GTDB representatives.')
out_file = os.path.join(self.output_dir, f'gids_rogues.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in rogue_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))
def run(self,
gtdb_init_taxonomy,
gtdb_sp_clusters,
gtdb_prev_sp_clusters,
gtdb_decorate_table):
"""Create curation lists and pseudo-trees."""
# get genomes
self.logger.info('Identifying taxonomic assignment of genomes.')
taxa_gid_map = defaultdict(set)
domain_gids = set()
for line in open(gtdb_init_taxonomy):
tokens = line.strip().split('\t')
gid = canonical_gid(tokens[0])
taxa = [t.strip() for t in tokens[1].split(';')]
for taxon in taxa:
taxa_gid_map[taxon].add(gid)
domain_gids.add(gid)
self.logger.info(' - identified {:,} genomes.'.format(
len(domain_gids)))
# new GTDB representatives
self.new_gtdb_reps(domain_gids,
gtdb_sp_clusters,
gtdb_prev_sp_clusters)
# polyphyletic and rogue GTDB representatives
self.poly_rogue_gtdb_reps(domain_gids,
taxa_gid_map,
gtdb_decorate_table)
|
flexible
|
{
"blob_id": "53909b750f259b67b061ba26d604e0c2556376df",
"index": 9560,
"step-1": "<mask token>\n\n\nclass CurationLists(object):\n <mask token>\n <mask token>\n\n def pseudo_tree(self, gids, out_tree):\n \"\"\"Create pseudo-tree with the specified genome IDs.\"\"\"\n pseudo_tree = '('\n pseudo_tree += ','.join(gids)\n pseudo_tree += ');'\n fout = open(out_tree, 'w')\n fout.write(pseudo_tree)\n fout.close()\n <mask token>\n\n def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,\n gtdb_decorate_table):\n \"\"\"Polyphyletic and rogue GTDB representatives.\"\"\"\n self.logger.info(\n 'Identifying polyphyletic and rogue GTDB representatives.')\n poly_taxa_count = 0\n poly_gids = set()\n rogue_gids = set()\n with open(gtdb_decorate_table) as f:\n f.readline()\n for line in f:\n tokens = line.split('\\t')\n taxon = tokens[0]\n fmeasure = float(tokens[2])\n rogue_in = tokens[7].strip()\n rogue_out = tokens[8].strip()\n if fmeasure < 1.0:\n poly_taxa_count += 1\n poly_gids.update(taxa_gid_map[taxon])\n if rogue_in:\n for gid in rogue_in.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n if rogue_out:\n for gid in rogue_out.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n self.logger.info(\n ' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'\n .format(poly_taxa_count, len(poly_gids)))\n self.logger.info(' - identified {:,} rogue GTDB representatives.'.\n format(len(rogue_gids)))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_poly_taxa.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in poly_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of rogue GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_rogues.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in rogue_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))\n\n def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,\n gtdb_prev_sp_clusters, gtdb_decorate_table):\n \"\"\"Create curation lists and pseudo-trees.\"\"\"\n self.logger.info('Identifying taxonomic assignment of genomes.')\n taxa_gid_map = defaultdict(set)\n domain_gids = set()\n for line in open(gtdb_init_taxonomy):\n tokens = line.strip().split('\\t')\n gid = canonical_gid(tokens[0])\n taxa = [t.strip() for t in tokens[1].split(';')]\n for taxon in taxa:\n taxa_gid_map[taxon].add(gid)\n domain_gids.add(gid)\n self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))\n )\n self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters\n )\n self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,\n gtdb_decorate_table)\n",
"step-2": "<mask token>\n\n\nclass CurationLists(object):\n <mask token>\n\n def __init__(self, domain, output_dir):\n \"\"\"Initialization.\"\"\"\n self.domain = domain\n self.output_dir = output_dir\n self.logger = logging.getLogger('timestamp')\n\n def pseudo_tree(self, gids, out_tree):\n \"\"\"Create pseudo-tree with the specified genome IDs.\"\"\"\n pseudo_tree = '('\n pseudo_tree += ','.join(gids)\n pseudo_tree += ');'\n fout = open(out_tree, 'w')\n fout.write(pseudo_tree)\n fout.close()\n <mask token>\n\n def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,\n gtdb_decorate_table):\n \"\"\"Polyphyletic and rogue GTDB representatives.\"\"\"\n self.logger.info(\n 'Identifying polyphyletic and rogue GTDB representatives.')\n poly_taxa_count = 0\n poly_gids = set()\n rogue_gids = set()\n with open(gtdb_decorate_table) as f:\n f.readline()\n for line in f:\n tokens = line.split('\\t')\n taxon = tokens[0]\n fmeasure = float(tokens[2])\n rogue_in = tokens[7].strip()\n rogue_out = tokens[8].strip()\n if fmeasure < 1.0:\n poly_taxa_count += 1\n poly_gids.update(taxa_gid_map[taxon])\n if rogue_in:\n for gid in rogue_in.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n if rogue_out:\n for gid in rogue_out.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n self.logger.info(\n ' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'\n .format(poly_taxa_count, len(poly_gids)))\n self.logger.info(' - identified {:,} rogue GTDB representatives.'.\n format(len(rogue_gids)))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_poly_taxa.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in poly_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of rogue GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_rogues.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in rogue_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))\n\n def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,\n gtdb_prev_sp_clusters, gtdb_decorate_table):\n \"\"\"Create curation lists and pseudo-trees.\"\"\"\n self.logger.info('Identifying taxonomic assignment of genomes.')\n taxa_gid_map = defaultdict(set)\n domain_gids = set()\n for line in open(gtdb_init_taxonomy):\n tokens = line.strip().split('\\t')\n gid = canonical_gid(tokens[0])\n taxa = [t.strip() for t in tokens[1].split(';')]\n for taxon in taxa:\n taxa_gid_map[taxon].add(gid)\n domain_gids.add(gid)\n self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))\n )\n self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters\n )\n self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,\n gtdb_decorate_table)\n",
"step-3": "<mask token>\n\n\nclass CurationLists(object):\n <mask token>\n\n def __init__(self, domain, output_dir):\n \"\"\"Initialization.\"\"\"\n self.domain = domain\n self.output_dir = output_dir\n self.logger = logging.getLogger('timestamp')\n\n def pseudo_tree(self, gids, out_tree):\n \"\"\"Create pseudo-tree with the specified genome IDs.\"\"\"\n pseudo_tree = '('\n pseudo_tree += ','.join(gids)\n pseudo_tree += ');'\n fout = open(out_tree, 'w')\n fout.write(pseudo_tree)\n fout.close()\n\n def new_gtdb_reps(self, domain_gids, gtdb_sp_clusters,\n gtdb_prev_sp_clusters):\n \"\"\"New GTDB representatives.\"\"\"\n self.logger.info('Identifying previous GTDB representatives.')\n prev_rids = set()\n with open(gtdb_prev_sp_clusters) as f:\n f.readline()\n for line in f:\n tokens = line.strip().split('\\t')\n rid = canonical_gid(tokens[0])\n prev_rids.add(rid)\n self.logger.info(' - identified {:,} previous GTDB representatives.'\n .format(len(prev_rids)))\n self.logger.info('Identifying current GTDB representatives.')\n cur_rids = set()\n with open(gtdb_sp_clusters) as f:\n f.readline()\n for line in f:\n tokens = line.strip().split('\\t')\n rid = canonical_gid(tokens[0])\n cur_rids.add(rid)\n self.logger.info(' - identified {:,} current GTDB representatives.'\n .format(len(cur_rids)))\n self.logger.info(\n 'Creating curation list and pseudo-tree of new GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_new_reps.{self.domain}.lst')\n fout = open(out_file, 'w')\n new_rids = set()\n for rid in cur_rids:\n if rid in domain_gids and rid not in prev_rids:\n fout.write('{}\\n'.format(rid))\n new_rids.add(rid)\n fout.close()\n self.logger.info(' - identified {:,} new GTDB representatives.'.\n format(len(new_rids)))\n self.pseudo_tree(new_rids, out_file.replace('.lst', '.tree'))\n\n def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,\n gtdb_decorate_table):\n \"\"\"Polyphyletic and rogue GTDB representatives.\"\"\"\n self.logger.info(\n 'Identifying polyphyletic and rogue GTDB representatives.')\n poly_taxa_count = 0\n poly_gids = set()\n rogue_gids = set()\n with open(gtdb_decorate_table) as f:\n f.readline()\n for line in f:\n tokens = line.split('\\t')\n taxon = tokens[0]\n fmeasure = float(tokens[2])\n rogue_in = tokens[7].strip()\n rogue_out = tokens[8].strip()\n if fmeasure < 1.0:\n poly_taxa_count += 1\n poly_gids.update(taxa_gid_map[taxon])\n if rogue_in:\n for gid in rogue_in.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n if rogue_out:\n for gid in rogue_out.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n self.logger.info(\n ' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'\n .format(poly_taxa_count, len(poly_gids)))\n self.logger.info(' - identified {:,} rogue GTDB representatives.'.\n format(len(rogue_gids)))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_poly_taxa.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in poly_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of rogue GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_rogues.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in rogue_gids:\n 
fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))\n\n def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,\n gtdb_prev_sp_clusters, gtdb_decorate_table):\n \"\"\"Create curation lists and pseudo-trees.\"\"\"\n self.logger.info('Identifying taxonomic assignment of genomes.')\n taxa_gid_map = defaultdict(set)\n domain_gids = set()\n for line in open(gtdb_init_taxonomy):\n tokens = line.strip().split('\\t')\n gid = canonical_gid(tokens[0])\n taxa = [t.strip() for t in tokens[1].split(';')]\n for taxon in taxa:\n taxa_gid_map[taxon].add(gid)\n domain_gids.add(gid)\n self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))\n )\n self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters\n )\n self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,\n gtdb_decorate_table)\n",
"step-4": "__author__ = 'Donovan Parks'\n__copyright__ = 'Copyright 2020'\n__credits__ = ['Donovan Parks']\n__license__ = 'GPL3'\n__version__ = '0.0.1'\n__maintainer__ = 'Donovan Parks'\n__email__ = '[email protected]'\n__status__ = 'Development'\nimport sys\nimport argparse\nimport re\nimport datetime\nimport os\nimport logging\nimport time\nimport math\nfrom collections import defaultdict, namedtuple\nfrom biolib.common import canonical_gid\n\n\nclass CurationLists(object):\n \"\"\"Lists and pseudo-trees for new representatives, polyphyletic taxa, rogue genomes, and genomes with modified NCBI names.\"\"\"\n\n def __init__(self, domain, output_dir):\n \"\"\"Initialization.\"\"\"\n self.domain = domain\n self.output_dir = output_dir\n self.logger = logging.getLogger('timestamp')\n\n def pseudo_tree(self, gids, out_tree):\n \"\"\"Create pseudo-tree with the specified genome IDs.\"\"\"\n pseudo_tree = '('\n pseudo_tree += ','.join(gids)\n pseudo_tree += ');'\n fout = open(out_tree, 'w')\n fout.write(pseudo_tree)\n fout.close()\n\n def new_gtdb_reps(self, domain_gids, gtdb_sp_clusters,\n gtdb_prev_sp_clusters):\n \"\"\"New GTDB representatives.\"\"\"\n self.logger.info('Identifying previous GTDB representatives.')\n prev_rids = set()\n with open(gtdb_prev_sp_clusters) as f:\n f.readline()\n for line in f:\n tokens = line.strip().split('\\t')\n rid = canonical_gid(tokens[0])\n prev_rids.add(rid)\n self.logger.info(' - identified {:,} previous GTDB representatives.'\n .format(len(prev_rids)))\n self.logger.info('Identifying current GTDB representatives.')\n cur_rids = set()\n with open(gtdb_sp_clusters) as f:\n f.readline()\n for line in f:\n tokens = line.strip().split('\\t')\n rid = canonical_gid(tokens[0])\n cur_rids.add(rid)\n self.logger.info(' - identified {:,} current GTDB representatives.'\n .format(len(cur_rids)))\n self.logger.info(\n 'Creating curation list and pseudo-tree of new GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_new_reps.{self.domain}.lst')\n fout = open(out_file, 'w')\n new_rids = set()\n for rid in cur_rids:\n if rid in domain_gids and rid not in prev_rids:\n fout.write('{}\\n'.format(rid))\n new_rids.add(rid)\n fout.close()\n self.logger.info(' - identified {:,} new GTDB representatives.'.\n format(len(new_rids)))\n self.pseudo_tree(new_rids, out_file.replace('.lst', '.tree'))\n\n def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,\n gtdb_decorate_table):\n \"\"\"Polyphyletic and rogue GTDB representatives.\"\"\"\n self.logger.info(\n 'Identifying polyphyletic and rogue GTDB representatives.')\n poly_taxa_count = 0\n poly_gids = set()\n rogue_gids = set()\n with open(gtdb_decorate_table) as f:\n f.readline()\n for line in f:\n tokens = line.split('\\t')\n taxon = tokens[0]\n fmeasure = float(tokens[2])\n rogue_in = tokens[7].strip()\n rogue_out = tokens[8].strip()\n if fmeasure < 1.0:\n poly_taxa_count += 1\n poly_gids.update(taxa_gid_map[taxon])\n if rogue_in:\n for gid in rogue_in.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n if rogue_out:\n for gid in rogue_out.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n self.logger.info(\n ' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'\n .format(poly_taxa_count, len(poly_gids)))\n self.logger.info(' - identified {:,} rogue GTDB representatives.'.\n format(len(rogue_gids)))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of polyphyletic 
GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_poly_taxa.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in poly_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of rogue GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_rogues.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in rogue_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))\n\n def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,\n gtdb_prev_sp_clusters, gtdb_decorate_table):\n \"\"\"Create curation lists and pseudo-trees.\"\"\"\n self.logger.info('Identifying taxonomic assignment of genomes.')\n taxa_gid_map = defaultdict(set)\n domain_gids = set()\n for line in open(gtdb_init_taxonomy):\n tokens = line.strip().split('\\t')\n gid = canonical_gid(tokens[0])\n taxa = [t.strip() for t in tokens[1].split(';')]\n for taxon in taxa:\n taxa_gid_map[taxon].add(gid)\n domain_gids.add(gid)\n self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))\n )\n self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters\n )\n self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,\n gtdb_decorate_table)\n",
"step-5": "###############################################################################\r\n# #\r\n# This program is free software: you can redistribute it and/or modify #\r\n# it under the terms of the GNU General Public License as published by #\r\n# the Free Software Foundation, either version 3 of the License, or #\r\n# (at your option) any later version. #\r\n# #\r\n# This program is distributed in the hope that it will be useful, #\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\r\n# GNU General Public License for more details. #\r\n# #\r\n# You should have received a copy of the GNU General Public License #\r\n# along with this program. If not, see <http://www.gnu.org/licenses/>. #\r\n# #\r\n###############################################################################\r\n\r\n__author__ = 'Donovan Parks'\r\n__copyright__ = 'Copyright 2020'\r\n__credits__ = ['Donovan Parks']\r\n__license__ = 'GPL3'\r\n__version__ = '0.0.1'\r\n__maintainer__ = 'Donovan Parks'\r\n__email__ = '[email protected]'\r\n__status__ = 'Development'\r\n\r\nimport sys\r\nimport argparse\r\nimport re\r\nimport datetime\r\nimport os\r\nimport logging\r\nimport time\r\nimport math\r\nfrom collections import defaultdict, namedtuple\r\n\r\nfrom biolib.common import canonical_gid\r\n\r\nclass CurationLists(object):\r\n \"\"\"Lists and pseudo-trees for new representatives, polyphyletic taxa, rogue genomes, and genomes with modified NCBI names.\"\"\"\r\n \r\n def __init__(self, domain, output_dir):\r\n \"\"\"Initialization.\"\"\"\r\n \r\n self.domain = domain\r\n self.output_dir = output_dir\r\n self.logger = logging.getLogger('timestamp')\r\n \r\n def pseudo_tree(self, gids, out_tree):\r\n \"\"\"Create pseudo-tree with the specified genome IDs.\"\"\"\r\n \r\n pseudo_tree = '('\r\n pseudo_tree += ','.join(gids)\r\n pseudo_tree += ');'\r\n \r\n fout = open(out_tree, 'w')\r\n fout.write(pseudo_tree)\r\n fout.close()\r\n \r\n def new_gtdb_reps(self,\r\n domain_gids,\r\n gtdb_sp_clusters,\r\n gtdb_prev_sp_clusters):\r\n \"\"\"New GTDB representatives.\"\"\"\r\n\r\n self.logger.info('Identifying previous GTDB representatives.')\r\n prev_rids = set()\r\n with open(gtdb_prev_sp_clusters) as f:\r\n f.readline()\r\n for line in f:\r\n tokens = line.strip().split('\\t')\r\n rid = canonical_gid(tokens[0])\r\n prev_rids.add(rid)\r\n self.logger.info(' - identified {:,} previous GTDB representatives.'.format(\r\n len(prev_rids)))\r\n\r\n self.logger.info('Identifying current GTDB representatives.')\r\n cur_rids = set()\r\n with open(gtdb_sp_clusters) as f:\r\n f.readline()\r\n for line in f:\r\n tokens = line.strip().split('\\t')\r\n rid = canonical_gid(tokens[0])\r\n cur_rids.add(rid)\r\n self.logger.info(' - identified {:,} current GTDB representatives.'.format(\r\n len(cur_rids)))\r\n\r\n self.logger.info('Creating curation list and pseudo-tree of new GTDB representatives.')\r\n out_file = os.path.join(self.output_dir, f'gids_new_reps.{self.domain}.lst')\r\n fout = open(out_file, 'w')\r\n new_rids = set()\r\n for rid in cur_rids:\r\n if rid in domain_gids and rid not in prev_rids:\r\n fout.write('{}\\n'.format(rid))\r\n new_rids.add(rid)\r\n fout.close()\r\n self.logger.info(' - identified {:,} new GTDB representatives.'.format(\r\n len(new_rids)))\r\n \r\n self.pseudo_tree(new_rids, out_file.replace('.lst', '.tree'))\r\n\r\n def poly_rogue_gtdb_reps(self,\r\n domain_gids,\r\n taxa_gid_map,\r\n gtdb_decorate_table):\r\n \"\"\"Polyphyletic 
and rogue GTDB representatives.\"\"\"\r\n \r\n self.logger.info('Identifying polyphyletic and rogue GTDB representatives.')\r\n poly_taxa_count = 0\r\n poly_gids = set()\r\n rogue_gids = set()\r\n with open(gtdb_decorate_table) as f:\r\n f.readline()\r\n for line in f:\r\n tokens = line.split('\\t')\r\n \r\n taxon = tokens[0]\r\n fmeasure = float(tokens[2])\r\n rogue_in = tokens[7].strip()\r\n rogue_out = tokens[8].strip()\r\n if fmeasure < 1.0:\r\n poly_taxa_count += 1\r\n poly_gids.update(taxa_gid_map[taxon])\r\n \r\n if rogue_in:\r\n for gid in rogue_in.split(','):\r\n gid = canonical_gid(gid.strip())\r\n if not gid.startswith('D-'):\r\n rogue_gids.add(gid)\r\n \r\n if rogue_out:\r\n for gid in rogue_out.split(','):\r\n gid = canonical_gid(gid.strip())\r\n if not gid.startswith('D-'):\r\n rogue_gids.add(gid)\r\n\r\n self.logger.info(' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'.format(\r\n poly_taxa_count,\r\n len(poly_gids)))\r\n self.logger.info(' - identified {:,} rogue GTDB representatives.'.format(\r\n len(rogue_gids)))\r\n\r\n self.logger.info('Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.')\r\n out_file = os.path.join(self.output_dir, f'gids_poly_taxa.{self.domain}.lst')\r\n fout = open(out_file, 'w')\r\n for gid in poly_gids:\r\n fout.write('{}\\n'.format(gid))\r\n fout.close()\r\n self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))\r\n \r\n self.logger.info('Creating curation lists and pseudo-trees of rogue GTDB representatives.')\r\n out_file = os.path.join(self.output_dir, f'gids_rogues.{self.domain}.lst')\r\n fout = open(out_file, 'w')\r\n for gid in rogue_gids:\r\n fout.write('{}\\n'.format(gid))\r\n fout.close()\r\n self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))\r\n \r\n def run(self,\r\n gtdb_init_taxonomy,\r\n gtdb_sp_clusters,\r\n gtdb_prev_sp_clusters,\r\n gtdb_decorate_table):\r\n \"\"\"Create curation lists and pseudo-trees.\"\"\"\r\n\r\n # get genomes\r\n self.logger.info('Identifying taxonomic assignment of genomes.')\r\n taxa_gid_map = defaultdict(set)\r\n domain_gids = set()\r\n for line in open(gtdb_init_taxonomy):\r\n tokens = line.strip().split('\\t')\r\n gid = canonical_gid(tokens[0])\r\n \r\n taxa = [t.strip() for t in tokens[1].split(';')]\r\n for taxon in taxa:\r\n taxa_gid_map[taxon].add(gid)\r\n \r\n domain_gids.add(gid)\r\n self.logger.info(' - identified {:,} genomes.'.format(\r\n len(domain_gids)))\r\n \r\n # new GTDB representatives\r\n self.new_gtdb_reps(domain_gids,\r\n gtdb_sp_clusters,\r\n gtdb_prev_sp_clusters)\r\n \r\n # polyphyletic and rogue GTDB representatives\r\n self.poly_rogue_gtdb_reps(domain_gids,\r\n taxa_gid_map,\r\n gtdb_decorate_table)",
"step-ids": [
4,
5,
6,
9,
10
]
}
|
[
4,
5,
6,
9,
10
] |
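The CurationLists example above imports argparse but never shows an entry point. Below is a minimal sketch of how the class might be wired to a command line; the module name, argument names, defaults, and logger setup are assumptions for illustration, not part of the original code.

# Hypothetical CLI wrapper for the CurationLists class above.
# The module name 'curation_lists', argument names, and defaults are assumptions.
import argparse
import logging
from curation_lists import CurationLists  # assumed module holding the class above

def main():
    parser = argparse.ArgumentParser(description='Create GTDB curation lists and pseudo-trees.')
    parser.add_argument('gtdb_init_taxonomy', help='initial GTDB taxonomy file (TSV)')
    parser.add_argument('gtdb_sp_clusters', help='current GTDB species cluster table')
    parser.add_argument('gtdb_prev_sp_clusters', help='previous GTDB species cluster table')
    parser.add_argument('gtdb_decorate_table', help='PhyloRank decorate table')
    parser.add_argument('--domain', default='bac120', help='label used in output file names')
    parser.add_argument('--output_dir', default='.', help='directory for curation lists and pseudo-trees')
    args = parser.parse_args()
    # The class logs through logging.getLogger('timestamp'), so a basic root
    # configuration is enough to surface its messages.
    logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s: %(message)s')
    CurationLists(args.domain, args.output_dir).run(
        args.gtdb_init_taxonomy,
        args.gtdb_sp_clusters,
        args.gtdb_prev_sp_clusters,
        args.gtdb_decorate_table)

if __name__ == '__main__':
    main()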
#basic API start
from flask import Flask, jsonify, abort, request
from cruiseItem import cruiseItem
from sqlalchemy import create_engine
from json import dumps
db_connect = create_engine('sqlite:///Carnivorecruise.sqlite')
app = Flask(__name__)
app.json_encoder.default = lambda self, o: o.to_json()
app.app_context()
# Array to store the objects
InventoryArr = {}
HistoryArr = {}
def get_cruiseitemArr():
    conn = db_connect.connect() # connect to database
    query = conn.execute("select * from CruiseItem") # Perform query for all CruiseItems in db
    InventoryArr = [list(row) for row in query.cursor.fetchall()]
    print(InventoryArr)
    return InventoryArr # return a plain list so the route can jsonify it
def get_cruiseitemArr_byLoc(Location):
    conn = db_connect.connect() # connect to database
    query = conn.execute("select * from CruiseItem where fromLocation ='%s'" % str(Location))
    InventoryArr = [list(row) for row in query.cursor.fetchall()]
    print(InventoryArr)
    return InventoryArr # return a plain list; the route converts it to JSON
def get_cruiseHistory():
conn = db_connect.connect() # connect to database
query = conn.execute("select * from cruiseHistory")
HistoryArr = query.cursor.fetchall()
print(HistoryArr)
@app.route('/inventory', methods=['GET'])
def get_cruiseitems():
return jsonify(status="ok",InventoryArr=get_cruiseitemArr())
@app.route('/inventory/location/<Location>', methods=['GET'])
def get_cruiseitems_by_location(Location):
return jsonify(status="ok", InventoryArr=get_cruiseitemArr_byLoc(Location))
if __name__ == '__main__':
app.run("0.0.0.0", 80)
|
normal
|
{
"blob_id": "65bfb59a255b42854eec8b55b28711737cfc46c2",
"index": 9325,
"step-1": "<mask token>\n\n\ndef get_cruiseitemArr():\n conn = db_connect.connect()\n query = conn.execute('select * from CruiseItem')\n InventoryArr = query.cursor.fetchall()\n print(InventoryArr)\n return jsonify(InventoryArr)\n\n\ndef get_cruiseitemArr_byLoc(Location):\n conn = db_connect.connect()\n query = conn.execute(\n \"select * from Cruiseitem where fromLocation ='%s'\" % str(Location))\n InventoryArr = query.cursor.fetchall()\n print(InventoryArr)\n return jsonify(query)\n\n\n<mask token>\n\n\[email protected]('/inventory', methods=['GET'])\ndef get_cruiseitems():\n return jsonify(status='ok', InventoryArr=get_cruiseitemArr())\n\n\[email protected]('/inventory/location/<Location>', methods=['GET'])\ndef get_cruiseitems_by_location(Location):\n return jsonify(status='ok', InventoryArr=get_cruiseitemArr_byLoc(Location))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_cruiseitemArr():\n conn = db_connect.connect()\n query = conn.execute('select * from CruiseItem')\n InventoryArr = query.cursor.fetchall()\n print(InventoryArr)\n return jsonify(InventoryArr)\n\n\ndef get_cruiseitemArr_byLoc(Location):\n conn = db_connect.connect()\n query = conn.execute(\n \"select * from Cruiseitem where fromLocation ='%s'\" % str(Location))\n InventoryArr = query.cursor.fetchall()\n print(InventoryArr)\n return jsonify(query)\n\n\ndef get_cruiseHistory():\n conn = db_connect.connect()\n query = conn.execute('select * from cruiseHistory')\n HistoryArr = query.cursor.fetchall()\n print(HistoryArr)\n\n\[email protected]('/inventory', methods=['GET'])\ndef get_cruiseitems():\n return jsonify(status='ok', InventoryArr=get_cruiseitemArr())\n\n\[email protected]('/inventory/location/<Location>', methods=['GET'])\ndef get_cruiseitems_by_location(Location):\n return jsonify(status='ok', InventoryArr=get_cruiseitemArr_byLoc(Location))\n\n\n<mask token>\n",
"step-3": "<mask token>\napp.app_context()\n<mask token>\n\n\ndef get_cruiseitemArr():\n conn = db_connect.connect()\n query = conn.execute('select * from CruiseItem')\n InventoryArr = query.cursor.fetchall()\n print(InventoryArr)\n return jsonify(InventoryArr)\n\n\ndef get_cruiseitemArr_byLoc(Location):\n conn = db_connect.connect()\n query = conn.execute(\n \"select * from Cruiseitem where fromLocation ='%s'\" % str(Location))\n InventoryArr = query.cursor.fetchall()\n print(InventoryArr)\n return jsonify(query)\n\n\ndef get_cruiseHistory():\n conn = db_connect.connect()\n query = conn.execute('select * from cruiseHistory')\n HistoryArr = query.cursor.fetchall()\n print(HistoryArr)\n\n\[email protected]('/inventory', methods=['GET'])\ndef get_cruiseitems():\n return jsonify(status='ok', InventoryArr=get_cruiseitemArr())\n\n\[email protected]('/inventory/location/<Location>', methods=['GET'])\ndef get_cruiseitems_by_location(Location):\n return jsonify(status='ok', InventoryArr=get_cruiseitemArr_byLoc(Location))\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', 80)\n",
"step-4": "from flask import Flask, jsonify, abort, request\nfrom cruiseItem import cruiseItem\nfrom sqlalchemy import create_engine\nfrom json import dumps\ndb_connect = create_engine('sqlite:///Carnivorecruise.sqlite')\napp = Flask(__name__)\napp.json_encoder.default = lambda self, o: o.to_joson()\napp.app_context()\nInventoryArr = {}\nHistoryArr = {}\n\n\ndef get_cruiseitemArr():\n conn = db_connect.connect()\n query = conn.execute('select * from CruiseItem')\n InventoryArr = query.cursor.fetchall()\n print(InventoryArr)\n return jsonify(InventoryArr)\n\n\ndef get_cruiseitemArr_byLoc(Location):\n conn = db_connect.connect()\n query = conn.execute(\n \"select * from Cruiseitem where fromLocation ='%s'\" % str(Location))\n InventoryArr = query.cursor.fetchall()\n print(InventoryArr)\n return jsonify(query)\n\n\ndef get_cruiseHistory():\n conn = db_connect.connect()\n query = conn.execute('select * from cruiseHistory')\n HistoryArr = query.cursor.fetchall()\n print(HistoryArr)\n\n\[email protected]('/inventory', methods=['GET'])\ndef get_cruiseitems():\n return jsonify(status='ok', InventoryArr=get_cruiseitemArr())\n\n\[email protected]('/inventory/location/<Location>', methods=['GET'])\ndef get_cruiseitems_by_location(Location):\n return jsonify(status='ok', InventoryArr=get_cruiseitemArr_byLoc(Location))\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', 80)\n",
"step-5": "#basic API start\r\nfrom flask import Flask, jsonify, abort, request\r\nfrom cruiseItem import cruiseItem\r\nfrom sqlalchemy import create_engine\r\nfrom json import dumps\r\n\r\ndb_connect = create_engine('sqlite:///Carnivorecruise.sqlite')\r\napp = Flask(__name__)\r\napp.json_encoder.default = lambda self, o: o.to_joson()\r\napp.app_context()\r\n\r\n# Array to store the objects\r\nInventoryArr = {}\r\nHistoryArr = {}\r\n\r\ndef get_cruiseitemArr():\r\n conn = db_connect.connect() # connect to database\r\n query = conn.execute(\"select * from CruiseItem\") #Perform query for all CruiseItems in db\r\n InventoryArr = query.cursor.fetchall()\r\n print(InventoryArr)\r\n return jsonify(InventoryArr)\r\n\r\ndef get_cruiseitemArr_byLoc(Location):\r\n conn = db_connect.connect() #connect to database\r\n query = conn.execute(\"select * from Cruiseitem where fromLocation ='%s'\"%str(Location))\r\n InventoryArr = query.cursor.fetchall()\r\n print(InventoryArr)\r\n return jsonify(query) #convert query result into a json\r\n\r\ndef get_cruiseHistory():\r\n conn = db_connect.connect() # connect to database\r\n query = conn.execute(\"select * from cruiseHistory\")\r\n HistoryArr = query.cursor.fetchall()\r\n print(HistoryArr)\r\n\r\[email protected]('/inventory', methods=['GET'])\r\ndef get_cruiseitems():\r\n return jsonify(status=\"ok\",InventoryArr=get_cruiseitemArr())\r\n\r\n\r\[email protected]('/inventory/location/<Location>', methods=['GET'])\r\ndef get_cruiseitems_by_location(Location):\r\n return jsonify(status=\"ok\", InventoryArr=get_cruiseitemArr_byLoc(Location))\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(\"0.0.0.0\", 80)\r\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
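A small smoke test for the Flask inventory API above, using Flask's built-in test client. The module name 'cruise_api' and the example location value are assumptions; point them at the file the code actually lives in and at data present in Carnivorecruise.sqlite.

# Hypothetical smoke test for the inventory endpoints above.
# 'cruise_api' and the example location are assumptions, not part of the original.
from cruise_api import app

def smoke_test():
    with app.test_client() as client:
        resp = client.get('/inventory')  # full inventory listing
        print(resp.status_code, resp.get_json())
        resp = client.get('/inventory/location/Miami')  # filtered by departure location (example value)
        print(resp.status_code, resp.get_json())

if __name__ == '__main__':
    smoke_test()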
import os
import pathlib
import enum
import warnings
import colorama
import requests
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import invoke
class MoleculeDriver(enum.Enum):
docker = 1
lxd = 2
vagrant = 3
class TestPlatform(enum.Enum):
linux = 1
ubuntu = 2
centos = 3
def print_header(header_text):
print(
colorama.Fore.CYAN + colorama.Style.BRIGHT +
f" {header_text} ".center(80, "=") +
colorama.Style.RESET_ALL
)
def print_sub_header(sub_header_text):
print(
colorama.Fore.CYAN + colorama.Style.BRIGHT + "--" +
f" {sub_header_text} ".ljust(78, "-") +
colorama.Style.RESET_ALL
)
def print_success_message(success_message_text):
print(
colorama.Fore.GREEN + colorama.Style.BRIGHT +
f" {success_message_text}: Success ".center(80, "=") +
colorama.Style.RESET_ALL
)
def run_command(context, *args, **kwargs):
try:
return context.run(*args, **kwargs)
except invoke.exceptions.Failure:
print(
colorama.Fore.RED + colorama.Style.BRIGHT +
"Failure: error executing '" + args[0] + "' command" +
colorama.Style.RESET_ALL
)
raise
def get_base_config_path(driver_code, platform_code):
base_config = "molecule/molecule_base_{driver}_{platform}.yml".format(
driver=driver_code.name, platform=platform_code.name
)
return str(pathlib.Path(__file__).resolve().parent / base_config)
def get_molecule_scenarios(context):
scenarios = []
for child_obj in (pathlib.Path.cwd() / "molecule").iterdir():
if child_obj.is_dir():
if (child_obj / "molecule.yml").exists():
scenarios.append(child_obj.name)
return sorted(scenarios)
def run_molecule(context, command, scenario, driver, platform="linux", env={}):
driver_code = MoleculeDriver[driver.lower()]
platform_code = TestPlatform[platform.lower()]
molecule_env = env.copy()
if driver_code == MoleculeDriver.lxd:
molecule_env.update({"MOLECULE_USER_NAME": "root"})
elif driver_code == MoleculeDriver.vagrant:
molecule_env.update({"MOLECULE_USER_NAME": "vagrant"})
molecule_command = (
f"molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}"
)
if scenario is not None:
molecule_command += f" -s {scenario}"
run_command(context, molecule_command, env=molecule_env, echo=True)
def get_parameter_value(host, ansible_var_name, param_value, default_value):
if host.backend.HAS_RUN_ANSIBLE:
ansible_var_value = host.ansible.get_variables().get(ansible_var_name, None)
else:
ansible_var_value = None
return_value = ansible_var_value if param_value is None else param_value
if return_value is None:
return_value = default_value
return return_value
def get_github_release_info(release_url):
if "AO_GITHUB_OAUTH_TOKEN" in os.environ:
headers = {"Authorization": "token " + os.environ["AO_GITHUB_OAUTH_TOKEN"]}
else:
headers = None
return requests.get(
"https://api.github.com/repos/" + release_url, headers=headers
).json()
|
normal
|
{
"blob_id": "5bdc08b66916959d462314b8a6e5794e5fa12b55",
"index": 7986,
"step-1": "<mask token>\n\n\nclass MoleculeDriver(enum.Enum):\n docker = 1\n lxd = 2\n vagrant = 3\n\n\nclass TestPlatform(enum.Enum):\n linux = 1\n ubuntu = 2\n centos = 3\n\n\n<mask token>\n\n\ndef print_sub_header(sub_header_text):\n print(colorama.Fore.CYAN + colorama.Style.BRIGHT + '--' +\n f' {sub_header_text} '.ljust(78, '-') + colorama.Style.RESET_ALL)\n\n\ndef print_success_message(success_message_text):\n print(colorama.Fore.GREEN + colorama.Style.BRIGHT +\n f' {success_message_text}: Success '.center(80, '=') + colorama.\n Style.RESET_ALL)\n\n\n<mask token>\n\n\ndef get_base_config_path(driver_code, platform_code):\n base_config = 'molecule/molecule_base_{driver}_{platform}.yml'.format(\n driver=driver_code.name, platform=platform_code.name)\n return str(pathlib.Path(__file__).resolve().parent / base_config)\n\n\ndef get_molecule_scenarios(context):\n scenarios = []\n for child_obj in (pathlib.Path.cwd() / 'molecule').iterdir():\n if child_obj.is_dir():\n if (child_obj / 'molecule.yml').exists():\n scenarios.append(child_obj.name)\n return sorted(scenarios)\n\n\n<mask token>\n\n\ndef get_parameter_value(host, ansible_var_name, param_value, default_value):\n if host.backend.HAS_RUN_ANSIBLE:\n ansible_var_value = host.ansible.get_variables().get(ansible_var_name,\n None)\n else:\n ansible_var_value = None\n return_value = ansible_var_value if param_value is None else param_value\n if return_value is None:\n return_value = default_value\n return return_value\n\n\ndef get_github_release_info(release_url):\n if 'AO_GITHUB_OAUTH_TOKEN' in os.environ:\n headers = {'Authorization': 'token ' + os.environ[\n 'AO_GITHUB_OAUTH_TOKEN']}\n else:\n headers = None\n return requests.get('https://api.github.com/repos/' + release_url,\n headers=headers).json()\n",
"step-2": "<mask token>\n\n\nclass MoleculeDriver(enum.Enum):\n docker = 1\n lxd = 2\n vagrant = 3\n\n\nclass TestPlatform(enum.Enum):\n linux = 1\n ubuntu = 2\n centos = 3\n\n\n<mask token>\n\n\ndef print_sub_header(sub_header_text):\n print(colorama.Fore.CYAN + colorama.Style.BRIGHT + '--' +\n f' {sub_header_text} '.ljust(78, '-') + colorama.Style.RESET_ALL)\n\n\ndef print_success_message(success_message_text):\n print(colorama.Fore.GREEN + colorama.Style.BRIGHT +\n f' {success_message_text}: Success '.center(80, '=') + colorama.\n Style.RESET_ALL)\n\n\n<mask token>\n\n\ndef get_base_config_path(driver_code, platform_code):\n base_config = 'molecule/molecule_base_{driver}_{platform}.yml'.format(\n driver=driver_code.name, platform=platform_code.name)\n return str(pathlib.Path(__file__).resolve().parent / base_config)\n\n\ndef get_molecule_scenarios(context):\n scenarios = []\n for child_obj in (pathlib.Path.cwd() / 'molecule').iterdir():\n if child_obj.is_dir():\n if (child_obj / 'molecule.yml').exists():\n scenarios.append(child_obj.name)\n return sorted(scenarios)\n\n\ndef run_molecule(context, command, scenario, driver, platform='linux', env={}):\n driver_code = MoleculeDriver[driver.lower()]\n platform_code = TestPlatform[platform.lower()]\n molecule_env = env.copy()\n if driver_code == MoleculeDriver.lxd:\n molecule_env.update({'MOLECULE_USER_NAME': 'root'})\n elif driver_code == MoleculeDriver.vagrant:\n molecule_env.update({'MOLECULE_USER_NAME': 'vagrant'})\n molecule_command = (\n f'molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}'\n )\n if scenario is not None:\n molecule_command += f' -s {scenario}'\n run_command(context, molecule_command, env=molecule_env, echo=True)\n\n\ndef get_parameter_value(host, ansible_var_name, param_value, default_value):\n if host.backend.HAS_RUN_ANSIBLE:\n ansible_var_value = host.ansible.get_variables().get(ansible_var_name,\n None)\n else:\n ansible_var_value = None\n return_value = ansible_var_value if param_value is None else param_value\n if return_value is None:\n return_value = default_value\n return return_value\n\n\ndef get_github_release_info(release_url):\n if 'AO_GITHUB_OAUTH_TOKEN' in os.environ:\n headers = {'Authorization': 'token ' + os.environ[\n 'AO_GITHUB_OAUTH_TOKEN']}\n else:\n headers = None\n return requests.get('https://api.github.com/repos/' + release_url,\n headers=headers).json()\n",
"step-3": "<mask token>\n\n\nclass MoleculeDriver(enum.Enum):\n docker = 1\n lxd = 2\n vagrant = 3\n\n\nclass TestPlatform(enum.Enum):\n linux = 1\n ubuntu = 2\n centos = 3\n\n\ndef print_header(header_text):\n print(colorama.Fore.CYAN + colorama.Style.BRIGHT + f' {header_text} '.\n center(80, '=') + colorama.Style.RESET_ALL)\n\n\ndef print_sub_header(sub_header_text):\n print(colorama.Fore.CYAN + colorama.Style.BRIGHT + '--' +\n f' {sub_header_text} '.ljust(78, '-') + colorama.Style.RESET_ALL)\n\n\ndef print_success_message(success_message_text):\n print(colorama.Fore.GREEN + colorama.Style.BRIGHT +\n f' {success_message_text}: Success '.center(80, '=') + colorama.\n Style.RESET_ALL)\n\n\ndef run_command(context, *args, **kwargs):\n try:\n return context.run(*args, **kwargs)\n except invoke.exceptions.Failure:\n print(colorama.Fore.RED + colorama.Style.BRIGHT +\n \"Failure: error executing '\" + args[0] + \"' command\" + colorama\n .Style.RESET_ALL)\n raise\n\n\ndef get_base_config_path(driver_code, platform_code):\n base_config = 'molecule/molecule_base_{driver}_{platform}.yml'.format(\n driver=driver_code.name, platform=platform_code.name)\n return str(pathlib.Path(__file__).resolve().parent / base_config)\n\n\ndef get_molecule_scenarios(context):\n scenarios = []\n for child_obj in (pathlib.Path.cwd() / 'molecule').iterdir():\n if child_obj.is_dir():\n if (child_obj / 'molecule.yml').exists():\n scenarios.append(child_obj.name)\n return sorted(scenarios)\n\n\ndef run_molecule(context, command, scenario, driver, platform='linux', env={}):\n driver_code = MoleculeDriver[driver.lower()]\n platform_code = TestPlatform[platform.lower()]\n molecule_env = env.copy()\n if driver_code == MoleculeDriver.lxd:\n molecule_env.update({'MOLECULE_USER_NAME': 'root'})\n elif driver_code == MoleculeDriver.vagrant:\n molecule_env.update({'MOLECULE_USER_NAME': 'vagrant'})\n molecule_command = (\n f'molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}'\n )\n if scenario is not None:\n molecule_command += f' -s {scenario}'\n run_command(context, molecule_command, env=molecule_env, echo=True)\n\n\ndef get_parameter_value(host, ansible_var_name, param_value, default_value):\n if host.backend.HAS_RUN_ANSIBLE:\n ansible_var_value = host.ansible.get_variables().get(ansible_var_name,\n None)\n else:\n ansible_var_value = None\n return_value = ansible_var_value if param_value is None else param_value\n if return_value is None:\n return_value = default_value\n return return_value\n\n\ndef get_github_release_info(release_url):\n if 'AO_GITHUB_OAUTH_TOKEN' in os.environ:\n headers = {'Authorization': 'token ' + os.environ[\n 'AO_GITHUB_OAUTH_TOKEN']}\n else:\n headers = None\n return requests.get('https://api.github.com/repos/' + release_url,\n headers=headers).json()\n",
"step-4": "<mask token>\nwith warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=DeprecationWarning)\n import invoke\n\n\nclass MoleculeDriver(enum.Enum):\n docker = 1\n lxd = 2\n vagrant = 3\n\n\nclass TestPlatform(enum.Enum):\n linux = 1\n ubuntu = 2\n centos = 3\n\n\ndef print_header(header_text):\n print(colorama.Fore.CYAN + colorama.Style.BRIGHT + f' {header_text} '.\n center(80, '=') + colorama.Style.RESET_ALL)\n\n\ndef print_sub_header(sub_header_text):\n print(colorama.Fore.CYAN + colorama.Style.BRIGHT + '--' +\n f' {sub_header_text} '.ljust(78, '-') + colorama.Style.RESET_ALL)\n\n\ndef print_success_message(success_message_text):\n print(colorama.Fore.GREEN + colorama.Style.BRIGHT +\n f' {success_message_text}: Success '.center(80, '=') + colorama.\n Style.RESET_ALL)\n\n\ndef run_command(context, *args, **kwargs):\n try:\n return context.run(*args, **kwargs)\n except invoke.exceptions.Failure:\n print(colorama.Fore.RED + colorama.Style.BRIGHT +\n \"Failure: error executing '\" + args[0] + \"' command\" + colorama\n .Style.RESET_ALL)\n raise\n\n\ndef get_base_config_path(driver_code, platform_code):\n base_config = 'molecule/molecule_base_{driver}_{platform}.yml'.format(\n driver=driver_code.name, platform=platform_code.name)\n return str(pathlib.Path(__file__).resolve().parent / base_config)\n\n\ndef get_molecule_scenarios(context):\n scenarios = []\n for child_obj in (pathlib.Path.cwd() / 'molecule').iterdir():\n if child_obj.is_dir():\n if (child_obj / 'molecule.yml').exists():\n scenarios.append(child_obj.name)\n return sorted(scenarios)\n\n\ndef run_molecule(context, command, scenario, driver, platform='linux', env={}):\n driver_code = MoleculeDriver[driver.lower()]\n platform_code = TestPlatform[platform.lower()]\n molecule_env = env.copy()\n if driver_code == MoleculeDriver.lxd:\n molecule_env.update({'MOLECULE_USER_NAME': 'root'})\n elif driver_code == MoleculeDriver.vagrant:\n molecule_env.update({'MOLECULE_USER_NAME': 'vagrant'})\n molecule_command = (\n f'molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}'\n )\n if scenario is not None:\n molecule_command += f' -s {scenario}'\n run_command(context, molecule_command, env=molecule_env, echo=True)\n\n\ndef get_parameter_value(host, ansible_var_name, param_value, default_value):\n if host.backend.HAS_RUN_ANSIBLE:\n ansible_var_value = host.ansible.get_variables().get(ansible_var_name,\n None)\n else:\n ansible_var_value = None\n return_value = ansible_var_value if param_value is None else param_value\n if return_value is None:\n return_value = default_value\n return return_value\n\n\ndef get_github_release_info(release_url):\n if 'AO_GITHUB_OAUTH_TOKEN' in os.environ:\n headers = {'Authorization': 'token ' + os.environ[\n 'AO_GITHUB_OAUTH_TOKEN']}\n else:\n headers = None\n return requests.get('https://api.github.com/repos/' + release_url,\n headers=headers).json()\n",
"step-5": "import os\nimport pathlib\nimport enum\nimport warnings\nimport colorama\nimport requests\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n import invoke\n\nclass MoleculeDriver(enum.Enum):\n docker = 1\n lxd = 2\n vagrant = 3\n\nclass TestPlatform(enum.Enum):\n linux = 1\n ubuntu = 2\n centos = 3\n\ndef print_header(header_text):\n print(\n colorama.Fore.CYAN + colorama.Style.BRIGHT +\n f\" {header_text} \".center(80, \"=\") +\n colorama.Style.RESET_ALL\n )\n\n\ndef print_sub_header(sub_header_text):\n print(\n colorama.Fore.CYAN + colorama.Style.BRIGHT + \"--\" +\n f\" {sub_header_text} \".ljust(78, \"-\") +\n colorama.Style.RESET_ALL\n )\n\n\ndef print_success_message(success_message_text):\n print(\n colorama.Fore.GREEN + colorama.Style.BRIGHT +\n f\" {success_message_text}: Success \".center(80, \"=\") +\n colorama.Style.RESET_ALL\n )\n\n\ndef run_command(context, *args, **kwargs):\n try:\n return context.run(*args, **kwargs)\n except invoke.exceptions.Failure:\n print(\n colorama.Fore.RED + colorama.Style.BRIGHT +\n \"Failure: error executing '\" + args[0] + \"' command\" +\n colorama.Style.RESET_ALL\n )\n raise\n\ndef get_base_config_path(driver_code, platform_code):\n base_config = \"molecule/molecule_base_{driver}_{platform}.yml\".format(\n driver=driver_code.name, platform=platform_code.name\n )\n return str(pathlib.Path(__file__).resolve().parent / base_config)\n\ndef get_molecule_scenarios(context):\n scenarios = []\n for child_obj in (pathlib.Path.cwd() / \"molecule\").iterdir():\n if child_obj.is_dir():\n if (child_obj / \"molecule.yml\").exists():\n scenarios.append(child_obj.name)\n return sorted(scenarios)\n\n\ndef run_molecule(context, command, scenario, driver, platform=\"linux\", env={}):\n driver_code = MoleculeDriver[driver.lower()]\n platform_code = TestPlatform[platform.lower()]\n molecule_env = env.copy()\n if driver_code == MoleculeDriver.lxd:\n molecule_env.update({\"MOLECULE_USER_NAME\": \"root\"})\n elif driver_code == MoleculeDriver.vagrant:\n molecule_env.update({\"MOLECULE_USER_NAME\": \"vagrant\"})\n molecule_command = (\n f\"molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}\"\n )\n if scenario is not None:\n molecule_command += f\" -s {scenario}\"\n run_command(context, molecule_command, env=molecule_env, echo=True)\n\ndef get_parameter_value(host, ansible_var_name, param_value, default_value):\n if host.backend.HAS_RUN_ANSIBLE:\n ansible_var_value = host.ansible.get_variables().get(ansible_var_name, None)\n else:\n ansible_var_value = None\n return_value = ansible_var_value if param_value is None else param_value\n if return_value is None:\n return_value = default_value\n return return_value\n\ndef get_github_release_info(release_url):\n if \"AO_GITHUB_OAUTH_TOKEN\" in os.environ:\n headers = {\"Authorization\": \"token \" + os.environ[\"AO_GITHUB_OAUTH_TOKEN\"]}\n else:\n headers = None\n return requests.get(\n \"https://api.github.com/repos/\" + release_url, headers=headers\n ).json()\n",
"step-ids": [
10,
11,
13,
14,
16
]
}
|
[
10,
11,
13,
14,
16
] |
import unittest
import numpy
import pandas as pd
import fixtures.examples_validate as examples
from cellxgene_schema.validate import Validator
from cellxgene_schema.write_labels import AnnDataLabelAppender
# Tests for schema compliance of an AnnData object
class TestValidAnndata(unittest.TestCase):
"""
Tests a valid AnnData object. Most other tests below modify this AnnData object and test for failure cases.
The valid AnnData object has all valid cases described in the schema.
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_valid_anndata(self):
self.validator.validate_adata()
self.assertFalse(self.validator.errors)
class TestH5adValidation(unittest.TestCase):
"""
    Checks that validation from an h5ad file works. Only one invalid example is used here, since extensive
    testing is done in the classes below.
"""
def setUp(self):
self.h5ad_valid_file = examples.h5ad_valid
self.h5ad_invalid_file = examples.h5ad_invalid
self.validator = Validator()
def test_validate(self):
# Valid h5ad
self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))
# Invalid h5ads
self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))
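        # The same API works outside unittest; a minimal sketch, assuming a local
        # file path ("my_dataset.h5ad" below is purely illustrative):
        #     validator = Validator()
        #     is_valid = validator.validate_adata("my_dataset.h5ad")
        #     print(validator.errors, validator.warnings)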
class TestExpressionMatrix(unittest.TestCase):
"""
Fail cases for expression matrices (anndata.X and anndata.raw.X)
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_shapes(self):
"""
All matrix layers MUST have the same shape, and have the same cell labels and gene labels.
"""
# Creates a raw layer
self.validator.adata.raw = self.validator.adata
self.validator.adata.raw.var.drop("feature_is_filtered", axis=1, inplace=True)
self.validator.adata.X = examples.adata_non_raw.X.copy()
self.validator.adata.uns["X_normalization"] = "CPM"
# remove one gene
self.validator.adata = self.validator.adata[:, 1:]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
["ERROR: Number of genes in X (3) is different than raw.X (4)."],
)
def test_sparsity(self):
"""
In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that
the matrix be encoded as a scipy.sparse.csr_matrix
"""
self.validator.adata.X = self.validator.adata.X.toarray()
self.validator.validate_adata()
self.assertEqual(
self.validator.warnings,
[
"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, "
"and it is not a 'scipy.sparse.csr_matrix'. It is "
"STRONGLY RECOMMENDED to use this type of matrix for "
"the given sparsity."
],
)
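    def test_sparsity_csr_conversion_sketch(self):
        """
        A minimal sketch (not required by the schema) of the remediation implied by
        test_sparsity: compute the sparsity of X and, when it is 0.5 or more, encode
        the matrix as scipy.sparse.csr_matrix. Assumes scipy is available, which it
        is as a dependency of anndata.
        """
        from scipy import sparse
        dense = self.validator.adata.X.toarray()
        sparsity = 1.0 - numpy.count_nonzero(dense) / dense.size
        if sparsity >= 0.5:
            converted = sparse.csr_matrix(dense)
            self.assertTrue(sparse.issparse(converted))
            self.assertEqual(converted.shape, dense.shape)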
def test_raw_existence(self):
"""
Except for ATAC-seq and methylation data, raw data is REQUIRED
"""
# RNA - raw layer required
del self.validator.adata.raw
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'."
],
)
# ATAC - raw layer not required
        # The assignment above, self.validator.adata.uns["X_normalization"] = "CPM", means X is not raw.
        # The following line makes this scATAC-seq data (EFO:0010891).
        # Missing raw data is allowed for ATAC-seq data, so the following should not return an error message.
self.validator.errors = []
self.validator.adata.obs["assay_ontology_term_id"] = "EFO:0010891"
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
def test_final_strongly_recommended(self):
"""
Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED
"""
        # move raw to X and delete raw: i.e. there is no final (normalized) matrix
self.validator.adata.X = self.validator.adata.raw.X
del self.validator.adata.raw
self.validator.adata.uns["X_normalization"] = "none"
self.validator.validate_adata()
self.assertEqual(
self.validator.warnings,
[
"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. "
"It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided."
],
)
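        # The layout the two tests above encourage, as a hedged sketch (the scanpy
        # calls are illustrative and not used by this suite):
        #     adata.raw = adata                      # snapshot raw counts first
        #     sc.pp.normalize_total(adata)           # then normalize X in place
        #     sc.pp.log1p(adata)
        #     adata.uns["X_normalization"] = "log1p of CPM"  # free-text description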
class TestObs(unittest.TestCase):
"""
    Fail cases in adata.obs
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_column_presence(self):
"""
obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
"""
columns = [
"assay_ontology_term_id",
"development_stage_ontology_term_id",
"disease_ontology_term_id",
"ethnicity_ontology_term_id",
"is_primary_data",
"sex_ontology_term_id",
"tissue_ontology_term_id",
]
for column in columns:
with self.subTest(column=column):
self.validator.errors = []
self.validator.adata = examples.adata.copy()
self.validator.adata.obs.drop(column, axis=1, inplace=True)
# Remove batch condition because it has a dependency with is_primary_data
self.validator.adata.uns.pop("batch_condition")
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[f"ERROR: Dataframe 'obs' is missing " f"column '{column}'."],
)
def test_column_presence_organism(self):
"""
obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
        A separate check is needed for organism_ontology_term_id because removing it from anndata results in
        multiple errors, given that other columns depend on its presence.
"""
self.validator.adata.obs.drop("organism_ontology_term_id", axis=1, inplace=True)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Dataframe 'obs' is missing column "
"'organism_ontology_term_id'.",
"ERROR: Checking values with dependencies failed for "
"adata.obs['ethnicity_ontology_term_id'], this is likely due "
"to missing dependent column in adata.obs.",
"ERROR: Checking values with dependencies failed for "
"adata.obs['development_stage_ontology_term_id'], this is likely due "
"to missing dependent column in adata.obs.",
],
)
def test_obsolete_term_id(self):
"""
Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310
for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by
EFO:0009899 for 10x 3' v2.
https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310
"""
# Not a valid term
self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0009310"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.",
"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id "
"of '[['EFO:0002772', 'EFO:0010183']]'.",
],
)
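        # Per the docstring above, the non-obsolete replacement term would be
        # accepted instead, e.g.
        #     self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0009899"  # 10x 3' v2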
def test_assay_ontology_term_id(self):
"""
assay_ontology_term_id categorical with str categories.
This MUST be an EFO term and either child of "EFO:0002772" or "EFO:0010183"
If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to
the most accurate term. For example, the sci-plex assay could be curated as "EFO:0010183 (sci-plex)"
"""
# Not a valid term
self.validator.adata.obs["assay_ontology_term_id"][0] = "CL:000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid "
"ontology term id of 'EFO'.",
"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child "
"term id of '[['EFO:0002772', 'EFO:0010183']]'.",
],
)
# Not a valid child
self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0000001"
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a "
"child term id of '[['EFO:0002772', 'EFO:0010183']]'."
],
)
# Not a clarifying text
self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0010183 sci-plex"
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.",
"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of "
"'[['EFO:0002772', 'EFO:0010183']]'.",
],
)
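        # By contrast, clarifying text wrapped in parentheses is the accepted form
        # described in the docstring, e.g. "EFO:0010183 (sci-plex)".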
def test_cell_type_ontology_term_id(self):
"""
cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.
"""
# Not a valid term
self.validator.adata.obs["cell_type_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid "
"ontology term id of 'CL'."
],
)
def test_development_stage_ontology_term_id_human(self):
"""
development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be "unknown".
        If organism_ontology_term_id is "NCBITaxon:9606" for Homo sapiens,
this MUST be the most accurate HsapDv term.
"""
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:9606"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is "
"not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' "
"(Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown."
],
)
def test_development_stage_ontology_term_id_mouse(self):
"""
        If organism_ontology_term_id is "NCBITaxon:10090" for Mus musculus,
this MUST be the most accurate MmusDv term
"""
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10090"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "EFO:0000001"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is "
"not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' "
"(Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown."
],
)
def test_development_stage_ontology_term_id_all_species(self):
"""
        For all other organisms, this MUST be a child of UBERON:0000105 excluding UBERON:0000071, or unknown.
"""
# Fail case not an UBERON term
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10114"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "EFO:0000001"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is "
"not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' "
"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of "
"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.",
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not "
"a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' "
"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of "
"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.",
],
)
        # For all other organisms, it MUST be a child of UBERON:0000105 and not UBERON:0000071
# Fail case UBERON:0000071
self.validator.errors = []
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10114"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "UBERON:0000071"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When "
"'organism_ontology_term_id' is not 'NCBITaxon:10090' "
"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of "
"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.",
],
)
def test_disease_ontology_term_id(self):
"""
disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or
PATO:0000461 for normal or healthy.
"""
# Invalid ontology
self.validator.adata.obs["disease_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a "
"valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids."
],
)
# Invalid PATO term id
self.validator.errors = []
self.validator.adata.obs["disease_ontology_term_id"][0] = "PATO:0001894"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. "
"Only 'PATO:0000461' is allowed for 'PATO' term ids."
],
)
def test_ethnicity_ontology_term_id(self):
"""
ethnicity_ontology_term_id categorical with str categories.
        If organism_ontology_term_id is "NCBITaxon:9606" for Homo sapiens,
this MUST be either a HANCESTRO term or "unknown" if unavailable.
Otherwise, for all other organisms this MUST be "na".
"""
        # If organism_ontology_term_id is "NCBITaxon:9606" for Homo sapiens,
# this MUST be either a HANCESTRO term or "unknown" if unavailable.
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:9606"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is "
"not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' "
"(Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'."
],
)
# Otherwise, for all other organisms this MUST be "na". Below is the test case for mouse data.
# development_stage_ontology_term_id has to be set to an appropriate mouse term id, otherwise there
# will be an error in that field.
self.validator.errors = []
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10090"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "MmusDv:0000003"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a "
"valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' "
"(Homo sapiens), ethnicity_ontology_term_id MUST be 'na'."
],
)
def test_organism_ontology_term_id(self):
"""
organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.
"""
# Setting "organism_ontology_term_id" to "EFO:0000001" is the fail case. However since this represents neither
# human nor mouse, then two other columns that are dependent on it need to be set appropriately to avoid
# other error messages: "development_stage_ontology_term_id" and "ethnicity_ontology_term_id"
self.validator.adata.obs["organism_ontology_term_id"][0] = "EFO:0000001"
self.validator.adata.obs["development_stage_ontology_term_id"][0] = "unknown"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid "
"ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed."
],
)
def test_tissue_ontology_term_id_base(self):
"""
tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue
that this cell was derived from, depending on the type of biological sample:
"""
self.validator.adata.obs["tissue_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a "
"valid ontology term id of 'UBERON, CL'."
],
)
def test_tissue_ontology_term_id_cell_culture(self):
"""
Cell Culture - MUST be a CL term appended with " (cell culture)"
"""
self.validator.adata.obs["tissue_ontology_term_id"][
0
] = "CL:0000057 (CELL culture)"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is "
"not a valid ontology term id of 'UBERON, CL'."
],
)
def test_tissue_ontology_term_id_organoid(self):
"""
Organoid - MUST be an UBERON term appended with " (organoid)"
"""
self.validator.adata.obs["tissue_ontology_term_id"][0] = "CL:0000057 (ORGANOID)"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is "
"not a valid ontology term id of 'UBERON, CL'."
],
)
def test_sex_ontology_term_id(self):
"""
sex_ontology_term_id categorical with str categories.
        This MUST be a child of PATO:0001894 for phenotypic sex or "unknown" if unavailable
"""
self.validator.adata.obs["sex_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is "
"not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', "
"or 'unknown' are allowed."
],
)
def test_is_primary_data(self):
"""
is_primary_data bool. This MUST be True if this is the canonical instance of this cellular
observation and False if not. This is commonly False
for meta-analyses reusing data or for secondary views of data.
"""
self.validator.adata.obs["is_primary_data"] = "FALSE"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Column 'is_primary_data' in dataframe 'obs' "
"must be boolean, not 'object'."
],
)
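        # A hedged sketch of the fix: use a real boolean dtype rather than strings, e.g.
        #     self.validator.adata.obs["is_primary_data"] = numpy.zeros(
        #         self.validator.adata.n_obs, dtype=bool
        #     )
        # (the all-False values are illustrative only).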
class TestVar(unittest.TestCase):
"""
Fail cases in adata.var and adata.raw.var
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_var_and_raw_var_same_index(self):
"""
var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.
"""
# Swap first row for second one
var = Validator.getattr_anndata(self.validator.adata, "var")
# First swap the index
new_index = list(var.index)
tmp = new_index[0]
new_index[0] = new_index[1]
new_index[1] = tmp
var.set_index(pd.Index(new_index), inplace=True)
# Then swap the actual rows
tmp = var.iloc[0, :].copy()
var.iloc[0, :] = var.iloc[1, :].copy()
var.iloc[1, :] = tmp
self.validator.validate_adata()
print("FOO", self.validator.errors)
self.assertEqual(
self.validator.errors,
["ERROR: Index of 'raw.var' is not identical to index of 'var'."],
)
def test_check_unique_var(self):
"""
var.index MUST contain unique ENSEMBL gene identifiers for features.
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
# Duplicate 1st row in var and assign it to 2nd
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[1] = new_index[0]
component.set_index(pd.Index(new_index), inplace=True)
component.iloc[1, :] = component.iloc[0, :]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: Column 'index' in dataframe '{component_name}' is not unique."
],
)
def test_column_presence(self):
"""
var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
feature_is_filtered must not be in raw.var, and it's only checked in var
"""
columns = ["feature_is_filtered", "feature_biotype"]
for component_name in ["var", "raw.var"]:
for column in columns:
if column == "feature_is_filtered" and component_name == "raw.var":
continue
with self.subTest(component_name=component_name, column=column):
# Resetting validator
self.validator.errors = []
self.validator.adata = examples.adata.copy()
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
component.drop(column, axis=1, inplace=True)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: Dataframe '{component_name}' is missing "
f"column '{column}'."
],
)
def test_feature_is_filtered(self):
"""
feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)
but is present in the raw matrix (raw.X). The value for all cells of the given feature in the
final matrix MUST be 0.
Otherwise, this MUST be False.
"""
        # Flag the first feature as filtered while leaving one non-zero value in its column of X
self.validator.adata.var["feature_is_filtered"][0] = True
for i in range(self.validator.adata.X.shape[0]):
self.validator.adata.X[i, 0] = 0
self.validator.adata.X[0, 0] = 1
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', "
"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. "
"All values for these features must be 0."
],
)
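        # The valid counterpart: a feature flagged as filtered must have an all-zero
        # column in X, e.g.
        #     self.validator.adata.var["feature_is_filtered"][0] = True
        #     self.validator.adata.X[:, 0] = 0
        # which should satisfy the rule quoted in the docstring.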
def test_columns_not_in_raw_var(self):
"""
Curators MUST annotate the following column only in the var dataframe.
This column MUST NOT be present in raw.var:
feature_is_filtered
"""
self.validator.adata.raw = self.validator.adata
self.validator.adata.uns["X_normalization"] = "CPM"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
["ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'."],
)
def test_feature_id_wrong_format(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ID with an incorrect format "ENSEBML_NOGENE"
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[0] = "ENSEBML_NOGENE"
component.set_index(pd.Index(new_index), inplace=True)
component["feature_biotype"][0] = "gene"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' "
f"in '{component_name}', make sure it is a valid ID."
],
)
def test_feature_id_non_existent_ensembl(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ENSEMBL ID that has the right format but doesn't exist
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[0] = "ENSG000"
component.set_index(pd.Index(new_index), inplace=True)
component["feature_biotype"][0] = "gene"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'."
],
)
def test_feature_id_non_existent_ercc(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ERCC ID that has the right format but doesn't exist
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[0] = "ERCC-000000"
component.set_index(pd.Index(new_index), inplace=True)
component["feature_biotype"][0] = "spike-in"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'."
],
)
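        # For reference, well-formed identifiers the checks above expect look like
        # "ENSG00000139618" (an ENSEMBL gene ID) or "ERCC-00002" (an ERCC spike-in
        # ID); both are shown here only as format illustrations.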
class TestUns(unittest.TestCase):
"""
Fail cases in adata.uns
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_required_fields_schema_version(self):
"""
Curators MUST annotate `schema_version` and values in uns (schema_version)
"""
del self.validator.adata.uns["schema_version"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: adata has no schema definition in 'adata.uns'. "
"Validation cannot be performed."
],
)
def test_required_fields_title(self):
"""
Curators MUST annotate `schema_version` and values in uns (title)
"""
del self.validator.adata.uns["title"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors, ["ERROR: 'title' in 'uns' is not present."]
)
def test_required_fields_X_normalization(self):
"""
Curators MUST annotate `schema_version` and values in uns (X_normalization)
"""
del self.validator.adata.uns["X_normalization"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors, ["ERROR: 'X_normalization' in 'uns' is not present."]
)
def test_leading_trailing_double_spaces_in_strings(self):
"""
The following sequences MUST NOT appear in str types documented in the schema:
Leading control or space separators - ” This is an example”
Trailing control or space separators - “This is an example ”
        Multiple (internal) control or space separators - "This  is an example"
"""
self.validator.adata.uns["title"] = " There is a leading space"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces."
],
)
self.validator.adata.uns["title"] = "There is a trailing space "
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces."
],
)
self.validator.adata.uns["title"] = "There are double spaces"
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces."
],
)
def test_schema_version(self):
"""
Schema_version, This MUST be "2.0.0".
"""
self.validator.adata.uns["schema_version"] = "1.0.0"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. "
"Validation cannot be performed."
],
)
def test_title(self):
"""
Title MUST be a string
"""
# list instead of string
self.validator.adata.uns["title"] = ["title"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['title']' in 'uns['title']' is not valid, "
"it must be a string."
],
)
def test_X_normalization_is_str(self):
"""
X_normalization str.
"""
# list instead of string
self.validator.adata.uns["X_normalization"] = ["normalization"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['normalization']' in 'uns['X_normalization']' is "
"not valid, it must be a string."
],
)
def test_X_normalization_not_raw(self):
"""
X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.
If data in X are raw, this SHOULD be "none".
FAIL CASE for when X_normalization was set to "none" but X may not be raw data
"""
# Assign a real value to X while X_normalization is 'none'
del self.validator.adata.raw
self.validator.adata.uns["X_normalization"] = "none"
self.validator.validate_adata()
print("FOO", self.validator.warnings)
self.assertEqual(
self.validator.warnings,
[
"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear "
"to have raw counts (integers)"
],
)
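        # A hedged sketch of the integer-count heuristic the warning refers to
        # (this mirrors the idea, not necessarily the validator's exact check):
        #     X = adata.X.toarray() if hasattr(adata.X, "toarray") else adata.X
        #     looks_raw = bool(numpy.all(numpy.modf(X)[0] == 0))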
def test_batch_condition_is_list(self):
"""
batch_condition list[str]
"""
# Check valid case of numpy array which is interchangeable with lists
self.validator.adata.uns["batch_condition"] = numpy.array(
self.validator.adata.uns["batch_condition"]
)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
# Check fail case: not a list nor numpy array
self.validator.adata.uns["batch_condition"] = "cell_type_ontology_term_id"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' "
"is not valid, it must be a list or numpy array."
],
)
def test_batch_condition_is_column_from_obs(self):
"""
batch_condition list[str]. str values MUST refer to cell metadata keys in obs.
"""
self.validator.adata.uns["batch_condition"] = ["NO_COLUMN"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a "
"column in 'adata.obs'."
],
)
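        # A valid value must name existing obs columns, e.g.
        #     self.validator.adata.uns["batch_condition"] = ["assay_ontology_term_id"]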
def test_default_embedding_is_str(self):
"""
Default_embedding str.
"""
self.validator.adata.uns["default_embedding"] = ["X_umap"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, "
"it must be a string."
],
)
def test_default_embedding_is_key_from_obsm(self):
"""
Default_embedding str. The value MUST match a key to an embedding in obsm
"""
self.validator.adata.uns["default_embedding"] = "X_other"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, "
"it must be a key of 'adata.obsm'."
],
)
def test_X_approximate_distribution_is_str(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal".
Note that `normal` is tested in the happy path test case using `good_uns`.
"""
# Check valid case of "count" which is not included in valid object
self.validator.adata.uns["X_approximate_distribution"] = "count"
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
# Invalid type: list
self.validator.adata.uns["X_approximate_distribution"] = ["count"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['count']' in 'uns['X_approximate_distribution']' "
"is not valid, it must be a string."
],
)
def test_X_approximate_distribution_is_valid(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal"
"""
self.validator.adata.uns["X_approximate_distribution"] = "COUNT"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is "
"not valid. Allowed terms: ['count', 'normal']."
],
)
class TestObsm(unittest.TestCase):
"""
Fail cases for adata.obsm
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
    def test_obsm_values_are_numpy(self):
"""
values in obsm MUST be a numpy.ndarray
"""
self.validator.adata.obsm["X_tsne"] = pd.DataFrame(
self.validator.adata.obsm["X_umap"], index=self.validator.adata.obs_names
)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: All embeddings have to be of 'numpy.ndarray' type, "
"'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>')."
],
)
def test_obsm_values_at_least_one_X(self):
"""
At least one key for the embedding MUST be prefixed with "X_"
"""
self.validator.adata.obsm["umap"] = self.validator.adata.obsm["X_umap"]
self.validator.adata.uns["default_embedding"] = "umap"
del self.validator.adata.obsm["X_umap"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: At least one embedding in 'obsm' has to have a "
"key with an 'X_' prefix."
],
)
def test_obsm_shape(self):
"""
Curators MUST annotate one or more two-dimensional (m >= 2) embeddings
"""
# Makes 1 column array
self.validator.adata.obsm["X_umap"] = numpy.delete(
self.validator.adata.obsm["X_umap"], 0, 1
)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: All embeddings must have as many rows as cells, and "
"at least two columns.'adata.obsm['X_umap']' has shape "
"of '(2, 1)'."
],
)
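        # A conforming embedding, for contrast, is a numpy array with one row per
        # cell and at least two columns, e.g.
        #     adata.obsm["X_umap"] = numpy.zeros((adata.n_obs, 2))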
class TestAddingLabels(unittest.TestCase):
"""
Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually
created dataframes (positive control) against the ones produced by the validator
"""
@classmethod
def setUpClass(cls):
# Manually created data (positive control)
cls.adata_with_labels = examples.adata_with_labels
# Validate test data
validator = Validator()
validator.adata = examples.adata.copy()
validator.validate_adata()
# Add labels through validator
cls.label_writer = AnnDataLabelAppender(validator)
cls.label_writer._add_labels()
def test_var_added_labels(self):
"""
When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism
to the var dataframe. Curators MUST NOT annotate the following columns:
- feature_name. If the feature_biotype is "gene" then this MUST be the human-readable ENSEMBL gene
name assigned to the feature_id. If the feature_biotype is "spike-in" then this MUST be the
ERCC Spike-In identifier appended with " spike-in control".
- feature_reference. This MUST be the reference organism for a feature:
Homo sapiens "NCBITaxon:9606"
Mus musculus "NCBITaxon:10090"
SARS-CoV-2 "NCBITaxon:2697049"
ERCC Spike-Ins "NCBITaxon:32630"
"""
for column in ["feature_name", "feature_reference"]:
expected_column = self.adata_with_labels.var[column]
obtained_column = self.label_writer.adata.var[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
def test_obs_added_labels(self):
"""
When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding ontology term to the obs dataframe.
Curators MUST NOT annotate the following columns.
- assay. categorical with str categories. This MUST be the human-readable name assigned to the value
of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to
assay_ontology_term_id MUST be appended to assay.
- cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value
of cell_type_ontology_term_id.
- development_stage. categorical with str categories. This MUST be "unknown" if set in
development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to
the value of development_stage_ontology_term_id.
- disease. categorical with str categories. This MUST be the human-readable name assigned to
the value of disease_ontology_term_id.
- ethnicity. categorical with str categories. This MUST be "na" or "unknown" if
set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable
name assigned to the value of ethnicity_ontology_term_id.
- organism. categorical with str categories. This MUST be the human-readable name assigned
to the value of organism_ontology_term_id.
- sex. categorical with str categories. This MUST be "unknown" if set in sex_ontology_term_id;
otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.
- tissue. categorical with str categories. This MUST be the human-readable name assigned to the
value of tissue_ontology_term_id. " (cell culture)" or " (organoid)" MUST
be appended if present in tissue_ontology_term_id.
"""
for column in [
"assay",
"cell_type",
"development_stage",
"disease",
"ethnicity",
"organism",
"sex",
"tissue",
]:
expected_column = self.adata_with_labels.obs[column]
obtained_column = self.label_writer.adata.obs[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
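if __name__ == "__main__":
    # Convenience entry point so the module can be run directly; a runner such as
    # pytest or `python -m unittest` works just as well.
    unittest.main()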
|
normal
|
{
"blob_id": "f4306f80330850415b74d729384f360489644e39",
"index": 354,
"step-1": "<mask token>\n\n\nclass TestObs(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <mask token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. 
Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = 
Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. 
Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. 
This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. 
Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. 
This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n",
"step-2": "<mask token>\n\n\nclass TestObs(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <mask token>\n <mask token>\n <mask token>\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n <mask token>\n <mask token>\n <mask token>\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <mask token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. 
raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. 
All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n 
Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. 
This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. 
Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. 
This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n",
"step-3": "<mask token>\n\n\nclass TestObs(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n <mask token>\n <mask token>\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n <mask token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. 
When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <mask token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n <mask token>\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. 
This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n <mask token>\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <mask token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. 
Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = 
Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. 
Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. 
This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. 
Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. 
This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n",
"step-4": "<mask token>\n\n\nclass TestObs(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. 
For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. 
When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. 
When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. 
This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. 
All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n 
Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. 
This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. 
Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. 
This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n",
"step-5": "import unittest\n\nimport numpy\nimport pandas as pd\n\nimport fixtures.examples_validate as examples\nfrom cellxgene_schema.validate import Validator\nfrom cellxgene_schema.write_labels import AnnDataLabelAppender\n\n\n# Tests for schema compliance of an AnnData object\n\n\nclass TestValidAnndata(unittest.TestCase):\n\n \"\"\"\n Tests a valid AnnData object. Most other tests below modify this AnnData object and test for failure cases.\n\n The valid AnnData object has all valid cases described in the schema.\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_valid_anndata(self):\n self.validator.validate_adata()\n self.assertFalse(self.validator.errors)\n\n\nclass TestH5adValidation(unittest.TestCase):\n\n \"\"\"\n Checks that validation from h5ad works, only does one invalid example as extensive testing is done in the classes\n below\n \"\"\"\n\n def setUp(self):\n self.h5ad_valid_file = examples.h5ad_valid\n self.h5ad_invalid_file = examples.h5ad_invalid\n self.validator = Validator()\n\n def test_validate(self):\n\n # Valid h5ad\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n\n # Invalid h5ads\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n\n # Creates a raw layer\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop(\"feature_is_filtered\", axis=1, inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n\n # remove one gene\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Number of genes in X (3) is different than raw.X (4).\"],\n )\n\n def test_sparsity(self):\n\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, \"\n \"and it is not a 'scipy.sparse.csr_matrix'. 
It is \"\n \"STRONGLY RECOMMENDED to use this type of matrix for \"\n \"the given sparsity.\"\n ],\n )\n\n def test_raw_existence(self):\n\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n\n # RNA - raw layer required\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ],\n )\n\n # ATAC - raw layer not required\n # The assignment above makes X to not be raw: self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n # The following line makes it to be scATAC-seq data (EFO:0010891)\n # Missing raw data in atac-seq data is allowed, thus the following should not return an error message\n self.validator.errors = []\n self.validator.adata.obs[\"assay_ontology_term_id\"] = \"EFO:0010891\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n\n # move raw to X amd: i.e. there is no final\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns[\"X_normalization\"] = \"none\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. \"\n \"It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ],\n )\n\n\nclass TestObs(unittest.TestCase):\n\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n\n columns = [\n \"assay_ontology_term_id\",\n \"development_stage_ontology_term_id\",\n \"disease_ontology_term_id\",\n \"ethnicity_ontology_term_id\",\n \"is_primary_data\",\n \"sex_ontology_term_id\",\n \"tissue_ontology_term_id\",\n ]\n\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n # Remove batch condition because it has a dependency with is_primary_data\n self.validator.adata.uns.pop(\"batch_condition\")\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [f\"ERROR: Dataframe 'obs' is missing \" f\"column '{column}'.\"],\n )\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. 
Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n\n self.validator.adata.obs.drop(\"organism_ontology_term_id\", axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Dataframe 'obs' is missing column \"\n \"'organism_ontology_term_id'.\",\n \"ERROR: Checking values with dependencies failed for \"\n \"adata.obs['ethnicity_ontology_term_id'], this is likely due \"\n \"to missing dependent column in adata.obs.\",\n \"ERROR: Checking values with dependencies failed for \"\n \"adata.obs['development_stage_ontology_term_id'], this is likely due \"\n \"to missing dependent column in adata.obs.\",\n ],\n )\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n\n # Not a valid term\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"EFO:0009310\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\",\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id \"\n \"of '[['EFO:0002772', 'EFO:0010183']]'.\",\n ],\n )\n\n def test_assay_ontology_term_id(self):\n\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n\n # Not a valid term\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"CL:000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid \"\n \"ontology term id of 'EFO'.\",\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child \"\n \"term id of '[['EFO:0002772', 'EFO:0010183']]'.\",\n ],\n )\n\n # Not a valid child\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a \"\n \"child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ],\n )\n\n # Not a clarifying text\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"EFO:0010183 sci-plex\"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\",\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of \"\n \"'[['EFO:0002772', 'EFO:0010183']]'.\",\n ],\n )\n\n def test_cell_type_ontology_term_id(self):\n\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. 
This MUST be a CL term.\n \"\"\"\n\n # Not a valid term\n self.validator.adata.obs[\"cell_type_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid \"\n \"ontology term id of 'CL'.\"\n ],\n )\n\n def test_development_stage_ontology_term_id_human(self):\n\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:9606\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is \"\n \"not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' \"\n \"(Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ],\n )\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10090\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"EFO:0000001\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is \"\n \"not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' \"\n \"(Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ],\n )\n\n def test_development_stage_ontology_term_id_all_species(self):\n\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n\n # Fail case not an UBERON term\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10114\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"EFO:0000001\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is \"\n \"not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' \"\n \"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of \"\n \"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\",\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not \"\n \"a child term id of '[['UBERON:0000105']]'. 
When 'organism_ontology_term_id' is not 'NCBITaxon:10090' \"\n \"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of \"\n \"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\",\n ],\n )\n\n # All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n # Fail case UBERON:0000071\n self.validator.errors = []\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10114\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"UBERON:0000071\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When \"\n \"'organism_ontology_term_id' is not 'NCBITaxon:10090' \"\n \"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of \"\n \"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\",\n ],\n )\n\n def test_disease_ontology_term_id(self):\n\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n\n # Invalid ontology\n self.validator.adata.obs[\"disease_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a \"\n \"valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ],\n )\n\n # Invalid PATO term id\n self.validator.errors = []\n self.validator.adata.obs[\"disease_ontology_term_id\"][0] = \"PATO:0001894\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. \"\n \"Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ],\n )\n\n def test_ethnicity_ontology_term_id(self):\n\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n\n # If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n # this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:9606\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is \"\n \"not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' \"\n \"(Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ],\n )\n\n # Otherwise, for all other organisms this MUST be \"na\". 
Below is the test case for mouse data.\n # development_stage_ontology_term_id has to be set to an appropriate mouse term id, otherwise there\n # will be an error in that field.\n self.validator.errors = []\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10090\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"MmusDv:0000003\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a \"\n \"valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' \"\n \"(Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ],\n )\n\n def test_organism_ontology_term_id(self):\n\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n\n # Setting \"organism_ontology_term_id\" to \"EFO:0000001\" is the fail case. However since this represents neither\n # human nor mouse, then two other columns that are dependent on it need to be set appropriately to avoid\n # other error messages: \"development_stage_ontology_term_id\" and \"ethnicity_ontology_term_id\"\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][0] = \"unknown\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid \"\n \"ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ],\n )\n\n def test_tissue_ontology_term_id_base(self):\n\n \"\"\"\n tissue_ontology_term_id categorical with str categories. 
This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n\n self.validator.adata.obs[\"tissue_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a \"\n \"valid ontology term id of 'UBERON, CL'.\"\n ],\n )\n\n def test_tissue_ontology_term_id_cell_culture(self):\n\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n\n self.validator.adata.obs[\"tissue_ontology_term_id\"][\n 0\n ] = \"CL:0000057 (CELL culture)\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is \"\n \"not a valid ontology term id of 'UBERON, CL'.\"\n ],\n )\n\n def test_tissue_ontology_term_id_organoid(self):\n\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n\n self.validator.adata.obs[\"tissue_ontology_term_id\"][0] = \"CL:0000057 (ORGANOID)\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is \"\n \"not a valid ontology term id of 'UBERON, CL'.\"\n ],\n )\n\n def test_sex_ontology_term_id(self):\n\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n\n self.validator.adata.obs[\"sex_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is \"\n \"not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', \"\n \"or 'unknown' are allowed.\"\n ],\n )\n\n def test_is_primary_data(self):\n\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n\n self.validator.adata.obs[\"is_primary_data\"] = \"FALSE\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' \"\n \"must be boolean, not 'object'.\"\n ],\n )\n\n\nclass TestVar(unittest.TestCase):\n\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n\n \"\"\"\n var.index MUST contain unique identifiers for features. 
raw.var.index MUST be identical to var.index.\n \"\"\"\n\n # Swap first row for second one\n var = Validator.getattr_anndata(self.validator.adata, \"var\")\n\n # First swap the index\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n\n # Then swap the actual rows\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n\n self.validator.validate_adata()\n print(\"FOO\", self.validator.errors)\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"],\n )\n\n def test_check_unique_var(self):\n\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n # Duplicate 1st row in var and assign it to 2nd\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ],\n )\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )\n\n def test_feature_is_filtered(self):\n\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n\n # Duplicate 1st row in var and assigned to 2nd\n self.validator.adata.var[\"feature_is_filtered\"][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', \"\n \"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. 
\"\n \"All values for these features must be 0.\"\n ],\n )\n\n def test_columns_not_in_raw_var(self):\n\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"],\n )\n\n def test_feature_id_wrong_format(self):\n\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n\n new_index = list(component.index)\n new_index[0] = \"ENSEBML_NOGENE\"\n component.set_index(pd.Index(new_index), inplace=True)\n component[\"feature_biotype\"][0] = \"gene\"\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' \"\n f\"in '{component_name}', make sure it is a valid ID.\"\n ],\n )\n\n def test_feature_id_non_existent_ensembl(self):\n\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n\n new_index = list(component.index)\n new_index[0] = \"ENSG000\"\n component.set_index(pd.Index(new_index), inplace=True)\n component[\"feature_biotype\"][0] = \"gene\"\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ],\n )\n\n def test_feature_id_non_existent_ercc(self):\n\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n\n new_index = list(component.index)\n new_index[0] = \"ERCC-000000\"\n component.set_index(pd.Index(new_index), inplace=True)\n component[\"feature_biotype\"][0] = \"spike-in\"\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ],\n )\n\n\nclass TestUns(unittest.TestCase):\n\n 
\"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n\n del self.validator.adata.uns[\"schema_version\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: adata has no schema definition in 'adata.uns'. \"\n \"Validation cannot be performed.\"\n ],\n )\n\n def test_required_fields_title(self):\n\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n\n del self.validator.adata.uns[\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors, [\"ERROR: 'title' in 'uns' is not present.\"]\n )\n\n def test_required_fields_X_normalization(self):\n\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n\n del self.validator.adata.uns[\"X_normalization\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors, [\"ERROR: 'X_normalization' in 'uns' is not present.\"]\n )\n\n def test_leading_trailing_double_spaces_in_strings(self):\n\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n\n self.validator.adata.uns[\"title\"] = \" There is a leading space\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ],\n )\n\n self.validator.adata.uns[\"title\"] = \"There is a trailing space \"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ],\n )\n\n self.validator.adata.uns[\"title\"] = \"There are double spaces\"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ],\n )\n\n def test_schema_version(self):\n\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n\n self.validator.adata.uns[\"schema_version\"] = \"1.0.0\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. 
\"\n \"Validation cannot be performed.\"\n ],\n )\n\n def test_title(self):\n\n \"\"\"\n Title MUST be a string\n \"\"\"\n\n # list instead of string\n self.validator.adata.uns[\"title\"] = [\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['title']' in 'uns['title']' is not valid, \"\n \"it must be a string.\"\n ],\n )\n\n def test_X_normalization_is_str(self):\n\n \"\"\"\n X_normalization str.\n \"\"\"\n\n # list instead of string\n self.validator.adata.uns[\"X_normalization\"] = [\"normalization\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is \"\n \"not valid, it must be a string.\"\n ],\n )\n\n def test_X_normalization_not_raw(self):\n\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n\n # Assign a real value to X while X_normalization is 'none'\n del self.validator.adata.raw\n self.validator.adata.uns[\"X_normalization\"] = \"none\"\n self.validator.validate_adata()\n print(\"FOO\", self.validator.warnings)\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear \"\n \"to have raw counts (integers)\"\n ],\n )\n\n def test_batch_condition_is_list(self):\n\n \"\"\"\n batch_condition list[str]\n \"\"\"\n\n # Check valid case of numpy array which is interchangeable with lists\n self.validator.adata.uns[\"batch_condition\"] = numpy.array(\n self.validator.adata.uns[\"batch_condition\"]\n )\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Check fail case: not a list nor numpy array\n self.validator.adata.uns[\"batch_condition\"] = \"cell_type_ontology_term_id\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' \"\n \"is not valid, it must be a list or numpy array.\"\n ],\n )\n\n def test_batch_condition_is_column_from_obs(self):\n\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n\n self.validator.adata.uns[\"batch_condition\"] = [\"NO_COLUMN\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a \"\n \"column in 'adata.obs'.\"\n ],\n )\n\n def test_default_embedding_is_str(self):\n\n \"\"\"\n Default_embedding str.\n \"\"\"\n\n self.validator.adata.uns[\"default_embedding\"] = [\"X_umap\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, \"\n \"it must be a string.\"\n ],\n )\n\n def test_default_embedding_is_key_from_obsm(self):\n\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n\n self.validator.adata.uns[\"default_embedding\"] = \"X_other\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, \"\n \"it must be a key of 'adata.obsm'.\"\n ],\n )\n\n def test_X_approximate_distribution_is_str(self):\n\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] 
or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n\n # Check valid case of \"count\" which is not included in valid object\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"count\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Invalid type: list\n self.validator.adata.uns[\"X_approximate_distribution\"] = [\"count\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' \"\n \"is not valid, it must be a string.\"\n ],\n )\n\n def test_X_approximate_distribution_is_valid(self):\n\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"COUNT\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is \"\n \"not valid. Allowed terms: ['count', 'normal'].\"\n ],\n )\n\n\nclass TestObsm(unittest.TestCase):\n\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n\n self.validator.adata.obsm[\"X_tsne\"] = pd.DataFrame(\n self.validator.adata.obsm[\"X_umap\"], index=self.validator.adata.obs_names\n )\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, \"\n \"'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ],\n )\n\n def test_obsm_values_at_least_one_X(self):\n\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n\n self.validator.adata.obsm[\"umap\"] = self.validator.adata.obsm[\"X_umap\"]\n self.validator.adata.uns[\"default_embedding\"] = \"umap\"\n del self.validator.adata.obsm[\"X_umap\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: At least one embedding in 'obsm' has to have a \"\n \"key with an 'X_' prefix.\"\n ],\n )\n\n def test_obsm_shape(self):\n\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n\n # Makes 1 column array\n self.validator.adata.obsm[\"X_umap\"] = numpy.delete(\n self.validator.adata.obsm[\"X_umap\"], 0, 1\n )\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: All embeddings must have as many rows as cells, and \"\n \"at least two columns.'adata.obsm['X_umap']' has shape \"\n \"of '(2, 1)'.\"\n ],\n )\n\n\nclass TestAddingLabels(unittest.TestCase):\n\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. 
The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n\n # Manually created data (positive control)\n cls.adata_with_labels = examples.adata_with_labels\n\n # Validate test data\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n\n # Add labels through validator\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n\n for column in [\"feature_name\", \"feature_reference\"]:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. 
\" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n\n for column in [\n \"assay\",\n \"cell_type\",\n \"development_stage\",\n \"disease\",\n \"ethnicity\",\n \"organism\",\n \"sex\",\n \"tissue\",\n ]:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n",
"step-ids": [
43,
45,
50,
57,
75
]
}
|
[
43,
45,
50,
57,
75
] |